From 0c6ea7f2a609e97aef6006e4797c366825726f7a Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Tue, 12 Sep 2017 15:38:25 -0600 Subject: [PATCH] Retire Packaging Deb project repos This commit is part of a series to retire the Packaging Deb project. Step 2 is to remove all content from the project repos, replacing it with a README notification where to find ongoing work, and how to recover the repo if needed at some future point (as in https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project). Change-Id: I1f28a2ce2253aff8e93d24f9840245ab7fc52db1 --- .coveragerc | 7 - .gitignore | 42 - .gitreview | 4 - .mailmap | 26 - .testr.conf | 8 - CONTRIBUTING.rst | 27 - HACKING.rst | 24 - LICENSE | 176 - README | 14 + README.rst | 79 - api-ref/source/conf.py | 240 - api-ref/source/index.rst | 27 - api-ref/source/v1/images-images-v1.inc | 344 -- api-ref/source/v1/images-sharing-v1.inc | 150 - api-ref/source/v1/index.rst | 26 - api-ref/source/v1/parameters.yaml | 249 - .../v1/samples/image-member-add-request.json | 4 - .../v1/samples/image-members-add-request.json | 12 - .../image-memberships-list-response.json | 11 - .../v1/samples/image-update-response.json | 25 - .../images-create-reserve-response.json | 22 - .../images-create-with-data-response.json | 22 - .../samples/images-list-details-response.json | 30 - .../v1/samples/images-list-response.json | 15 - .../samples/shared-images-list-response.json | 15 - api-ref/source/v2/images-data.inc | 142 - api-ref/source/v2/images-images-v2.inc | 697 --- .../v2/images-parameters-descriptions.inc | 17 - api-ref/source/v2/images-parameters.yaml | 597 -- api-ref/source/v2/images-schemas.inc | 138 - api-ref/source/v2/images-sharing-v2.inc | 381 -- api-ref/source/v2/images-tags.inc | 52 - api-ref/source/v2/index.rst | 32 - api-ref/source/v2/metadefs-index.rst | 64 - .../source/v2/metadefs-namespaces-objects.inc | 280 - .../v2/metadefs-namespaces-properties.inc | 306 -- .../source/v2/metadefs-namespaces-tags.inc | 300 - 
api-ref/source/v2/metadefs-namespaces.inc | 337 -- api-ref/source/v2/metadefs-parameters.yaml | 527 -- api-ref/source/v2/metadefs-resourcetypes.inc | 169 - api-ref/source/v2/metadefs-schemas.inc | 326 -- .../v2/samples/image-create-request.json | 6 - .../v2/samples/image-create-response.json | 22 - .../image-details-deactivate-response.json | 21 - .../samples/image-member-create-request.json | 3 - .../samples/image-member-create-response.json | 8 - .../image-member-details-response.json | 8 - .../samples/image-member-update-request.json | 3 - .../samples/image-member-update-response.json | 8 - .../samples/image-members-list-response.json | 21 - .../v2/samples/image-show-response.json | 21 - .../v2/samples/image-update-request.json | 15 - .../v2/samples/image-update-response.json | 24 - .../v2/samples/images-list-response.json | 48 - ...tadef-namespace-create-request-simple.json | 7 - .../metadef-namespace-create-request.json | 39 - ...adef-namespace-create-response-simple.json | 12 - .../metadef-namespace-create-response.json | 41 - .../metadef-namespace-details-response.json | 40 - ...ef-namespace-details-with-rt-response.json | 40 - .../metadef-namespace-update-request.json | 7 - .../metadef-namespace-update-response.json | 12 - .../metadef-namespaces-list-response.json | 96 - .../metadef-object-create-request.json | 24 - .../metadef-object-create-response.json | 28 - .../metadef-object-details-response.json | 28 - .../metadef-object-update-request.json | 12 - .../metadef-object-update-response.json | 16 - .../metadef-objects-list-response.json | 112 - .../metadef-properties-list-response.json | 86 - .../metadef-property-create-request.json | 15 - .../metadef-property-create-response.json | 15 - .../metadef-property-details-response.json | 15 - .../metadef-property-update-request.json | 15 - .../metadef-property-update-response.json | 15 - ...f-resource-type-assoc-create-response.json | 7 - .../metadef-resource-type-create-request.json | 5 - 
.../metadef-resource-types-list-response.json | 29 - .../samples/metadef-tag-create-response.json | 5 - .../samples/metadef-tag-details-response.json | 5 - .../samples/metadef-tag-update-request.json | 3 - .../samples/metadef-tag-update-response.json | 5 - .../samples/metadef-tags-create-request.json | 13 - .../samples/metadef-tags-create-response.json | 13 - .../samples/metadef-tags-list-response.json | 13 - .../schemas-image-member-show-response.json | 35 - .../schemas-image-members-list-response.json | 52 - .../samples/schemas-image-show-response.json | 236 - .../samples/schemas-images-list-response.json | 267 - ...hemas-metadef-namespace-show-response.json | 234 - ...emas-metadef-namespaces-list-response.json | 265 - .../schemas-metadef-object-show-response.json | 164 - ...schemas-metadef-objects-list-response.json | 195 - ...emas-metadef-properties-list-response.json | 152 - ...chemas-metadef-property-show-response.json | 122 - ...source-type-association-show-response.json | 36 - ...ource-type-associations-list-response.json | 67 - .../schemas-metadef-tag-show-response.json | 25 - .../schemas-metadef-tags-list-response.json | 56 - .../samples/schemas-task-show-response.json | 72 - .../samples/schemas-tasks-list-response.json | 71 - .../v2/samples/task-create-request.json | 11 - .../v2/samples/task-create-response.json | 20 - .../samples/task-show-failure-response.json | 21 - .../task-show-processing-response.json | 20 - .../samples/task-show-success-response.json | 23 - .../v2/samples/tasks-list-response.json | 26 - api-ref/source/v2/tasks-parameters.yaml | 195 - api-ref/source/v2/tasks-schemas.inc | 72 - api-ref/source/v2/tasks.inc | 198 - api-ref/source/versions/index.rst | 65 - .../samples/image-versions-response.json | 84 - api-ref/source/versions/versions.inc | 56 - babel.cfg | 1 - bandit.yaml | 245 - doc/source/_static/.placeholder | 0 doc/source/admin/apache-httpd.rst | 93 - doc/source/admin/authentication.rst | 118 - doc/source/admin/cache.rst | 172 - 
doc/source/admin/controllingservers.rst | 238 - doc/source/admin/db-sqlalchemy-migrate.rst | 67 - doc/source/admin/db.rst | 249 - doc/source/admin/flows.rst | 30 - doc/source/admin/index.rst | 23 - doc/source/admin/manage-images.rst | 282 - doc/source/admin/notifications.rst | 216 - doc/source/admin/policies.rst | 198 - doc/source/admin/property-protections.rst | 151 - doc/source/admin/requirements.rst | 81 - doc/source/admin/rollingupgrades.rst | 112 - doc/source/admin/tasks.rst | 137 - doc/source/admin/troubleshooting.rst | 460 -- doc/source/cli/footer.txt | 11 - doc/source/cli/general_options.txt | 68 - doc/source/cli/glanceapi.rst | 34 - doc/source/cli/glancecachecleaner.rst | 42 - doc/source/cli/glancecachemanage.rst | 83 - doc/source/cli/glancecacheprefetcher.rst | 35 - doc/source/cli/glancecachepruner.rst | 36 - doc/source/cli/glancecontrol.rst | 53 - doc/source/cli/glancemanage.rst | 104 - doc/source/cli/glanceregistry.rst | 35 - doc/source/cli/glancereplicator.rst | 85 - doc/source/cli/glancescrubber.rst | 58 - doc/source/cli/header.txt | 8 - doc/source/cli/index.rst | 9 - doc/source/cli/openstack_options.txt | 24 - doc/source/conf.py | 299 - doc/source/configuration/configuring.rst | 1603 ------ doc/source/configuration/glance_api.rst | 8 - doc/source/configuration/glance_cache.rst | 8 - doc/source/configuration/glance_manage.rst | 8 - doc/source/configuration/glance_registry.rst | 11 - doc/source/configuration/glance_scrubber.rst | 8 - doc/source/configuration/index.rst | 17 - .../configuration/sample-configuration.rst | 54 - doc/source/contributor/architecture.rst | 83 - doc/source/contributor/blueprints.rst | 88 - .../contributor/database_architecture.rst | 261 - .../contributor/database_migrations.rst | 347 -- doc/source/contributor/documentation.rst | 117 - .../contributor/domain_implementation.rst | 154 - doc/source/contributor/domain_model.rst | 290 - doc/source/contributor/index.rst | 49 - doc/source/contributor/minor-code-changes.rst | 96 - 
doc/source/contributor/refreshing-configs.rst | 62 - doc/source/contributor/release-cpl.rst | 278 - doc/source/deprecation-note.inc | 6 - doc/source/glossary.rst | 4153 -------------- doc/source/images/architecture.png | Bin 48303 -> 0 bytes doc/source/images/glance_db.png | Bin 108006 -> 0 bytes doc/source/images/glance_layers.png | Bin 32606 -> 0 bytes doc/source/images/image_status_transition.png | Bin 168257 -> 0 bytes doc/source/images/instance-life-1.png | Bin 28449 -> 0 bytes doc/source/images/instance-life-2.png | Bin 39847 -> 0 bytes doc/source/images/instance-life-3.png | Bin 30176 -> 0 bytes doc/source/images_src/architecture.graphml | 890 --- doc/source/images_src/glance_db.graphml | 217 - doc/source/images_src/glance_layers.graphml | 363 -- .../images_src/image_status_transition.dot | 51 - doc/source/index.rst | 48 - doc/source/install/get-started.rst | 71 - doc/source/install/index.rst | 43 - doc/source/install/install-debian.rst | 329 -- doc/source/install/install-obs.rst | 333 -- doc/source/install/install-rdo.rst | 332 -- doc/source/install/install-ubuntu.rst | 329 -- doc/source/install/install.rst | 11 - ...ote_configuration_vary_by_distribution.txt | 7 - doc/source/install/verify.rst | 103 - doc/source/user/common-image-properties.rst | 62 - doc/source/user/formats.rst | 124 - doc/source/user/glanceapi.rst | 891 --- doc/source/user/glanceclient.rst | 26 - doc/source/user/glancemetadefcatalogapi.rst | 605 -- doc/source/user/identifiers.rst | 27 - doc/source/user/index.rst | 16 - doc/source/user/metadefs-concepts.rst | 185 - doc/source/user/signature.rst | 184 - doc/source/user/statuses.rst | 136 - etc/glance-api-paste.ini | 90 - etc/glance-api.conf | 4568 ---------------- etc/glance-cache.conf | 2334 -------- etc/glance-manage.conf | 225 - etc/glance-registry-paste.ini | 35 - etc/glance-registry.conf | 2279 -------- etc/glance-scrubber.conf | 2500 --------- etc/glance-swift.conf.sample | 25 - etc/metadefs/README | 4 - 
...cim-processor-allocation-setting-data.json | 154 - .../cim-resource-allocation-setting-data.json | 178 - .../cim-storage-allocation-setting-data.json | 122 - .../cim-virtual-system-setting-data.json | 127 - etc/metadefs/compute-aggr-disk-filter.json | 21 - etc/metadefs/compute-aggr-iops-filter.json | 23 - etc/metadefs/compute-aggr-num-instances.json | 21 - etc/metadefs/compute-cpu-pinning.json | 43 - .../compute-guest-memory-backing.json | 29 - etc/metadefs/compute-guest-shutdown.json | 21 - etc/metadefs/compute-host-capabilities.json | 293 - etc/metadefs/compute-hypervisor.json | 42 - etc/metadefs/compute-instance-data.json | 36 - etc/metadefs/compute-libvirt-image.json | 101 - etc/metadefs/compute-libvirt.json | 32 - etc/metadefs/compute-quota.json | 109 - etc/metadefs/compute-randomgen.json | 29 - etc/metadefs/compute-trust.json | 24 - etc/metadefs/compute-vcputopology.json | 54 - etc/metadefs/compute-vmware-flavor.json | 24 - etc/metadefs/compute-vmware-quota-flavor.json | 26 - etc/metadefs/compute-vmware.json | 207 - etc/metadefs/compute-watchdog.json | 33 - etc/metadefs/compute-xenapi.json | 29 - etc/metadefs/glance-common-image-props.json | 42 - .../image-signature-verification.json | 50 - etc/metadefs/operating-system.json | 32 - etc/metadefs/software-databases.json | 334 -- etc/metadefs/software-runtimes.json | 77 - etc/metadefs/software-webservers.json | 103 - etc/metadefs/storage-volume-type.json | 20 - etc/oslo-config-generator/glance-api.conf | 14 - etc/oslo-config-generator/glance-cache.conf | 7 - etc/oslo-config-generator/glance-manage.conf | 7 - .../glance-registry.conf | 10 - .../glance-scrubber.conf | 10 - etc/ovf-metadata.json.sample | 8 - etc/policy.json | 62 - etc/property-protections-policies.conf.sample | 34 - etc/property-protections-roles.conf.sample | 32 - etc/schema-image.json | 28 - glance/__init__.py | 0 glance/api/__init__.py | 27 - glance/api/authorization.py | 915 ---- glance/api/cached_images.py | 129 - glance/api/common.py | 216 
- glance/api/middleware/__init__.py | 0 glance/api/middleware/cache.py | 338 -- glance/api/middleware/cache_manage.py | 84 - glance/api/middleware/context.py | 211 - glance/api/middleware/gzip.py | 65 - glance/api/middleware/version_negotiation.py | 120 - glance/api/policy.py | 680 --- glance/api/property_protections.py | 126 - glance/api/v1/__init__.py | 26 - glance/api/v1/controller.py | 96 - glance/api/v1/filters.py | 40 - glance/api/v1/images.py | 1351 ----- glance/api/v1/members.py | 248 - glance/api/v1/router.py | 109 - glance/api/v1/upload_utils.py | 293 - glance/api/v2/__init__.py | 0 glance/api/v2/discovery.py | 46 - glance/api/v2/image_actions.py | 96 - glance/api/v2/image_data.py | 469 -- glance/api/v2/image_members.py | 391 -- glance/api/v2/image_tags.py | 115 - glance/api/v2/images.py | 1067 ---- glance/api/v2/metadef_namespaces.py | 834 --- glance/api/v2/metadef_objects.py | 367 -- glance/api/v2/metadef_properties.py | 316 -- glance/api/v2/metadef_resource_types.py | 270 - glance/api/v2/metadef_tags.py | 411 -- glance/api/v2/model/__init__.py | 0 glance/api/v2/model/metadef_namespace.py | 79 - glance/api/v2/model/metadef_object.py | 49 - .../v2/model/metadef_property_item_type.py | 27 - glance/api/v2/model/metadef_property_type.py | 61 - glance/api/v2/model/metadef_resource_type.py | 62 - glance/api/v2/model/metadef_tag.py | 34 - glance/api/v2/router.py | 569 -- glance/api/v2/schemas.py | 109 - glance/api/v2/tasks.py | 418 -- glance/api/versions.py | 112 - glance/async/__init__.py | 72 - glance/async/flows/__init__.py | 0 glance/async/flows/api_image_import.py | 361 -- glance/async/flows/base_import.py | 473 -- glance/async/flows/convert.py | 154 - glance/async/flows/introspect.py | 97 - glance/async/flows/ovf_process.py | 268 - glance/async/taskflow_executor.py | 182 - glance/async/utils.py | 79 - glance/cmd/__init__.py | 18 - glance/cmd/api.py | 95 - glance/cmd/cache_cleaner.py | 61 - glance/cmd/cache_manage.py | 522 -- 
glance/cmd/cache_prefetcher.py | 63 - glance/cmd/cache_pruner.py | 53 - glance/cmd/control.py | 410 -- glance/cmd/manage.py | 470 -- glance/cmd/registry.py | 80 - glance/cmd/replicator.py | 781 --- glance/cmd/scrubber.py | 74 - glance/common/__init__.py | 0 glance/common/auth.py | 279 - glance/common/client.py | 603 -- glance/common/config.py | 847 --- glance/common/crypt.py | 96 - glance/common/exception.py | 458 -- glance/common/location_strategy/__init__.py | 134 - .../location_strategy/location_order.py | 36 - glance/common/location_strategy/store_type.py | 139 - glance/common/property_utils.py | 244 - glance/common/rpc.py | 302 - glance/common/scripts/__init__.py | 58 - .../scripts/api_image_import/__init__.py | 0 .../common/scripts/api_image_import/main.py | 136 - .../common/scripts/image_import/__init__.py | 0 glance/common/scripts/image_import/main.py | 157 - glance/common/scripts/utils.py | 138 - glance/common/store_utils.py | 136 - glance/common/swift_store_utils.py | 142 - glance/common/timeutils.py | 89 - glance/common/trust_auth.py | 117 - glance/common/utils.py | 674 --- glance/common/wsgi.py | 1213 ----- glance/common/wsgi_app.py | 55 - glance/common/wsme_utils.py | 71 - glance/context.py | 73 - glance/db/__init__.py | 872 --- glance/db/metadata.py | 65 - glance/db/migration.py | 59 - glance/db/registry/__init__.py | 0 glance/db/registry/api.py | 546 -- glance/db/simple/__init__.py | 0 glance/db/simple/api.py | 2059 ------- glance/db/sqlalchemy/__init__.py | 0 .../db/sqlalchemy/alembic_migrations/README | 1 - .../sqlalchemy/alembic_migrations/__init__.py | 108 - .../add_artifacts_tables.py | 224 - .../alembic_migrations/add_images_tables.py | 201 - .../alembic_migrations/add_metadefs_tables.py | 171 - .../alembic_migrations/add_tasks_tables.py | 66 - .../sqlalchemy/alembic_migrations/alembic.ini | 69 - .../data_migrations/__init__.py | 70 - .../ocata_migrate01_community_images.py | 103 - .../data_migrations/pike_migrate01_empty.py | 26 - 
.../db/sqlalchemy/alembic_migrations/env.py | 89 - .../sqlalchemy/alembic_migrations/migrate.cfg | 20 - .../alembic_migrations/script.py.mako | 20 - .../alembic_migrations/versions/__init__.py | 0 .../versions/liberty_initial.py | 40 - .../mitaka01_add_image_created_updated_idx.py | 47 - .../mitaka02_update_metadef_os_nova_server.py | 42 - ...ocata01_add_visibility_remove_is_public.py | 72 - ...cata01_add_visibility_remove_is_public.sql | 162 - .../ocata_contract01_drop_is_public.py | 67 - .../versions/ocata_expand01_add_visibility.py | 151 - .../versions/pike01_drop_artifacts_tables.py | 41 - .../pike_contract01_drop_artifacts_tables.py | 41 - .../versions/pike_expand01_empty.py | 29 - glance/db/sqlalchemy/api.py | 1880 ------- glance/db/sqlalchemy/metadata.py | 506 -- glance/db/sqlalchemy/metadef_api/__init__.py | 0 glance/db/sqlalchemy/metadef_api/namespace.py | 301 - glance/db/sqlalchemy/metadef_api/object.py | 152 - glance/db/sqlalchemy/metadef_api/property.py | 164 - .../sqlalchemy/metadef_api/resource_type.py | 107 - .../metadef_api/resource_type_association.py | 211 - glance/db/sqlalchemy/metadef_api/tag.py | 201 - glance/db/sqlalchemy/metadef_api/utils.py | 23 - glance/db/sqlalchemy/migrate_repo/README | 4 - glance/db/sqlalchemy/migrate_repo/__init__.py | 0 glance/db/sqlalchemy/migrate_repo/manage.py | 21 - glance/db/sqlalchemy/migrate_repo/migrate.cfg | 20 - glance/db/sqlalchemy/migrate_repo/schema.py | 107 - .../versions/001_add_images_table.py | 55 - .../002_add_image_properties_table.py | 78 - .../versions/003_add_disk_format.py | 109 - .../versions/003_sqlite_upgrade.sql | 61 - .../migrate_repo/versions/004_add_checksum.py | 74 - .../versions/005_size_big_integer.py | 74 - .../migrate_repo/versions/006_key_to_name.py | 59 - .../versions/006_mysql_upgrade.sql | 11 - .../versions/006_sqlite_upgrade.sql | 44 - .../migrate_repo/versions/007_add_owner.py | 65 - .../versions/008_add_image_members_table.py | 79 - .../versions/009_add_mindisk_and_minram.py | 
69 - .../versions/010_default_update_at.py | 43 - .../011_make_mindisk_and_minram_notnull.py | 26 - .../versions/011_sqlite_upgrade.sql | 59 - .../migrate_repo/versions/012_id_to_uuid.py | 355 -- .../versions/013_add_protected.py | 28 - .../versions/014_add_image_tags_table.py | 66 - .../versions/015_quote_swift_credentials.py | 176 - .../versions/016_add_status_image_member.py | 28 - .../017_quote_encrypted_swift_credentials.py | 237 - .../versions/018_add_image_locations_table.py | 57 - .../versions/019_migrate_image_locations.py | 44 - .../020_drop_images_table_location.py | 26 - .../versions/021_set_engine_mysql_innodb.py | 31 - .../versions/022_image_member_index.py | 61 - .../migrate_repo/versions/023_placeholder.py | 19 - .../migrate_repo/versions/024_placeholder.py | 19 - .../migrate_repo/versions/025_placeholder.py | 19 - .../026_add_location_storage_information.py | 32 - .../versions/027_checksum_index.py | 28 - .../migrate_repo/versions/028_owner_index.py | 28 - ...029_location_meta_data_pickle_to_string.py | 45 - .../versions/030_add_tasks_table.py | 58 - .../031_remove_duplicated_locations.py | 75 - .../versions/032_add_task_info_table.py | 66 - .../versions/033_add_location_status.py | 41 - .../versions/034_add_virtual_size.py | 26 - .../versions/035_add_metadef_tables.py | 208 - .../036_rename_metadef_schema_columns.py | 25 - .../037_add_changes_to_satisfy_models.py | 84 - .../versions/037_sqlite_upgrade.sql | 159 - .../versions/038_add_metadef_tags_table.py | 51 - ...9_add_changes_to_satisfy_models_metadef.py | 196 - ...40_add_changes_to_satisfy_metadefs_tags.py | 24 - .../versions/041_add_artifact_tables.py | 212 - ...to_reinstall_unique_metadef_constraints.py | 442 -- .../043_add_image_created_updated_idx.py | 29 - .../044_update_metadef_os_nova_server.py | 26 - .../versions/045_add_visibility.py | 51 - .../versions/045_sqlite_upgrade.sql | 162 - .../migrate_repo/versions/__init__.py | 0 glance/db/sqlalchemy/models.py | 262 - 
glance/db/sqlalchemy/models_metadef.py | 180 - glance/db/utils.py | 73 - glance/domain/__init__.py | 681 --- glance/domain/proxy.py | 580 -- glance/gateway.py | 247 - glance/hacking/__init__.py | 0 glance/hacking/checks.py | 159 - glance/i18n.py | 39 - glance/image_cache/__init__.py | 442 -- glance/image_cache/base.py | 21 - glance/image_cache/cleaner.py | 27 - glance/image_cache/client.py | 132 - glance/image_cache/drivers/__init__.py | 0 glance/image_cache/drivers/base.py | 218 - glance/image_cache/drivers/sqlite.py | 508 -- glance/image_cache/drivers/xattr.py | 501 -- glance/image_cache/prefetcher.py | 84 - glance/image_cache/pruner.py | 26 - glance/locale/de/LC_MESSAGES/glance.po | 2169 -------- glance/locale/es/LC_MESSAGES/glance.po | 2135 -------- glance/locale/fr/LC_MESSAGES/glance.po | 2170 -------- glance/locale/it/LC_MESSAGES/glance.po | 2167 -------- glance/locale/ja/LC_MESSAGES/glance.po | 2083 ------- glance/locale/ko_KR/LC_MESSAGES/glance.po | 2051 ------- glance/locale/pt_BR/LC_MESSAGES/glance.po | 2125 -------- glance/locale/ru/LC_MESSAGES/glance.po | 2091 ------- glance/locale/tr_TR/LC_MESSAGES/glance.po | 1867 ------- glance/locale/zh_CN/LC_MESSAGES/glance.po | 2026 ------- glance/locale/zh_TW/LC_MESSAGES/glance.po | 1967 ------- glance/location.py | 511 -- glance/notifier.py | 913 ---- glance/opts.py | 155 - glance/quota/__init__.py | 386 -- glance/registry/__init__.py | 52 - glance/registry/api/__init__.py | 37 - glance/registry/api/v1/__init__.py | 91 - glance/registry/api/v1/images.py | 569 -- glance/registry/api/v1/members.py | 366 -- glance/registry/api/v2/__init__.py | 35 - glance/registry/api/v2/rpc.py | 53 - glance/registry/client/__init__.py | 216 - glance/registry/client/v1/__init__.py | 0 glance/registry/client/v1/api.py | 227 - glance/registry/client/v1/client.py | 276 - glance/registry/client/v2/__init__.py | 0 glance/registry/client/v2/api.py | 110 - glance/registry/client/v2/client.py | 27 - glance/schema.py | 240 - 
glance/scrubber.py | 479 -- glance/tests/__init__.py | 31 - glance/tests/etc/glance-swift.conf | 34 - glance/tests/etc/policy.json | 62 - .../etc/property-protections-policies.conf | 59 - glance/tests/etc/property-protections.conf | 101 - glance/tests/etc/schema-image.json | 1 - glance/tests/functional/__init__.py | 950 ---- glance/tests/functional/db/__init__.py | 32 - glance/tests/functional/db/base.py | 2561 --------- glance/tests/functional/db/base_metadef.py | 707 --- .../functional/db/migrations/__init__.py | 0 .../functional/db/migrations/test_mitaka01.py | 48 - .../functional/db/migrations/test_mitaka02.py | 65 - .../functional/db/migrations/test_ocata01.py | 142 - .../db/migrations/test_ocata_contract01.py | 64 - .../db/migrations/test_ocata_expand01.py | 174 - .../db/migrations/test_ocata_migrate01.py | 179 - .../functional/db/migrations/test_pike01.py | 54 - .../db/migrations/test_pike_contract01.py | 50 - .../db/migrations/test_pike_expand01.py | 47 - .../db/migrations/test_pike_migrate01.py | 23 - glance/tests/functional/db/test_migrations.py | 173 - glance/tests/functional/db/test_registry.py | 112 - .../tests/functional/db/test_rpc_endpoint.py | 56 - glance/tests/functional/db/test_simple.py | 91 - glance/tests/functional/db/test_sqlalchemy.py | 172 - glance/tests/functional/store_utils.py | 91 - glance/tests/functional/test_api.py | 375 -- .../test_bin_glance_cache_manage.py | 358 -- .../tests/functional/test_cache_middleware.py | 1163 ---- .../functional/test_client_exceptions.py | 138 - .../tests/functional/test_client_redirects.py | 150 - .../tests/functional/test_cors_middleware.py | 86 - glance/tests/functional/test_glance_manage.py | 66 - .../functional/test_glance_replicator.py | 33 - .../tests/functional/test_gzip_middleware.py | 48 - .../functional/test_healthcheck_middleware.py | 55 - glance/tests/functional/test_logging.py | 101 - glance/tests/functional/test_reload.py | 262 - glance/tests/functional/test_scrubber.py | 323 -- 
glance/tests/functional/test_sqlite.py | 40 - glance/tests/functional/test_ssl.py | 86 - glance/tests/functional/test_wsgi.py | 55 - glance/tests/functional/v1/__init__.py | 0 glance/tests/functional/v1/test_api.py | 961 ---- .../tests/functional/v1/test_copy_to_file.py | 300 - glance/tests/functional/v1/test_misc.py | 122 - .../functional/v1/test_multiprocessing.py | 80 - glance/tests/functional/v2/__init__.py | 0 .../tests/functional/v2/registry_data_api.py | 52 - glance/tests/functional/v2/test_images.py | 3920 ------------- .../functional/v2/test_metadef_namespaces.py | 242 - .../functional/v2/test_metadef_objects.py | 273 - .../functional/v2/test_metadef_properties.py | 225 - .../v2/test_metadef_resourcetypes.py | 265 - .../tests/functional/v2/test_metadef_tags.py | 178 - glance/tests/functional/v2/test_schemas.py | 69 - glance/tests/functional/v2/test_tasks.py | 146 - glance/tests/integration/__init__.py | 0 .../integration/legacy_functional/__init__.py | 0 .../integration/legacy_functional/base.py | 222 - .../legacy_functional/test_v1_api.py | 1735 ------ glance/tests/integration/v2/__init__.py | 0 glance/tests/integration/v2/base.py | 217 - .../v2/test_property_quota_violations.py | 129 - glance/tests/integration/v2/test_tasks_api.py | 557 -- glance/tests/stubs.py | 218 - glance/tests/test_hacking.py | 140 - glance/tests/unit/__init__.py | 0 glance/tests/unit/api/__init__.py | 0 glance/tests/unit/api/middleware/__init__.py | 0 .../unit/api/middleware/test_cache_manage.py | 171 - glance/tests/unit/api/test_cmd.py | 134 - .../tests/unit/api/test_cmd_cache_manage.py | 396 -- glance/tests/unit/api/test_common.py | 145 - .../unit/api/test_property_protections.py | 300 - glance/tests/unit/async/__init__.py | 0 glance/tests/unit/async/flows/__init__.py | 0 glance/tests/unit/async/flows/test_convert.py | 198 - glance/tests/unit/async/flows/test_import.py | 439 -- .../tests/unit/async/flows/test_introspect.py | 119 - .../unit/async/flows/test_ovf_process.py | 169 - 
glance/tests/unit/async/test_async.py | 49 - .../unit/async/test_taskflow_executor.py | 91 - glance/tests/unit/base.py | 84 - glance/tests/unit/common/__init__.py | 0 glance/tests/unit/common/scripts/__init__.py | 0 .../common/scripts/image_import/__init__.py | 0 .../common/scripts/image_import/test_main.py | 123 - .../unit/common/scripts/test_scripts_utils.py | 129 - glance/tests/unit/common/test_client.py | 90 - glance/tests/unit/common/test_config.py | 111 - glance/tests/unit/common/test_exception.py | 54 - .../unit/common/test_location_strategy.py | 185 - .../tests/unit/common/test_property_utils.py | 491 -- glance/tests/unit/common/test_rpc.py | 358 -- glance/tests/unit/common/test_scripts.py | 42 - .../unit/common/test_swift_store_utils.py | 86 - glance/tests/unit/common/test_timeutils.py | 209 - glance/tests/unit/common/test_utils.py | 498 -- glance/tests/unit/common/test_wsgi.py | 722 --- glance/tests/unit/fake_rados.py | 132 - glance/tests/unit/image_cache/__init__.py | 0 .../unit/image_cache/drivers/__init__.py | 0 .../unit/image_cache/drivers/test_sqlite.py | 40 - glance/tests/unit/test_auth.py | 1095 ---- glance/tests/unit/test_cache_middleware.py | 865 --- glance/tests/unit/test_cached_images.py | 145 - glance/tests/unit/test_context.py | 173 - glance/tests/unit/test_context_middleware.py | 164 - .../unit/test_data_migration_framework.py | 205 - glance/tests/unit/test_db.py | 769 --- glance/tests/unit/test_db_metadef.py | 566 -- glance/tests/unit/test_domain.py | 575 -- glance/tests/unit/test_domain_proxy.py | 304 -- glance/tests/unit/test_glance_manage.py | 87 - glance/tests/unit/test_glance_replicator.py | 614 --- glance/tests/unit/test_image_cache.py | 563 -- glance/tests/unit/test_image_cache_client.py | 132 - glance/tests/unit/test_manage.py | 279 - glance/tests/unit/test_misc.py | 79 - glance/tests/unit/test_notifier.py | 749 --- glance/tests/unit/test_policy.py | 595 -- glance/tests/unit/test_quota.py | 711 --- glance/tests/unit/test_schema.py | 
165 - glance/tests/unit/test_scrubber.py | 193 - glance/tests/unit/test_store_image.py | 915 ---- glance/tests/unit/test_store_location.py | 84 - glance/tests/unit/test_versions.py | 383 -- glance/tests/unit/utils.py | 312 -- glance/tests/unit/v1/__init__.py | 0 glance/tests/unit/v1/test_api.py | 4841 ----------------- glance/tests/unit/v1/test_registry_api.py | 2162 -------- glance/tests/unit/v1/test_registry_client.py | 982 ---- glance/tests/unit/v1/test_upload_utils.py | 345 -- glance/tests/unit/v2/__init__.py | 0 .../unit/v2/test_discovery_image_import.py | 31 - .../unit/v2/test_image_actions_resource.py | 191 - .../tests/unit/v2/test_image_data_resource.py | 821 --- .../unit/v2/test_image_members_resource.py | 572 -- .../tests/unit/v2/test_image_tags_resource.py | 105 - glance/tests/unit/v2/test_images_resource.py | 3920 ------------- .../tests/unit/v2/test_metadef_resources.py | 2079 ------- glance/tests/unit/v2/test_registry_api.py | 1620 ------ glance/tests/unit/v2/test_registry_client.py | 779 --- glance/tests/unit/v2/test_schemas_resource.py | 61 - glance/tests/unit/v2/test_tasks_resource.py | 848 --- glance/tests/utils.py | 695 --- glance/tests/var/ca.crt | 21 - glance/tests/var/ca.key | 28 - glance/tests/var/certificate.crt | 92 - glance/tests/var/privatekey.key | 51 - glance/tests/var/testserver-bad-ovf.ova | Bin 10240 -> 0 bytes glance/tests/var/testserver-no-disk.ova | Bin 20480 -> 0 bytes glance/tests/var/testserver-no-ovf.ova | Bin 10240 -> 0 bytes glance/tests/var/testserver-not-tar.ova | Bin 164385 -> 0 bytes glance/tests/var/testserver.ova | Bin 20480 -> 0 bytes glance/version.py | 18 - httpd/README | 2 - httpd/glance-api-uwsgi.ini | 17 - httpd/uwsgi-glance-api.conf | 2 - pylintrc | 27 - rally-jobs/README.rst | 32 - rally-jobs/extra/README.rst | 5 - rally-jobs/extra/fake.img | 0 rally-jobs/glance.yaml | 45 - rally-jobs/plugins/README.rst | 9 - rally-jobs/plugins/plugin_sample.py | 89 - releasenotes/notes/.placeholder | 0 
...-last-image-location-d5ee3e00efe14f34.yaml | 10 - ...ead-pinning-metadata-09b1866b875c4647.yaml | 4 - .../add-ploop-format-fdd583849504ab15.yaml | 11 - ...sslimits-to-qemu-img-c215f5d90f741d8a.yaml | 12 - .../add-vhdx-format-2be99354ad320cca.yaml | 11 - .../alembic-migrations-902b31edae7a5d7d.yaml | 38 - ...i-minor-version-bump-bbd69dc457fc731c.yaml | 20 - .../notes/bug-1537903-54b2822eac6cfc09.yaml | 11 - .../notes/bug-1593177-8ef35458d29ec93c.yaml | 6 - .../notes/bump-api-2-4-efa266aef0928e04.yaml | 13 - ...tore_type_preference-39081e4045894731.yaml | 9 - ...nsistent-store-names-57374b9505d530d0.yaml | 32 - ...cate-glance-api-opts-23bdbd1ad7625999.yaml | 8 - ...ow-multiple-location-9890a1e961def2f6.yaml | 32 - .../deprecate-v1-api-6c7dbefb90fd8772.yaml | 19 - .../exp-emc-mig-fix-a7e28d547ac38f9e.yaml | 7 - .../notes/glare-ectomy-72a1f80f306f2e3b.yaml | 41 - ...e-visibility-changes-fa5aa18dc67244c4.yaml | 175 - ...roved-config-options-221c58a8c37602ba.yaml | 30 - ...on-add-status-checks-b70db66100bc96b7.yaml | 38 - ...k_path_config_option-2771feaa649e4563.yaml | 5 - ...dmin-only-by-default-7def996262e18f7a.yaml | 13 - .../new_image_filters-c888361e6ecf495c.yaml | 16 - .../newton-1-release-065334d464f78fc5.yaml | 20 - .../notes/newton-bugs-06ed3727b973c271.yaml | 61 - ...g-use-stderr-changes-07f5daf3e6abdcd6.yaml | 14 - ...ike-metadefs-changes-95b54e0bf8bbefd6.yaml | 11 - ...range-header-request-83cf11eebf865fb1.yaml | 13 - .../remove-db-downgrade-0d1cc45b97605775.yaml | 11 - ...er-paste-ini-options-c620dedc8f9728ff.yaml | 13 - .../remove-s3-driver-639c60b71761eb6f.yaml | 12 - ...e-config-opts-newton-3a6575b5908c0e0f.yaml | 31 - ...ict_location_updates-05454bb765a8c92c.yaml | 22 - .../notes/scrubber-exit-e5d77f6f1a38ffb7.yaml | 12 - .../soft_delete-tasks-43ea983695faa565.yaml | 10 - ...ust-support-registry-cfd17a6a9ab21d70.yaml | 7 - ...e_locations-helptext-7fa692642b6b6d52.yaml | 11 - .../notes/use-cursive-c6b15d94845232da.yaml | 22 - 
...virtuozzo-hypervisor-fada477b64ae829d.yaml | 9 - releasenotes/source/_static/.placeholder | 0 releasenotes/source/_templates/.placeholder | 0 releasenotes/source/conf.py | 278 - releasenotes/source/index.rst | 12 - releasenotes/source/liberty.rst | 6 - releasenotes/source/mitaka.rst | 6 - releasenotes/source/newton.rst | 7 - releasenotes/source/ocata.rst | 6 - releasenotes/source/unreleased.rst | 5 - requirements.txt | 58 - setup.cfg | 93 - setup.py | 29 - test-requirements.txt | 38 - tools/test-setup.sh | 57 - tox.ini | 83 - 711 files changed, 14 insertions(+), 165742 deletions(-) delete mode 100644 .coveragerc delete mode 100644 .gitignore delete mode 100644 .gitreview delete mode 100644 .mailmap delete mode 100644 .testr.conf delete mode 100644 CONTRIBUTING.rst delete mode 100644 HACKING.rst delete mode 100644 LICENSE create mode 100644 README delete mode 100644 README.rst delete mode 100644 api-ref/source/conf.py delete mode 100644 api-ref/source/index.rst delete mode 100644 api-ref/source/v1/images-images-v1.inc delete mode 100644 api-ref/source/v1/images-sharing-v1.inc delete mode 100644 api-ref/source/v1/index.rst delete mode 100644 api-ref/source/v1/parameters.yaml delete mode 100644 api-ref/source/v1/samples/image-member-add-request.json delete mode 100644 api-ref/source/v1/samples/image-members-add-request.json delete mode 100644 api-ref/source/v1/samples/image-memberships-list-response.json delete mode 100644 api-ref/source/v1/samples/image-update-response.json delete mode 100644 api-ref/source/v1/samples/images-create-reserve-response.json delete mode 100644 api-ref/source/v1/samples/images-create-with-data-response.json delete mode 100644 api-ref/source/v1/samples/images-list-details-response.json delete mode 100644 api-ref/source/v1/samples/images-list-response.json delete mode 100644 api-ref/source/v1/samples/shared-images-list-response.json delete mode 100644 api-ref/source/v2/images-data.inc delete mode 100644 
api-ref/source/v2/images-images-v2.inc delete mode 100644 api-ref/source/v2/images-parameters-descriptions.inc delete mode 100644 api-ref/source/v2/images-parameters.yaml delete mode 100644 api-ref/source/v2/images-schemas.inc delete mode 100644 api-ref/source/v2/images-sharing-v2.inc delete mode 100644 api-ref/source/v2/images-tags.inc delete mode 100644 api-ref/source/v2/index.rst delete mode 100644 api-ref/source/v2/metadefs-index.rst delete mode 100644 api-ref/source/v2/metadefs-namespaces-objects.inc delete mode 100644 api-ref/source/v2/metadefs-namespaces-properties.inc delete mode 100644 api-ref/source/v2/metadefs-namespaces-tags.inc delete mode 100644 api-ref/source/v2/metadefs-namespaces.inc delete mode 100644 api-ref/source/v2/metadefs-parameters.yaml delete mode 100644 api-ref/source/v2/metadefs-resourcetypes.inc delete mode 100644 api-ref/source/v2/metadefs-schemas.inc delete mode 100644 api-ref/source/v2/samples/image-create-request.json delete mode 100644 api-ref/source/v2/samples/image-create-response.json delete mode 100644 api-ref/source/v2/samples/image-details-deactivate-response.json delete mode 100644 api-ref/source/v2/samples/image-member-create-request.json delete mode 100644 api-ref/source/v2/samples/image-member-create-response.json delete mode 100644 api-ref/source/v2/samples/image-member-details-response.json delete mode 100644 api-ref/source/v2/samples/image-member-update-request.json delete mode 100644 api-ref/source/v2/samples/image-member-update-response.json delete mode 100644 api-ref/source/v2/samples/image-members-list-response.json delete mode 100644 api-ref/source/v2/samples/image-show-response.json delete mode 100644 api-ref/source/v2/samples/image-update-request.json delete mode 100644 api-ref/source/v2/samples/image-update-response.json delete mode 100644 api-ref/source/v2/samples/images-list-response.json delete mode 100644 api-ref/source/v2/samples/metadef-namespace-create-request-simple.json delete mode 100644 
api-ref/source/v2/samples/metadef-namespace-create-request.json delete mode 100644 api-ref/source/v2/samples/metadef-namespace-create-response-simple.json delete mode 100644 api-ref/source/v2/samples/metadef-namespace-create-response.json delete mode 100644 api-ref/source/v2/samples/metadef-namespace-details-response.json delete mode 100644 api-ref/source/v2/samples/metadef-namespace-details-with-rt-response.json delete mode 100644 api-ref/source/v2/samples/metadef-namespace-update-request.json delete mode 100644 api-ref/source/v2/samples/metadef-namespace-update-response.json delete mode 100644 api-ref/source/v2/samples/metadef-namespaces-list-response.json delete mode 100644 api-ref/source/v2/samples/metadef-object-create-request.json delete mode 100644 api-ref/source/v2/samples/metadef-object-create-response.json delete mode 100644 api-ref/source/v2/samples/metadef-object-details-response.json delete mode 100644 api-ref/source/v2/samples/metadef-object-update-request.json delete mode 100644 api-ref/source/v2/samples/metadef-object-update-response.json delete mode 100644 api-ref/source/v2/samples/metadef-objects-list-response.json delete mode 100644 api-ref/source/v2/samples/metadef-properties-list-response.json delete mode 100644 api-ref/source/v2/samples/metadef-property-create-request.json delete mode 100644 api-ref/source/v2/samples/metadef-property-create-response.json delete mode 100644 api-ref/source/v2/samples/metadef-property-details-response.json delete mode 100644 api-ref/source/v2/samples/metadef-property-update-request.json delete mode 100644 api-ref/source/v2/samples/metadef-property-update-response.json delete mode 100644 api-ref/source/v2/samples/metadef-resource-type-assoc-create-response.json delete mode 100644 api-ref/source/v2/samples/metadef-resource-type-create-request.json delete mode 100644 api-ref/source/v2/samples/metadef-resource-types-list-response.json delete mode 100644 api-ref/source/v2/samples/metadef-tag-create-response.json 
delete mode 100644 api-ref/source/v2/samples/metadef-tag-details-response.json delete mode 100644 api-ref/source/v2/samples/metadef-tag-update-request.json delete mode 100644 api-ref/source/v2/samples/metadef-tag-update-response.json delete mode 100644 api-ref/source/v2/samples/metadef-tags-create-request.json delete mode 100644 api-ref/source/v2/samples/metadef-tags-create-response.json delete mode 100644 api-ref/source/v2/samples/metadef-tags-list-response.json delete mode 100644 api-ref/source/v2/samples/schemas-image-member-show-response.json delete mode 100644 api-ref/source/v2/samples/schemas-image-members-list-response.json delete mode 100644 api-ref/source/v2/samples/schemas-image-show-response.json delete mode 100644 api-ref/source/v2/samples/schemas-images-list-response.json delete mode 100644 api-ref/source/v2/samples/schemas-metadef-namespace-show-response.json delete mode 100644 api-ref/source/v2/samples/schemas-metadef-namespaces-list-response.json delete mode 100644 api-ref/source/v2/samples/schemas-metadef-object-show-response.json delete mode 100644 api-ref/source/v2/samples/schemas-metadef-objects-list-response.json delete mode 100644 api-ref/source/v2/samples/schemas-metadef-properties-list-response.json delete mode 100644 api-ref/source/v2/samples/schemas-metadef-property-show-response.json delete mode 100644 api-ref/source/v2/samples/schemas-metadef-resource-type-association-show-response.json delete mode 100644 api-ref/source/v2/samples/schemas-metadef-resource-type-associations-list-response.json delete mode 100644 api-ref/source/v2/samples/schemas-metadef-tag-show-response.json delete mode 100644 api-ref/source/v2/samples/schemas-metadef-tags-list-response.json delete mode 100644 api-ref/source/v2/samples/schemas-task-show-response.json delete mode 100644 api-ref/source/v2/samples/schemas-tasks-list-response.json delete mode 100644 api-ref/source/v2/samples/task-create-request.json delete mode 100644 
api-ref/source/v2/samples/task-create-response.json delete mode 100644 api-ref/source/v2/samples/task-show-failure-response.json delete mode 100644 api-ref/source/v2/samples/task-show-processing-response.json delete mode 100644 api-ref/source/v2/samples/task-show-success-response.json delete mode 100644 api-ref/source/v2/samples/tasks-list-response.json delete mode 100644 api-ref/source/v2/tasks-parameters.yaml delete mode 100644 api-ref/source/v2/tasks-schemas.inc delete mode 100644 api-ref/source/v2/tasks.inc delete mode 100644 api-ref/source/versions/index.rst delete mode 100644 api-ref/source/versions/samples/image-versions-response.json delete mode 100644 api-ref/source/versions/versions.inc delete mode 100644 babel.cfg delete mode 100644 bandit.yaml delete mode 100644 doc/source/_static/.placeholder delete mode 100644 doc/source/admin/apache-httpd.rst delete mode 100644 doc/source/admin/authentication.rst delete mode 100644 doc/source/admin/cache.rst delete mode 100644 doc/source/admin/controllingservers.rst delete mode 100644 doc/source/admin/db-sqlalchemy-migrate.rst delete mode 100644 doc/source/admin/db.rst delete mode 100644 doc/source/admin/flows.rst delete mode 100644 doc/source/admin/index.rst delete mode 100644 doc/source/admin/manage-images.rst delete mode 100644 doc/source/admin/notifications.rst delete mode 100644 doc/source/admin/policies.rst delete mode 100644 doc/source/admin/property-protections.rst delete mode 100644 doc/source/admin/requirements.rst delete mode 100644 doc/source/admin/rollingupgrades.rst delete mode 100644 doc/source/admin/tasks.rst delete mode 100644 doc/source/admin/troubleshooting.rst delete mode 100644 doc/source/cli/footer.txt delete mode 100644 doc/source/cli/general_options.txt delete mode 100644 doc/source/cli/glanceapi.rst delete mode 100644 doc/source/cli/glancecachecleaner.rst delete mode 100644 doc/source/cli/glancecachemanage.rst delete mode 100644 doc/source/cli/glancecacheprefetcher.rst delete mode 100644 
doc/source/cli/glancecachepruner.rst delete mode 100644 doc/source/cli/glancecontrol.rst delete mode 100644 doc/source/cli/glancemanage.rst delete mode 100644 doc/source/cli/glanceregistry.rst delete mode 100644 doc/source/cli/glancereplicator.rst delete mode 100644 doc/source/cli/glancescrubber.rst delete mode 100644 doc/source/cli/header.txt delete mode 100644 doc/source/cli/index.rst delete mode 100644 doc/source/cli/openstack_options.txt delete mode 100644 doc/source/conf.py delete mode 100644 doc/source/configuration/configuring.rst delete mode 100644 doc/source/configuration/glance_api.rst delete mode 100644 doc/source/configuration/glance_cache.rst delete mode 100644 doc/source/configuration/glance_manage.rst delete mode 100644 doc/source/configuration/glance_registry.rst delete mode 100644 doc/source/configuration/glance_scrubber.rst delete mode 100644 doc/source/configuration/index.rst delete mode 100644 doc/source/configuration/sample-configuration.rst delete mode 100644 doc/source/contributor/architecture.rst delete mode 100644 doc/source/contributor/blueprints.rst delete mode 100644 doc/source/contributor/database_architecture.rst delete mode 100644 doc/source/contributor/database_migrations.rst delete mode 100644 doc/source/contributor/documentation.rst delete mode 100644 doc/source/contributor/domain_implementation.rst delete mode 100644 doc/source/contributor/domain_model.rst delete mode 100644 doc/source/contributor/index.rst delete mode 100644 doc/source/contributor/minor-code-changes.rst delete mode 100644 doc/source/contributor/refreshing-configs.rst delete mode 100644 doc/source/contributor/release-cpl.rst delete mode 100644 doc/source/deprecation-note.inc delete mode 100644 doc/source/glossary.rst delete mode 100644 doc/source/images/architecture.png delete mode 100644 doc/source/images/glance_db.png delete mode 100644 doc/source/images/glance_layers.png delete mode 100644 doc/source/images/image_status_transition.png delete mode 100644 
doc/source/images/instance-life-1.png delete mode 100644 doc/source/images/instance-life-2.png delete mode 100644 doc/source/images/instance-life-3.png delete mode 100644 doc/source/images_src/architecture.graphml delete mode 100644 doc/source/images_src/glance_db.graphml delete mode 100644 doc/source/images_src/glance_layers.graphml delete mode 100644 doc/source/images_src/image_status_transition.dot delete mode 100644 doc/source/index.rst delete mode 100644 doc/source/install/get-started.rst delete mode 100644 doc/source/install/index.rst delete mode 100644 doc/source/install/install-debian.rst delete mode 100644 doc/source/install/install-obs.rst delete mode 100644 doc/source/install/install-rdo.rst delete mode 100644 doc/source/install/install-ubuntu.rst delete mode 100644 doc/source/install/install.rst delete mode 100644 doc/source/install/note_configuration_vary_by_distribution.txt delete mode 100644 doc/source/install/verify.rst delete mode 100644 doc/source/user/common-image-properties.rst delete mode 100644 doc/source/user/formats.rst delete mode 100644 doc/source/user/glanceapi.rst delete mode 100644 doc/source/user/glanceclient.rst delete mode 100644 doc/source/user/glancemetadefcatalogapi.rst delete mode 100644 doc/source/user/identifiers.rst delete mode 100644 doc/source/user/index.rst delete mode 100644 doc/source/user/metadefs-concepts.rst delete mode 100644 doc/source/user/signature.rst delete mode 100644 doc/source/user/statuses.rst delete mode 100644 etc/glance-api-paste.ini delete mode 100644 etc/glance-api.conf delete mode 100644 etc/glance-cache.conf delete mode 100644 etc/glance-manage.conf delete mode 100644 etc/glance-registry-paste.ini delete mode 100644 etc/glance-registry.conf delete mode 100644 etc/glance-scrubber.conf delete mode 100644 etc/glance-swift.conf.sample delete mode 100644 etc/metadefs/README delete mode 100644 etc/metadefs/cim-processor-allocation-setting-data.json delete mode 100644 
etc/metadefs/cim-resource-allocation-setting-data.json delete mode 100644 etc/metadefs/cim-storage-allocation-setting-data.json delete mode 100644 etc/metadefs/cim-virtual-system-setting-data.json delete mode 100644 etc/metadefs/compute-aggr-disk-filter.json delete mode 100644 etc/metadefs/compute-aggr-iops-filter.json delete mode 100644 etc/metadefs/compute-aggr-num-instances.json delete mode 100644 etc/metadefs/compute-cpu-pinning.json delete mode 100644 etc/metadefs/compute-guest-memory-backing.json delete mode 100644 etc/metadefs/compute-guest-shutdown.json delete mode 100644 etc/metadefs/compute-host-capabilities.json delete mode 100644 etc/metadefs/compute-hypervisor.json delete mode 100644 etc/metadefs/compute-instance-data.json delete mode 100644 etc/metadefs/compute-libvirt-image.json delete mode 100644 etc/metadefs/compute-libvirt.json delete mode 100644 etc/metadefs/compute-quota.json delete mode 100644 etc/metadefs/compute-randomgen.json delete mode 100644 etc/metadefs/compute-trust.json delete mode 100644 etc/metadefs/compute-vcputopology.json delete mode 100644 etc/metadefs/compute-vmware-flavor.json delete mode 100644 etc/metadefs/compute-vmware-quota-flavor.json delete mode 100644 etc/metadefs/compute-vmware.json delete mode 100644 etc/metadefs/compute-watchdog.json delete mode 100644 etc/metadefs/compute-xenapi.json delete mode 100644 etc/metadefs/glance-common-image-props.json delete mode 100644 etc/metadefs/image-signature-verification.json delete mode 100644 etc/metadefs/operating-system.json delete mode 100644 etc/metadefs/software-databases.json delete mode 100644 etc/metadefs/software-runtimes.json delete mode 100644 etc/metadefs/software-webservers.json delete mode 100644 etc/metadefs/storage-volume-type.json delete mode 100644 etc/oslo-config-generator/glance-api.conf delete mode 100644 etc/oslo-config-generator/glance-cache.conf delete mode 100644 etc/oslo-config-generator/glance-manage.conf delete mode 100644 
etc/oslo-config-generator/glance-registry.conf delete mode 100644 etc/oslo-config-generator/glance-scrubber.conf delete mode 100644 etc/ovf-metadata.json.sample delete mode 100644 etc/policy.json delete mode 100644 etc/property-protections-policies.conf.sample delete mode 100644 etc/property-protections-roles.conf.sample delete mode 100644 etc/schema-image.json delete mode 100644 glance/__init__.py delete mode 100644 glance/api/__init__.py delete mode 100644 glance/api/authorization.py delete mode 100644 glance/api/cached_images.py delete mode 100644 glance/api/common.py delete mode 100644 glance/api/middleware/__init__.py delete mode 100644 glance/api/middleware/cache.py delete mode 100644 glance/api/middleware/cache_manage.py delete mode 100644 glance/api/middleware/context.py delete mode 100644 glance/api/middleware/gzip.py delete mode 100644 glance/api/middleware/version_negotiation.py delete mode 100644 glance/api/policy.py delete mode 100644 glance/api/property_protections.py delete mode 100644 glance/api/v1/__init__.py delete mode 100644 glance/api/v1/controller.py delete mode 100644 glance/api/v1/filters.py delete mode 100644 glance/api/v1/images.py delete mode 100644 glance/api/v1/members.py delete mode 100644 glance/api/v1/router.py delete mode 100644 glance/api/v1/upload_utils.py delete mode 100644 glance/api/v2/__init__.py delete mode 100644 glance/api/v2/discovery.py delete mode 100644 glance/api/v2/image_actions.py delete mode 100644 glance/api/v2/image_data.py delete mode 100644 glance/api/v2/image_members.py delete mode 100644 glance/api/v2/image_tags.py delete mode 100644 glance/api/v2/images.py delete mode 100644 glance/api/v2/metadef_namespaces.py delete mode 100644 glance/api/v2/metadef_objects.py delete mode 100644 glance/api/v2/metadef_properties.py delete mode 100644 glance/api/v2/metadef_resource_types.py delete mode 100644 glance/api/v2/metadef_tags.py delete mode 100644 glance/api/v2/model/__init__.py delete mode 100644 
glance/api/v2/model/metadef_namespace.py delete mode 100644 glance/api/v2/model/metadef_object.py delete mode 100644 glance/api/v2/model/metadef_property_item_type.py delete mode 100644 glance/api/v2/model/metadef_property_type.py delete mode 100644 glance/api/v2/model/metadef_resource_type.py delete mode 100644 glance/api/v2/model/metadef_tag.py delete mode 100644 glance/api/v2/router.py delete mode 100644 glance/api/v2/schemas.py delete mode 100644 glance/api/v2/tasks.py delete mode 100644 glance/api/versions.py delete mode 100644 glance/async/__init__.py delete mode 100644 glance/async/flows/__init__.py delete mode 100644 glance/async/flows/api_image_import.py delete mode 100644 glance/async/flows/base_import.py delete mode 100644 glance/async/flows/convert.py delete mode 100644 glance/async/flows/introspect.py delete mode 100644 glance/async/flows/ovf_process.py delete mode 100644 glance/async/taskflow_executor.py delete mode 100644 glance/async/utils.py delete mode 100644 glance/cmd/__init__.py delete mode 100644 glance/cmd/api.py delete mode 100644 glance/cmd/cache_cleaner.py delete mode 100644 glance/cmd/cache_manage.py delete mode 100644 glance/cmd/cache_prefetcher.py delete mode 100644 glance/cmd/cache_pruner.py delete mode 100644 glance/cmd/control.py delete mode 100644 glance/cmd/manage.py delete mode 100644 glance/cmd/registry.py delete mode 100644 glance/cmd/replicator.py delete mode 100644 glance/cmd/scrubber.py delete mode 100644 glance/common/__init__.py delete mode 100644 glance/common/auth.py delete mode 100644 glance/common/client.py delete mode 100644 glance/common/config.py delete mode 100644 glance/common/crypt.py delete mode 100644 glance/common/exception.py delete mode 100644 glance/common/location_strategy/__init__.py delete mode 100644 glance/common/location_strategy/location_order.py delete mode 100644 glance/common/location_strategy/store_type.py delete mode 100644 glance/common/property_utils.py delete mode 100644 glance/common/rpc.py 
delete mode 100644 glance/common/scripts/__init__.py delete mode 100644 glance/common/scripts/api_image_import/__init__.py delete mode 100644 glance/common/scripts/api_image_import/main.py delete mode 100644 glance/common/scripts/image_import/__init__.py delete mode 100644 glance/common/scripts/image_import/main.py delete mode 100644 glance/common/scripts/utils.py delete mode 100644 glance/common/store_utils.py delete mode 100644 glance/common/swift_store_utils.py delete mode 100644 glance/common/timeutils.py delete mode 100644 glance/common/trust_auth.py delete mode 100644 glance/common/utils.py delete mode 100644 glance/common/wsgi.py delete mode 100644 glance/common/wsgi_app.py delete mode 100644 glance/common/wsme_utils.py delete mode 100644 glance/context.py delete mode 100644 glance/db/__init__.py delete mode 100644 glance/db/metadata.py delete mode 100644 glance/db/migration.py delete mode 100644 glance/db/registry/__init__.py delete mode 100644 glance/db/registry/api.py delete mode 100644 glance/db/simple/__init__.py delete mode 100644 glance/db/simple/api.py delete mode 100644 glance/db/sqlalchemy/__init__.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/README delete mode 100644 glance/db/sqlalchemy/alembic_migrations/__init__.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/add_artifacts_tables.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/add_images_tables.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/add_metadefs_tables.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/add_tasks_tables.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/alembic.ini delete mode 100644 glance/db/sqlalchemy/alembic_migrations/data_migrations/__init__.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/data_migrations/ocata_migrate01_community_images.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/data_migrations/pike_migrate01_empty.py delete mode 100644 
glance/db/sqlalchemy/alembic_migrations/env.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/migrate.cfg delete mode 100644 glance/db/sqlalchemy/alembic_migrations/script.py.mako delete mode 100644 glance/db/sqlalchemy/alembic_migrations/versions/__init__.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/versions/liberty_initial.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/versions/mitaka01_add_image_created_updated_idx.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/versions/mitaka02_update_metadef_os_nova_server.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/versions/ocata01_add_visibility_remove_is_public.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/versions/ocata01_add_visibility_remove_is_public.sql delete mode 100644 glance/db/sqlalchemy/alembic_migrations/versions/ocata_contract01_drop_is_public.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/versions/ocata_expand01_add_visibility.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/versions/pike01_drop_artifacts_tables.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/versions/pike_contract01_drop_artifacts_tables.py delete mode 100644 glance/db/sqlalchemy/alembic_migrations/versions/pike_expand01_empty.py delete mode 100644 glance/db/sqlalchemy/api.py delete mode 100644 glance/db/sqlalchemy/metadata.py delete mode 100644 glance/db/sqlalchemy/metadef_api/__init__.py delete mode 100644 glance/db/sqlalchemy/metadef_api/namespace.py delete mode 100644 glance/db/sqlalchemy/metadef_api/object.py delete mode 100644 glance/db/sqlalchemy/metadef_api/property.py delete mode 100644 glance/db/sqlalchemy/metadef_api/resource_type.py delete mode 100644 glance/db/sqlalchemy/metadef_api/resource_type_association.py delete mode 100644 glance/db/sqlalchemy/metadef_api/tag.py delete mode 100644 glance/db/sqlalchemy/metadef_api/utils.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/README delete 
mode 100644 glance/db/sqlalchemy/migrate_repo/__init__.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/manage.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/migrate.cfg delete mode 100644 glance/db/sqlalchemy/migrate_repo/schema.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/001_add_images_table.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/002_add_image_properties_table.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/003_add_disk_format.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/003_sqlite_upgrade.sql delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/004_add_checksum.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/005_size_big_integer.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/006_key_to_name.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/006_mysql_upgrade.sql delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/006_sqlite_upgrade.sql delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/007_add_owner.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/008_add_image_members_table.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/009_add_mindisk_and_minram.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/010_default_update_at.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/011_make_mindisk_and_minram_notnull.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/011_sqlite_upgrade.sql delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/012_id_to_uuid.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/013_add_protected.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/014_add_image_tags_table.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/016_add_status_image_member.py 
delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/018_add_image_locations_table.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/019_migrate_image_locations.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/020_drop_images_table_location.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/021_set_engine_mysql_innodb.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/022_image_member_index.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/023_placeholder.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/024_placeholder.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/025_placeholder.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/026_add_location_storage_information.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/027_checksum_index.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/028_owner_index.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/029_location_meta_data_pickle_to_string.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/030_add_tasks_table.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/031_remove_duplicated_locations.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/032_add_task_info_table.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/033_add_location_status.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/034_add_virtual_size.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/035_add_metadef_tables.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/036_rename_metadef_schema_columns.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/037_add_changes_to_satisfy_models.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/037_sqlite_upgrade.sql 
delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/038_add_metadef_tags_table.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/039_add_changes_to_satisfy_models_metadef.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/040_add_changes_to_satisfy_metadefs_tags.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/041_add_artifact_tables.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/042_add_changes_to_reinstall_unique_metadef_constraints.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/043_add_image_created_updated_idx.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/044_update_metadef_os_nova_server.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/045_add_visibility.py delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/045_sqlite_upgrade.sql delete mode 100644 glance/db/sqlalchemy/migrate_repo/versions/__init__.py delete mode 100644 glance/db/sqlalchemy/models.py delete mode 100644 glance/db/sqlalchemy/models_metadef.py delete mode 100644 glance/db/utils.py delete mode 100644 glance/domain/__init__.py delete mode 100644 glance/domain/proxy.py delete mode 100644 glance/gateway.py delete mode 100644 glance/hacking/__init__.py delete mode 100644 glance/hacking/checks.py delete mode 100644 glance/i18n.py delete mode 100644 glance/image_cache/__init__.py delete mode 100644 glance/image_cache/base.py delete mode 100644 glance/image_cache/cleaner.py delete mode 100644 glance/image_cache/client.py delete mode 100644 glance/image_cache/drivers/__init__.py delete mode 100644 glance/image_cache/drivers/base.py delete mode 100644 glance/image_cache/drivers/sqlite.py delete mode 100644 glance/image_cache/drivers/xattr.py delete mode 100644 glance/image_cache/prefetcher.py delete mode 100644 glance/image_cache/pruner.py delete mode 100644 glance/locale/de/LC_MESSAGES/glance.po delete mode 100644 glance/locale/es/LC_MESSAGES/glance.po delete mode 
100644 glance/locale/fr/LC_MESSAGES/glance.po delete mode 100644 glance/locale/it/LC_MESSAGES/glance.po delete mode 100644 glance/locale/ja/LC_MESSAGES/glance.po delete mode 100644 glance/locale/ko_KR/LC_MESSAGES/glance.po delete mode 100644 glance/locale/pt_BR/LC_MESSAGES/glance.po delete mode 100644 glance/locale/ru/LC_MESSAGES/glance.po delete mode 100644 glance/locale/tr_TR/LC_MESSAGES/glance.po delete mode 100644 glance/locale/zh_CN/LC_MESSAGES/glance.po delete mode 100644 glance/locale/zh_TW/LC_MESSAGES/glance.po delete mode 100644 glance/location.py delete mode 100644 glance/notifier.py delete mode 100644 glance/opts.py delete mode 100644 glance/quota/__init__.py delete mode 100644 glance/registry/__init__.py delete mode 100644 glance/registry/api/__init__.py delete mode 100644 glance/registry/api/v1/__init__.py delete mode 100644 glance/registry/api/v1/images.py delete mode 100644 glance/registry/api/v1/members.py delete mode 100644 glance/registry/api/v2/__init__.py delete mode 100644 glance/registry/api/v2/rpc.py delete mode 100644 glance/registry/client/__init__.py delete mode 100644 glance/registry/client/v1/__init__.py delete mode 100644 glance/registry/client/v1/api.py delete mode 100644 glance/registry/client/v1/client.py delete mode 100644 glance/registry/client/v2/__init__.py delete mode 100644 glance/registry/client/v2/api.py delete mode 100644 glance/registry/client/v2/client.py delete mode 100644 glance/schema.py delete mode 100644 glance/scrubber.py delete mode 100644 glance/tests/__init__.py delete mode 100644 glance/tests/etc/glance-swift.conf delete mode 100644 glance/tests/etc/policy.json delete mode 100644 glance/tests/etc/property-protections-policies.conf delete mode 100644 glance/tests/etc/property-protections.conf delete mode 100644 glance/tests/etc/schema-image.json delete mode 100644 glance/tests/functional/__init__.py delete mode 100644 glance/tests/functional/db/__init__.py delete mode 100644 glance/tests/functional/db/base.py 
delete mode 100644 glance/tests/functional/db/base_metadef.py delete mode 100644 glance/tests/functional/db/migrations/__init__.py delete mode 100644 glance/tests/functional/db/migrations/test_mitaka01.py delete mode 100644 glance/tests/functional/db/migrations/test_mitaka02.py delete mode 100644 glance/tests/functional/db/migrations/test_ocata01.py delete mode 100644 glance/tests/functional/db/migrations/test_ocata_contract01.py delete mode 100644 glance/tests/functional/db/migrations/test_ocata_expand01.py delete mode 100644 glance/tests/functional/db/migrations/test_ocata_migrate01.py delete mode 100644 glance/tests/functional/db/migrations/test_pike01.py delete mode 100644 glance/tests/functional/db/migrations/test_pike_contract01.py delete mode 100644 glance/tests/functional/db/migrations/test_pike_expand01.py delete mode 100644 glance/tests/functional/db/migrations/test_pike_migrate01.py delete mode 100644 glance/tests/functional/db/test_migrations.py delete mode 100644 glance/tests/functional/db/test_registry.py delete mode 100644 glance/tests/functional/db/test_rpc_endpoint.py delete mode 100644 glance/tests/functional/db/test_simple.py delete mode 100644 glance/tests/functional/db/test_sqlalchemy.py delete mode 100644 glance/tests/functional/store_utils.py delete mode 100644 glance/tests/functional/test_api.py delete mode 100644 glance/tests/functional/test_bin_glance_cache_manage.py delete mode 100644 glance/tests/functional/test_cache_middleware.py delete mode 100644 glance/tests/functional/test_client_exceptions.py delete mode 100644 glance/tests/functional/test_client_redirects.py delete mode 100644 glance/tests/functional/test_cors_middleware.py delete mode 100644 glance/tests/functional/test_glance_manage.py delete mode 100644 glance/tests/functional/test_glance_replicator.py delete mode 100644 glance/tests/functional/test_gzip_middleware.py delete mode 100644 glance/tests/functional/test_healthcheck_middleware.py delete mode 100644 
glance/tests/functional/test_logging.py delete mode 100644 glance/tests/functional/test_reload.py delete mode 100644 glance/tests/functional/test_scrubber.py delete mode 100644 glance/tests/functional/test_sqlite.py delete mode 100644 glance/tests/functional/test_ssl.py delete mode 100644 glance/tests/functional/test_wsgi.py delete mode 100644 glance/tests/functional/v1/__init__.py delete mode 100644 glance/tests/functional/v1/test_api.py delete mode 100644 glance/tests/functional/v1/test_copy_to_file.py delete mode 100644 glance/tests/functional/v1/test_misc.py delete mode 100644 glance/tests/functional/v1/test_multiprocessing.py delete mode 100644 glance/tests/functional/v2/__init__.py delete mode 100644 glance/tests/functional/v2/registry_data_api.py delete mode 100644 glance/tests/functional/v2/test_images.py delete mode 100644 glance/tests/functional/v2/test_metadef_namespaces.py delete mode 100644 glance/tests/functional/v2/test_metadef_objects.py delete mode 100644 glance/tests/functional/v2/test_metadef_properties.py delete mode 100644 glance/tests/functional/v2/test_metadef_resourcetypes.py delete mode 100644 glance/tests/functional/v2/test_metadef_tags.py delete mode 100644 glance/tests/functional/v2/test_schemas.py delete mode 100644 glance/tests/functional/v2/test_tasks.py delete mode 100644 glance/tests/integration/__init__.py delete mode 100644 glance/tests/integration/legacy_functional/__init__.py delete mode 100644 glance/tests/integration/legacy_functional/base.py delete mode 100644 glance/tests/integration/legacy_functional/test_v1_api.py delete mode 100644 glance/tests/integration/v2/__init__.py delete mode 100644 glance/tests/integration/v2/base.py delete mode 100644 glance/tests/integration/v2/test_property_quota_violations.py delete mode 100644 glance/tests/integration/v2/test_tasks_api.py delete mode 100644 glance/tests/stubs.py delete mode 100644 glance/tests/test_hacking.py delete mode 100644 glance/tests/unit/__init__.py delete mode 100644 
glance/tests/unit/api/__init__.py delete mode 100644 glance/tests/unit/api/middleware/__init__.py delete mode 100644 glance/tests/unit/api/middleware/test_cache_manage.py delete mode 100644 glance/tests/unit/api/test_cmd.py delete mode 100644 glance/tests/unit/api/test_cmd_cache_manage.py delete mode 100644 glance/tests/unit/api/test_common.py delete mode 100644 glance/tests/unit/api/test_property_protections.py delete mode 100644 glance/tests/unit/async/__init__.py delete mode 100644 glance/tests/unit/async/flows/__init__.py delete mode 100644 glance/tests/unit/async/flows/test_convert.py delete mode 100644 glance/tests/unit/async/flows/test_import.py delete mode 100644 glance/tests/unit/async/flows/test_introspect.py delete mode 100644 glance/tests/unit/async/flows/test_ovf_process.py delete mode 100644 glance/tests/unit/async/test_async.py delete mode 100644 glance/tests/unit/async/test_taskflow_executor.py delete mode 100644 glance/tests/unit/base.py delete mode 100644 glance/tests/unit/common/__init__.py delete mode 100644 glance/tests/unit/common/scripts/__init__.py delete mode 100644 glance/tests/unit/common/scripts/image_import/__init__.py delete mode 100644 glance/tests/unit/common/scripts/image_import/test_main.py delete mode 100644 glance/tests/unit/common/scripts/test_scripts_utils.py delete mode 100644 glance/tests/unit/common/test_client.py delete mode 100644 glance/tests/unit/common/test_config.py delete mode 100644 glance/tests/unit/common/test_exception.py delete mode 100644 glance/tests/unit/common/test_location_strategy.py delete mode 100644 glance/tests/unit/common/test_property_utils.py delete mode 100644 glance/tests/unit/common/test_rpc.py delete mode 100644 glance/tests/unit/common/test_scripts.py delete mode 100644 glance/tests/unit/common/test_swift_store_utils.py delete mode 100644 glance/tests/unit/common/test_timeutils.py delete mode 100644 glance/tests/unit/common/test_utils.py delete mode 100644 glance/tests/unit/common/test_wsgi.py 
delete mode 100644 glance/tests/unit/fake_rados.py delete mode 100644 glance/tests/unit/image_cache/__init__.py delete mode 100644 glance/tests/unit/image_cache/drivers/__init__.py delete mode 100644 glance/tests/unit/image_cache/drivers/test_sqlite.py delete mode 100644 glance/tests/unit/test_auth.py delete mode 100644 glance/tests/unit/test_cache_middleware.py delete mode 100644 glance/tests/unit/test_cached_images.py delete mode 100644 glance/tests/unit/test_context.py delete mode 100644 glance/tests/unit/test_context_middleware.py delete mode 100644 glance/tests/unit/test_data_migration_framework.py delete mode 100644 glance/tests/unit/test_db.py delete mode 100644 glance/tests/unit/test_db_metadef.py delete mode 100644 glance/tests/unit/test_domain.py delete mode 100644 glance/tests/unit/test_domain_proxy.py delete mode 100644 glance/tests/unit/test_glance_manage.py delete mode 100644 glance/tests/unit/test_glance_replicator.py delete mode 100644 glance/tests/unit/test_image_cache.py delete mode 100644 glance/tests/unit/test_image_cache_client.py delete mode 100644 glance/tests/unit/test_manage.py delete mode 100644 glance/tests/unit/test_misc.py delete mode 100644 glance/tests/unit/test_notifier.py delete mode 100644 glance/tests/unit/test_policy.py delete mode 100644 glance/tests/unit/test_quota.py delete mode 100644 glance/tests/unit/test_schema.py delete mode 100644 glance/tests/unit/test_scrubber.py delete mode 100644 glance/tests/unit/test_store_image.py delete mode 100644 glance/tests/unit/test_store_location.py delete mode 100644 glance/tests/unit/test_versions.py delete mode 100644 glance/tests/unit/utils.py delete mode 100644 glance/tests/unit/v1/__init__.py delete mode 100644 glance/tests/unit/v1/test_api.py delete mode 100644 glance/tests/unit/v1/test_registry_api.py delete mode 100644 glance/tests/unit/v1/test_registry_client.py delete mode 100644 glance/tests/unit/v1/test_upload_utils.py delete mode 100644 glance/tests/unit/v2/__init__.py delete 
mode 100644 glance/tests/unit/v2/test_discovery_image_import.py delete mode 100644 glance/tests/unit/v2/test_image_actions_resource.py delete mode 100644 glance/tests/unit/v2/test_image_data_resource.py delete mode 100644 glance/tests/unit/v2/test_image_members_resource.py delete mode 100644 glance/tests/unit/v2/test_image_tags_resource.py delete mode 100644 glance/tests/unit/v2/test_images_resource.py delete mode 100644 glance/tests/unit/v2/test_metadef_resources.py delete mode 100644 glance/tests/unit/v2/test_registry_api.py delete mode 100644 glance/tests/unit/v2/test_registry_client.py delete mode 100644 glance/tests/unit/v2/test_schemas_resource.py delete mode 100644 glance/tests/unit/v2/test_tasks_resource.py delete mode 100644 glance/tests/utils.py delete mode 100644 glance/tests/var/ca.crt delete mode 100644 glance/tests/var/ca.key delete mode 100644 glance/tests/var/certificate.crt delete mode 100644 glance/tests/var/privatekey.key delete mode 100644 glance/tests/var/testserver-bad-ovf.ova delete mode 100644 glance/tests/var/testserver-no-disk.ova delete mode 100644 glance/tests/var/testserver-no-ovf.ova delete mode 100644 glance/tests/var/testserver-not-tar.ova delete mode 100644 glance/tests/var/testserver.ova delete mode 100644 glance/version.py delete mode 100644 httpd/README delete mode 100644 httpd/glance-api-uwsgi.ini delete mode 100644 httpd/uwsgi-glance-api.conf delete mode 100644 pylintrc delete mode 100644 rally-jobs/README.rst delete mode 100644 rally-jobs/extra/README.rst delete mode 100644 rally-jobs/extra/fake.img delete mode 100644 rally-jobs/glance.yaml delete mode 100644 rally-jobs/plugins/README.rst delete mode 100644 rally-jobs/plugins/plugin_sample.py delete mode 100644 releasenotes/notes/.placeholder delete mode 100644 releasenotes/notes/Prevent-removing-last-image-location-d5ee3e00efe14f34.yaml delete mode 100644 releasenotes/notes/add-cpu-thread-pinning-metadata-09b1866b875c4647.yaml delete mode 100644 
releasenotes/notes/add-ploop-format-fdd583849504ab15.yaml delete mode 100644 releasenotes/notes/add-processlimits-to-qemu-img-c215f5d90f741d8a.yaml delete mode 100644 releasenotes/notes/add-vhdx-format-2be99354ad320cca.yaml delete mode 100644 releasenotes/notes/alembic-migrations-902b31edae7a5d7d.yaml delete mode 100644 releasenotes/notes/api-minor-version-bump-bbd69dc457fc731c.yaml delete mode 100644 releasenotes/notes/bug-1537903-54b2822eac6cfc09.yaml delete mode 100644 releasenotes/notes/bug-1593177-8ef35458d29ec93c.yaml delete mode 100644 releasenotes/notes/bump-api-2-4-efa266aef0928e04.yaml delete mode 100644 releasenotes/notes/clean-up-acceptable-values-store_type_preference-39081e4045894731.yaml delete mode 100644 releasenotes/notes/consistent-store-names-57374b9505d530d0.yaml delete mode 100644 releasenotes/notes/deprecate-glance-api-opts-23bdbd1ad7625999.yaml delete mode 100644 releasenotes/notes/deprecate-show-multiple-location-9890a1e961def2f6.yaml delete mode 100644 releasenotes/notes/deprecate-v1-api-6c7dbefb90fd8772.yaml delete mode 100644 releasenotes/notes/exp-emc-mig-fix-a7e28d547ac38f9e.yaml delete mode 100644 releasenotes/notes/glare-ectomy-72a1f80f306f2e3b.yaml delete mode 100644 releasenotes/notes/image-visibility-changes-fa5aa18dc67244c4.yaml delete mode 100644 releasenotes/notes/improved-config-options-221c58a8c37602ba.yaml delete mode 100644 releasenotes/notes/location-add-status-checks-b70db66100bc96b7.yaml delete mode 100644 releasenotes/notes/lock_path_config_option-2771feaa649e4563.yaml delete mode 100644 releasenotes/notes/make-task-api-admin-only-by-default-7def996262e18f7a.yaml delete mode 100644 releasenotes/notes/new_image_filters-c888361e6ecf495c.yaml delete mode 100644 releasenotes/notes/newton-1-release-065334d464f78fc5.yaml delete mode 100644 releasenotes/notes/newton-bugs-06ed3727b973c271.yaml delete mode 100644 releasenotes/notes/oslo-log-use-stderr-changes-07f5daf3e6abdcd6.yaml delete mode 100644 
releasenotes/notes/pike-metadefs-changes-95b54e0bf8bbefd6.yaml delete mode 100644 releasenotes/notes/range-header-request-83cf11eebf865fb1.yaml delete mode 100644 releasenotes/notes/remove-db-downgrade-0d1cc45b97605775.yaml delete mode 100644 releasenotes/notes/remove-osprofiler-paste-ini-options-c620dedc8f9728ff.yaml delete mode 100644 releasenotes/notes/remove-s3-driver-639c60b71761eb6f.yaml delete mode 100644 releasenotes/notes/reordered-store-config-opts-newton-3a6575b5908c0e0f.yaml delete mode 100644 releasenotes/notes/restrict_location_updates-05454bb765a8c92c.yaml delete mode 100644 releasenotes/notes/scrubber-exit-e5d77f6f1a38ffb7.yaml delete mode 100644 releasenotes/notes/soft_delete-tasks-43ea983695faa565.yaml delete mode 100644 releasenotes/notes/trust-support-registry-cfd17a6a9ab21d70.yaml delete mode 100644 releasenotes/notes/update-show_multiple_locations-helptext-7fa692642b6b6d52.yaml delete mode 100644 releasenotes/notes/use-cursive-c6b15d94845232da.yaml delete mode 100644 releasenotes/notes/virtuozzo-hypervisor-fada477b64ae829d.yaml delete mode 100644 releasenotes/source/_static/.placeholder delete mode 100644 releasenotes/source/_templates/.placeholder delete mode 100644 releasenotes/source/conf.py delete mode 100644 releasenotes/source/index.rst delete mode 100644 releasenotes/source/liberty.rst delete mode 100644 releasenotes/source/mitaka.rst delete mode 100644 releasenotes/source/newton.rst delete mode 100644 releasenotes/source/ocata.rst delete mode 100644 releasenotes/source/unreleased.rst delete mode 100644 requirements.txt delete mode 100644 setup.cfg delete mode 100644 setup.py delete mode 100644 test-requirements.txt delete mode 100755 tools/test-setup.sh delete mode 100644 tox.ini diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index e99bee85..00000000 --- a/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[run] -branch = True -source = glance -omit = glance/tests/* - -[report] -ignore_errors = True diff --git a/.gitignore 
b/.gitignore deleted file mode 100644 index 681ad1af..00000000 --- a/.gitignore +++ /dev/null @@ -1,42 +0,0 @@ -*.pyc -*.log -.glance-venv -.venv -.testrepository/ -.tox -.coverage* -cover/* -covhtml -nosetests.xml -coverage.xml -glance.sqlite -AUTHORS -ChangeLog -build -dist -*.egg -.eggs/* -glance.egg-info -tests.sqlite -glance/versioninfo -subunit.log - -# Swap files range from .saa to .swp -*.s[a-w][a-p] - -# Files created by doc build -doc/source/api -doc/source/_static/*.sample -doc/source/contributor/api - -# Files created by releasenotes build -releasenotes/build - -# IDE files -.project -.pydevproject -.idea -.e4p -.eric5project/ -.issues/ -.ropeproject diff --git a/.gitreview b/.gitreview deleted file mode 100644 index 31aa568b..00000000 --- a/.gitreview +++ /dev/null @@ -1,4 +0,0 @@ -[gerrit] -host=review.openstack.org -port=29418 -project=openstack/glance.git diff --git a/.mailmap b/.mailmap deleted file mode 100644 index 3b1e17d3..00000000 --- a/.mailmap +++ /dev/null @@ -1,26 +0,0 @@ -# Format is: -# -# - - - - - - - - - - - - - - - - - - - -Zhongyue Luo -Zhenguo Niu - -David Koo diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index b3a5d372..00000000 --- a/.testr.conf +++ /dev/null @@ -1,8 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./glance/tests} $LISTOPT $IDOPTION - -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index cf568c76..00000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,27 +0,0 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps documented at: - - http://docs.openstack.org/infra/manual/developers.html#getting-started - -Once those steps have been completed, changes to OpenStack -should be submitted for 
review via the Gerrit tool, following -the workflow documented at: - - http://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. - -Bugs should be filed on Launchpad, not GitHub: - - https://bugs.launchpad.net/glance - -Additionally, specific guidelines for contributing to Glance may be found in -Glance's Documentation: - - http://docs.openstack.org/developer/glance/contributing - -Please read and follow these Glance-specific guidelines, particularly the -section on `Disallowed Minor Code Changes -`_. -You will thereby prevent your friendly review team from pulling out whatever -hair they have left. Thank you for your cooperation. diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index d6b25c75..00000000 --- a/HACKING.rst +++ /dev/null @@ -1,24 +0,0 @@ -glance Style Commandments -========================= - -- Step 1: Read the OpenStack Style Commandments - http://docs.openstack.org/developer/hacking/ -- Step 2: Read on - -glance Specific Commandments ----------------------------- - -- [G316] Change assertTrue(isinstance(A, B)) by optimal assert like - assertIsInstance(A, B) -- [G317] Change assertEqual(type(A), B) by optimal assert like - assertIsInstance(A, B) -- [G318] Change assertEqual(A, None) or assertEqual(None, A) by optimal assert like - assertIsNone(A) -- [G319] Validate that debug level logs are not translated -- [G320] For python 3 compatibility, use six.text_type() instead of unicode() -- [G327] Prevent use of deprecated contextlib.nested -- [G328] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs -- [G329] Python 3: Do not use xrange. -- [G330] Python 3: do not use dict.iteritems. -- [G331] Python 3: do not use dict.iterkeys. -- [G332] Python 3: do not use dict.itervalues. 
diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a0..00000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- diff --git a/README b/README new file mode 100644 index 00000000..8fcd2b2f --- /dev/null +++ b/README @@ -0,0 +1,14 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". + +For ongoing work on maintaining OpenStack packages in the Debian +distribution, please see the Debian OpenStack packaging team at +https://wiki.debian.org/OpenStack/. + +For any further questions, please email +openstack-dev@lists.openstack.org or join #openstack-dev on +Freenode. diff --git a/README.rst b/README.rst deleted file mode 100644 index 2a645d08..00000000 --- a/README.rst +++ /dev/null @@ -1,79 +0,0 @@ -======================== -Team and repository tags -======================== - -.. image:: http://governance.openstack.org/badges/glance.svg - :target: http://governance.openstack.org/reference/tags/index.html - :alt: The following tags have been asserted for the Glance project: - "project:official", - "tc:approved-release", - "stable:follows-policy", - "tc:starter-kit:compute", - "vulnerability:managed", - "team:diverse-affiliation", - "assert:supports-upgrade", - "assert:follows-standard-deprecation". - Follow the link for an explanation of these tags. -.. NOTE(rosmaita): the alt text above will have to be updated when - additional tags are asserted for Glance. (The SVG in the - governance repo is updated automatically.) - -.. Change things from this point on - -====== -Glance -====== - -Glance is a project that provides services and associated libraries -to store, browse, share, distribute and manage bootable disk images, -other data closely associated with initializing compute resources, -and metadata definitions. 
- -Use the following resources to learn more: - -API ---- - -To learn how to use Glance's API, consult the documentation available -online at: - -* `Image Service APIs `_ - -Developers ----------- - -For information on how to contribute to Glance, please see the contents -of the CONTRIBUTING.rst in this repository. - -Any new code must follow the development guidelines detailed in the -HACKING.rst file, and pass all unit tests. - -Further developer focused documentation is available at: - -* `Official Glance documentation `_ -* `Official Client documentation `_ - -Operators ---------- - -To learn how to deploy and configure OpenStack Glance, consult the -documentation available online at: - -* `Openstack Glance `_ - -In the unfortunate event that bugs are discovered, they should be -reported to the appropriate bug tracker. You can raise bugs here: - -* `Bug Tracker `_ - -Other Information ------------------ - -During each design summit, we agree on what the whole community wants -to focus on for the upcoming release. You can see image service plans: - -* `Image Service Plans `_ - -For more information about the Glance project please see: - -* `Glance Project `_ diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py deleted file mode 100644 index 26917f6b..00000000 --- a/api-ref/source/conf.py +++ /dev/null @@ -1,240 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# glance api-ref build config file, copied from: -# nova documentation build configuration file, created by -# sphinx-quickstart on Sat May 1 15:17:47 2010. -# -# This file is execfile()d with the current directory set to -# its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -import subprocess -import sys -import warnings - -import openstackdocstheme - -extensions = [ - 'os_api_ref', -] - - -html_theme = 'openstackdocs' -html_theme_path = [openstackdocstheme.get_html_theme_path()] -html_theme_options = { - "sidebar_mode": "toc", -} - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../../')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# -# source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Image Service API Reference' -copyright = u'2010-present, OpenStack Foundation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -from glance.version import version_info -# The full version, including alpha/beta/rc tags. -release = version_info.release_string() -# The short X.Y version. 
-version = version_info.version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# The reST default role (used for this markup: `text`) to use -# for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# Config logABug feature -# source tree -giturl = ( - u'https://git.openstack.org/cgit/openstack/glance/tree/api-ref/source') -# html_context allows us to pass arbitrary values into the html template -html_context = {'bug_tag': 'api-ref', - 'giturl': giturl, - 'bug_project': 'glance'} - -# -- Options for man page output ---------------------------------------------- - -# Grouping the document tree for man pages. -# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. 
-# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' -git_cmd = [ - "git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1" -] -try: - html_last_updated_fmt = subprocess.check_output(git_cmd).decode('utf-8') -except Exception: - warnings.warn('Cannot get last updated time from git repository. ' - 'Not setting "html_last_updated_fmt".') - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_use_modindex = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. 
-# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'glancedoc' - - -# -- Options for LaTeX output ------------------------------------------------- - -# The paper size ('letter' or 'a4'). -# latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -# latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', 'Glance.tex', u'OpenStack Image Service API Documentation', - u'OpenStack Foundation', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -# latex_preamble = '' - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_use_modindex = True diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst deleted file mode 100644 index 7a90c135..00000000 --- a/api-ref/source/index.rst +++ /dev/null @@ -1,27 +0,0 @@ -.. - Copyright 2010 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -================== -Image Service APIs -================== - -.. toctree:: - :maxdepth: 2 - - versions/index - v1/index - v2/index - v2/metadefs-index diff --git a/api-ref/source/v1/images-images-v1.inc b/api-ref/source/v1/images-images-v1.inc deleted file mode 100644 index 34f45310..00000000 --- a/api-ref/source/v1/images-images-v1.inc +++ /dev/null @@ -1,344 +0,0 @@ -.. -*- rst -*- - -Images -****** - - -Create image -~~~~~~~~~~~~ - -.. rest_method:: POST /v1/images - -Creates a metadata record of a virtual machine (VM) image and optionally -stores the image data. - -Image metadata fields are passed as HTTP headers prefixed with one of -the strings ``x-image-meta-`` or ``x-image-meta-property-``. See the -API documentation for details. - -If there is no request body, an image record will be created in status -``queued``. This is called *reserving an image*. The image data can be -uploaded later using the `Update image`_ call. - -If image data will be uploaded as part of this request, then the following -image metadata must be included among the request headers: - -- ``name`` -- ``disk_format`` -- ``container_format`` - -Additionally, if image data is uploaded as part of this request, the API -will return a 400 under the following circumstances: - -- The ``x-image-meta-size`` header is present and the length in bytes of - the request body does not match the value of this header. -- The ``x-image-meta-checksum`` header is present and MD5 checksum generated - by the backend store while storing the data does not match the value of - this header. 
- -Normal response codes: 201 - -Error response codes: 400, 409 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - image data: createImage - - x-image-meta-name: x-image-meta-name - - x-image-meta-container_format: x-image-meta-container_format - - x-image-meta-disk_format: x-image-meta-disk_format - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - location: location - - image: image-object - - -Response Example (create with data) ------------------------------------ - - :: - - HTTP/1.1 100 Continue - - HTTP/1.1 201 Created - Content-Type: application/json - Content-Length: 491 - Location: http://glance.openstack.example.org/v1/images/de2f2211-3ac7-4260-9142-41db0ecfb425 - Etag: 7b1b10607acc1319506185e7227ca30d - X-Openstack-Request-Id: req-70adeab4-740c-4db3-a002-fd1559ecf40f - Date: Tue, 10 May 2016 21:41:41 GMT - -.. literalinclude:: samples/images-create-with-data-response.json - :language: json - - -Response Example (reserve an image) ------------------------------------ - -This is an extreme example of reserving an image. It was created by a POST -with no headers specified and no data passed. Here's the response: - - :: - - HTTP/1.1 201 Created - Content-Type: application/json - Content-Length: 447 - Location: http://glance.openstack.example.org/v1/images/6b3ecfca-d445-4946-a8d1-c4938352b251 - X-Openstack-Request-Id: req-db1ff3c7-3d4f-451f-9ef1-c414343f809d - Date: Tue, 10 May 2016 21:35:14 GMT - -.. literalinclude:: samples/images-create-reserve-response.json - :language: json - - - -List images -~~~~~~~~~~~ - -.. rest_method:: GET /v1/images - -Lists all VM images available to the user making the call. This list will -include all public images, any images owned by the requestor, and any images -shared with the requestor. - -Various query filters can be applied to the URL to restrict the content of -the response. - -Normal response codes: 200 - -Error response codes: 400, 403 - -.. 
note:: need to add info about sorting and pagination - -Request -------- - -.. rest_parameters:: parameters.yaml - - - name: name-in-query - - container_format: container_format-in-query - - disk_format: disk_format-in-query - - status: status-in-query - - size_min: size_min - - size_max: size_max - - changes-since: changes-since - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - images: images-list - - -Response Example ----------------- - -.. literalinclude:: samples/images-list-response.json - :language: json - - -List images with details -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1/images/detail - -Lists all available images with details. - -Various query filters can be applied to the URL to restrict the content of -the response. - -Normal response codes: 200 - -Error response codes: 400, 403 - -.. note:: need to add info about sorting and pagination - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - name: name-in-query - - container_format: container_format-in-query - - disk_format: disk_format-in-query - - status: status-in-query - - size_min: size_min - - size_max: size_max - - changes-since: changes-since - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - images: images-detail-list - - previous: previous - - next: next - - -Response Example ----------------- - -.. literalinclude:: samples/images-list-details-response.json - :language: json - - -Update image -~~~~~~~~~~~~ - -.. rest_method:: PUT /v1/images/{image_id} - -Updates the metadata for an image or uploads an image file. - -Image metadata is updated by passing HTTP headers prefixed with one of the -strings ``x-image-meta-`` or ``x-image-meta-property-``. See the API -documentation for details. - -If the image is in ``queued`` status, image data may be added by -including it in the request body. Otherwise, attempting to add data -will result in a 409 Conflict response. 
- -If the request contains a body, the API will return a 400 under the following -circumstances: - -- The ``x-image-meta-size`` header is present and the length in bytes of - the request body does not match the value of this header. -- The ``x-image-meta-checksum`` header is present and MD5 checksum generated - by the backend store while storing the data does not match the value of - this header. - -Normal response codes: 200 - -Error response codes: 400, 404, 409 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - image_id: image_id-in-path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - image: image-object - - - -Response Example ----------------- - -.. literalinclude:: samples/image-update-response.json - :language: json - - -Show image details and image data -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1/images/{image_id} - -Returns the image metadata as headers; the image data is returned in the -body of the response. - -Standard image properties are returned in headers prefixed by -``x-image-meta-`` (for example, ``x-image-meta-name``). Custom image -properties are returned in headers prefixed by the string -``x-image-meta-property-`` (for example, ``x-image-meta-property-foo``). - -Normal response codes: 200 - -Error response codes: 404, 403 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - image_id: image_id-in-path - - - -Show image metadata -~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: HEAD /v1/images/{image_id} - -Returns the image metadata information as response headers. - -The Image system does not return a response body for the HEAD -operation. - -If the request succeeds, the operation returns the ``200`` response -code. - -Normal response codes: 200 - -Error response codes: 404, 409 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - image_id: image_id-in-path - - -Response Example ----------------- - - :: - - X-Image-Meta-Checksum: 8a40c862b5735975d82605c1dd395796 - X-Image-Meta-Container_format: aki - X-Image-Meta-Created_at: 2016-01-06T03:22:20.000000 - X-Image-Meta-Deleted: false - X-Image-Meta-Disk_format: aki - X-Image-Meta-Id: 03bc0a8b-659c-4de9-b6bd-13c6e86e6455 - X-Image-Meta-Is_public: true - X-Image-Meta-Min_disk: 0 - X-Image-Meta-Min_ram: 0 - X-Image-Meta-Name: cirros-0.3.4-x86_64-uec-kernel - X-Image-Meta-Owner: 13cc6052265b41529e2fd0fc461fa8ef - X-Image-Meta-Protected: false - X-Image-Meta-Size: 4979632 - X-Image-Meta-Status: deactivated - X-Image-Meta-Updated_at: 2016-02-25T03:02:05.000000 - X-Openstack-Request-Id: req-d5208320-28ed-4c22-b628-12dc6456d983 - - -Delete image -~~~~~~~~~~~~ - -.. rest_method:: DELETE /v1/images/{image_id} - -Deletes an image. - -Normal response codes: 204 - -Error response codes: 404, 403 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - image_id: image_id-in-path diff --git a/api-ref/source/v1/images-sharing-v1.inc b/api-ref/source/v1/images-sharing-v1.inc deleted file mode 100644 index e7a1bbeb..00000000 --- a/api-ref/source/v1/images-sharing-v1.inc +++ /dev/null @@ -1,150 +0,0 @@ -.. -*- rst -*- - -Sharing -******* - -Image sharing provides a means for one tenant (the "producer") to make a -private image available to other tenants (the "consumers"). This ability -can unfortunately be misused to spam tenants' image lists, so these calls -may not be exposed in some deployments. (The Images v2 API has a more -sophisticated sharing scheme that contains an anti-spam provision.) - -Add member to image -~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v1/images/{image_id}/members/{member_id} - -Adds the tenant whose tenant ID is ``member_id`` as a member of the -image denoted by ``image_id``. - -By default, an image member cannot further share the image with other -tenants. 
This behavior can be overridden by supplying a request body -with the call that specifies ``can_share`` as ``true``. - -Thus: - -- If you omit the request body, this call adds the specified tenant as a - member of the image with the ``can_share`` attribute set to ``false``. -- If you include a request body, the ``can_share`` attribute will be set - to the appropriate boolean value you have supplied in the request body. -- If the specified tenant is already a member, and there is no request - body, the membership (including the ``can_share`` attribute) remains - unmodified. -- If the specified tenant is already a member and the request includes - a body, the ``can_share`` attribute of the tenant will be set to whatever - value is specified in the request body. - -Normal response codes: 204 - -Error response codes: 404 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - image_id: image_id-in-path - - member_id: member_id-in-path - - can_share: can_share - - member_id: member_id - - -Request Example ---------------- - -.. literalinclude:: samples/image-member-add-request.json - :language: json - - -Replace membership list for an image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v1/images/{image_id}/members - -Replaces the membership list for an image so that the tenants whose -tenant IDs are listed in the member objects comprising the request body -become all and only the members of the image denoted by ``image_id``. - -If the ``can_share`` attribute is omitted for in any member object: - -- If the member already exists on the image, that member's ``can_share`` - setting remains unchanged. -- If the member did not already exist on the image, that member's - ``can_share`` attribute is set to ``false``. - -Normal response codes: 204 - -Error response codes: 404 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - image_id: image_id-in-path - - memberships: memberships - -Request Example ---------------- - -.. 
literalinclude:: samples/image-members-add-request.json - :language: json - - -Remove member -~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v1/images/{image_id}/members/{member_id} - -Removes a member from an image. - -Normal response codes: 204 - -Error response codes: 404 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - image_id: image_id-in-path - - member_id: member_id-in-path - - -List shared images -~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1/shared-images/{owner_id} - -Lists the VM images that an owner shares. The ``owner_id`` is the tenant ID -of the image owner. - -Normal response codes: 200 - -Error response codes: 404 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - owner_id: owner_id-in-path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - shared_images: shared_images - - -Response Example ----------------- - -.. literalinclude:: samples/shared-images-list-response.json - :language: json diff --git a/api-ref/source/v1/index.rst b/api-ref/source/v1/index.rst deleted file mode 100644 index b9d82e53..00000000 --- a/api-ref/source/v1/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. - Copyright 2010 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -:tocdepth: 3 - -================================= -Image Service API v1 (DEPRECATED) -================================= - -.. rest_expand_all:: - -.. include:: images-images-v1.inc -.. 
include:: images-sharing-v1.inc diff --git a/api-ref/source/v1/parameters.yaml b/api-ref/source/v1/parameters.yaml deleted file mode 100644 index 8759e920..00000000 --- a/api-ref/source/v1/parameters.yaml +++ /dev/null @@ -1,249 +0,0 @@ -# variables in header -location: - description: | - A URI location for the image record. - format: uri - in: header - required: true - type: string -x-image-meta-container_format: - description: | - The image ``container_format`` property. (Optional when only reserving - an image.) - - A container format defines the file format of the - file that contains the image and metadata about the actual VM. - For a VM image with a ``bare`` container format, the image is a - blob of unstructured data. You can set the container format to - one of these values: - - - ``aki`` - Amazon kernel image. - - ``ami`` - Amazon machine image. - - ``ari`` - Amazon ramdisk image. - - ``bare`` - No container or metadata envelope for the image. - - ``docker`` - Docker tar archive of the container filesystem. - - ``ova`` - OVA container format. - - ``ovf`` - OVF container format. - in: header - required: true - type: enum -x-image-meta-disk_format: - description: | - The image ``disk_format`` property. (Optional when only reserving - an image.) - - The disk format of a VM image is the format of - the underlying disk image. Virtual appliance vendors have - different formats for laying out the information contained in a VM - disk image. You can set the disk format for your image to one of - these values: - - - ``aki`` - An Amazon kernel image. - - ``ami`` - An Amazon machine image. - - ``ari`` - An Amazon ramdisk image. - - ``iso`` - An archive format for the data contents of an optical - disc, such as CDROM. - - ``qcow2``- Supported by the QEMU emulator that can expand dynamically - and supports Copy on Write. - - ``raw`` - Unstructured disk image format. 
- - ``vhd`` - VHD disk format, a common disk format used by hypervisors - from VMWare, Xen, Microsoft, VirtualBox, and others. - - ``vdi`` - Supported by VirtualBox VM monitor and the QEMU emulator. - - ``vmdk`` - A common disk format that supported by many hypervisors. - in: header - required: true - type: enum -x-image-meta-name: - description: | - The image ``name`` property. (Optional when only reserving - an image.) - - An image name is not required to be unique, though of course it will be - easier to tell your images apart if you give them distinct descriptive - names. Names are limited to 255 chars. - in: header - required: true - type: string -x-openstack-request-id: - description: | - Request identifier passed through by the various OpenStack services. - in: header - required: false - type: string - -# variables in path -image_id-in-path: - description: | - Image ID stored through the image API. Typically a UUID. - in: path - required: true - type: string -member_id-in-path: - description: | - The tenant ID of the tenant with whom an image is shared, that is, the - tenant ID of the image member. - in: path - required: true - type: string -owner_id-in-path: - description: | - Owner ID, which is the tenant ID. - in: path - required: true - type: string - -# variables in query -changes-since: - description: | - Filters the image list to those images that have changed since a time - stamp value. - in: query - required: false - type: string -container_format-in-query: - description: | - Filters the image list by a container format. A - valid value is ``aki``, ``ami``, ``ari``, ``bare``, ``docker``, - ``ova``, or ``ovf``. - in: query - required: false - type: string -disk_format-in-query: - description: | - Filters the image list by a disk format. A valid - value is ``aki``, ``ami``, ``ari``, ``iso``, ``qcow2``, ``raw``, - ``vhd``, ``vdi``, or ``vmdk``. 
- in: query - required: false - type: string -name-in-query: - description: | - Filters the image list by an image name, in string format. - in: query - required: false - type: string -size_max: - description: | - Filters the image list by a maximum image size, in bytes. - in: query - required: false - type: int -size_min: - description: | - Filters the image list by a minimum image size, in bytes. - in: query - required: false - type: int -status-in-query: - description: | - Filters the image list by a status. A valid value is ``queued``, - ``saving``, ``active``, ``killed``, ``deleted``, or ``pending_delete``. - in: query - required: false - type: string - -# variables in body -can_share: - description: | - Indicates whether the image member whose tenant ID is ``member_id`` - is authorized to share the image. If the member can share the image, - this value is ``true``. Otherwise, this value is ``false``. - in: body - required: false - type: boolean -createImage: - description: | - The virtual machine image data. Do not include this if you are only - reserving an image. - in: body - required: true - type: binary -image-object: - description: | - A JSON representation of the image. Includes all metadata fields. - in: body - required: true - type: object -images-detail-list: - description: | - A list of image objects. - - Each object contains the following fields: - - - ``checksum`` - The MD5 checksum of the image data. - - ``container_format`` - The container format. - - ``created_at`` - Timestamp of image record creation. - - ``deleted`` - ``true`` if the image is deleted, ``false`` - otherwise. - - ``deleted_at`` - Timestamp when the image went to ``deleted`` - status. - - ``disk_format`` - The disk format. - - ``id`` - The image ID, typically a UUID. - - ``is_public`` - This is ``true`` if the image is public, - ``false`` otherwise. - - ``name`` - The name of the image. - - ``owner`` - The image owner, usually the tenant_id. 
- - ``properties`` - A dict of user-specified key:value pairs (that - is, custom image metadata). - - ``protected`` - A boolean value that must be ``false`` or the - image cannot be deleted. Default value is ``false``. - - ``size`` - The size of the stored image data in bytes. - - ``status`` - The image status. - - ``updated_at`` - Timestamp of when the image record was most - recently modified. - - ``virtual_size`` - The size of the virtual machine image (the - virtual disk itself, not the containing package, if any) in bytes. - in: body - required: true - type: array -images-list: - description: | - A list of image objects in a sparse representation. - - Each object contains the following fields: - - - ``checksum`` - The MD5 checksum of the image data. - - ``container_format`` - The container format. - - ``disk_format`` - The disk format. - - ``id`` - The image ID, typically a UUID. - - ``name`` - The name of the image. - - ``size`` - The size of the image in bytes. - in: body - required: true - type: array -member_id: - description: | - The tenant ID of the tenant with whom an image is shared, that is, the - tenant ID of the image member. - in: body - required: true - type: string -memberships: - description: | - List of image member objects. - in: body - required: true - type: array -next: - description: | - Show the next item in the list. - format: uri - in: body - required: false - type: string -previous: - description: | - Show the previous item in the list. - format: uri - in: body - required: false - type: string -shared_images: - description: | - A list of objects, each of which contains an ``image_id`` and a - ``can_share`` field. If all the members of the image are such that - ``can_share`` is ``true`` for each member, then the ``can_share`` - value in this object will be ``true``, otherwise it will be ``false``. 
- in: body - required: true - type: array diff --git a/api-ref/source/v1/samples/image-member-add-request.json b/api-ref/source/v1/samples/image-member-add-request.json deleted file mode 100644 index fb1fecbe..00000000 --- a/api-ref/source/v1/samples/image-member-add-request.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "member_id": "eb5d80bd5f1e49f1818988148c70eabf", - "can_share": false -} diff --git a/api-ref/source/v1/samples/image-members-add-request.json b/api-ref/source/v1/samples/image-members-add-request.json deleted file mode 100644 index 26da6021..00000000 --- a/api-ref/source/v1/samples/image-members-add-request.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "memberships": [ - { - "member_id": "eb5d80bd5f1e49f1818988148c70eabf", - "can_share": false - }, - { - "member_id": "8f450f44647d4080a0e7ca505057b5ca", - "can_share": false - } - ] -} diff --git a/api-ref/source/v1/samples/image-memberships-list-response.json b/api-ref/source/v1/samples/image-memberships-list-response.json deleted file mode 100644 index 92bb003a..00000000 --- a/api-ref/source/v1/samples/image-memberships-list-response.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "memberships": [ - { - "member_id": "tenant1", - "can_share": false - }, - { - "...": "..." 
- } - ] -} diff --git a/api-ref/source/v1/samples/image-update-response.json b/api-ref/source/v1/samples/image-update-response.json deleted file mode 100644 index f727fe15..00000000 --- a/api-ref/source/v1/samples/image-update-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "image": { - "checksum": "eb9139e4942121f22bbc2afc0400b2a4", - "container_format": "bare", - "created_at": "2016-03-15T15:09:07.000000", - "deleted": false, - "deleted_at": null, - "disk_format": "vmdk", - "id": "1086fa65-8c63-4081-9a0a-ddf7e88e485b", - "is_public": false, - "min_disk": 22, - "min_ram": 11, - "name": "Silas Marner", - "owner": "c60b1d57c5034e0d86902aedf8c49be0", - "properties": { - "foo": "bar", - "qe_status": "approved" - }, - "protected": false, - "size": 25165824, - "status": "active", - "updated_at": "2016-05-10T21:14:04.000000", - "virtual_size": null - } -} diff --git a/api-ref/source/v1/samples/images-create-reserve-response.json b/api-ref/source/v1/samples/images-create-reserve-response.json deleted file mode 100644 index b6f0eeb0..00000000 --- a/api-ref/source/v1/samples/images-create-reserve-response.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "image": { - "checksum": null, - "container_format": null, - "created_at": "2016-05-10T21:35:15.000000", - "deleted": false, - "deleted_at": null, - "disk_format": null, - "id": "6b3ecfca-d445-4946-a8d1-c4938352b251", - "is_public": false, - "min_disk": 0, - "min_ram": 0, - "name": null, - "owner": "c60b1d57c5034e0d86902aedf8c49be0", - "properties": {}, - "protected": false, - "size": 0, - "status": "queued", - "updated_at": "2016-05-10T21:35:15.000000", - "virtual_size": null - } -} diff --git a/api-ref/source/v1/samples/images-create-with-data-response.json b/api-ref/source/v1/samples/images-create-with-data-response.json deleted file mode 100644 index bd47a73e..00000000 --- a/api-ref/source/v1/samples/images-create-with-data-response.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "image": { - "checksum": 
"7b1b10607acc1319506185e7227ca30d", - "container_format": "bare", - "created_at": "2016-05-10T21:41:41.000000", - "deleted": false, - "deleted_at": null, - "disk_format": "raw", - "id": "de2f2211-3ac7-4260-9142-41db0ecfb425", - "is_public": false, - "min_disk": 0, - "min_ram": 0, - "name": "Fake Image", - "owner": "c60b1d57c5034e0d86902aedf8c49be0", - "properties": {}, - "protected": false, - "size": 3908, - "status": "active", - "updated_at": "2016-05-10T21:41:41.000000", - "virtual_size": null - } -} diff --git a/api-ref/source/v1/samples/images-list-details-response.json b/api-ref/source/v1/samples/images-list-details-response.json deleted file mode 100644 index f01bd488..00000000 --- a/api-ref/source/v1/samples/images-list-details-response.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "images": [ - { - "checksum": "eb9139e4942121f22bbc2afc0400b2a4", - "container_format": "bare", - "created_at": "2016-03-15T15:09:07.000000", - "deleted": false, - "deleted_at": null, - "disk_format": "vmdk", - "id": "1086fa65-8c63-4081-9a0a-ddf7e88e485b", - "is_public": false, - "min_disk": 22, - "min_ram": 11, - "name": "Silas Marner", - "owner": "c60b1d57c5034e0d86902aedf8c49be0", - "properties": { - "foo": "bar", - "qe_status": "approved" - }, - "protected": false, - "size": 25165824, - "status": "active", - "updated_at": "2016-05-10T21:14:04.000000", - "virtual_size": null - }, - { - "...": "..." - } - ] -} diff --git a/api-ref/source/v1/samples/images-list-response.json b/api-ref/source/v1/samples/images-list-response.json deleted file mode 100644 index d57d4741..00000000 --- a/api-ref/source/v1/samples/images-list-response.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "images": [ - { - "checksum": "eb9139e4942121f22bbc2afc0400b2a4", - "container_format": "ovf", - "disk_format": "vmdk", - "id": "008cc101-c3ee-40dd-8477-cd8d99dcbf3d", - "name": "Harry", - "size": 25165824 - }, - { - "...": "..." 
- } - ] -} diff --git a/api-ref/source/v1/samples/shared-images-list-response.json b/api-ref/source/v1/samples/shared-images-list-response.json deleted file mode 100644 index b49385e2..00000000 --- a/api-ref/source/v1/samples/shared-images-list-response.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "shared_images": [ - { - "can_share": false, - "image_id": "008cc101-c3ee-40dd-8477-cd8d99dcbf3d" - }, - { - "can_share": true, - "image_id": "de2f2211-3ac7-4260-9142-41db0ecfb425" - }, - { - "...": "..." - } - ] -} diff --git a/api-ref/source/v2/images-data.inc b/api-ref/source/v2/images-data.inc deleted file mode 100644 index 257c0d0a..00000000 --- a/api-ref/source/v2/images-data.inc +++ /dev/null @@ -1,142 +0,0 @@ -.. -*- rst -*- - -.. _image-data: - -Image data -********** - -Uploads and downloads raw image data. - -*These operations may be restricted to administrators. Consult your cloud -operator's documentation for details.* - - -Upload binary image data -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v2/images/{image_id}/file - -Uploads binary image data. -*(Since Image API v2.0)* - -Set the ``Content-Type`` request header to ``application/octet-stream``. - -Example call: - -:: - - curl -i -X PUT -H "X-Auth-Token: $token" \ - -H "Content-Type: application/octet-stream" \ - -d @/home/glance/ubuntu-12.10.qcow2 \ - $image_url/v2/images/{image_id}/file - -**Preconditions** - -Before you can store binary image data, you must meet the following -preconditions: - -- The image must exist. - -- You must set the disk and container formats in the image. - -- The image status must be ``queued``. - -- Your image storage quota must be sufficient. - -- The size of the data that you want to store must not exceed the - size that the OpenStack Image service allows. - -**Synchronous Postconditions** - -- With correct permissions, you can see the image status as - ``active`` through API calls. 
- -- With correct access, you can see the stored data in the storage - system that the OpenStack Image Service manages. - -**Troubleshooting** - -- If you cannot store the data, either your request lacks required - information or you exceeded your allotted quota. Ensure that you - meet the preconditions and run the request again. If the request - fails again, review your API request. - -- The storage back ends for storing the data must have enough free - storage space to accommodate the size of the data. - -Normal response codes: 204 - -Error response codes: 400, 401, 403, 404, 409, 410, 413, 415, 503 - - -Request -------- - -.. rest_parameters:: images-parameters.yaml - - - Content-type: Content-Type-data - - image_id: image_id-in-path - - -Download binary image data -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/images/{image_id}/file - -Downloads binary image data. -*(Since Image API v2.0)* - -Example call: ``curl -i -X GET -H "X-Auth-Token: $token" -$image_url/v2/images/{image_id}/file`` - -The response body contains the raw binary data that represents the -actual virtual disk. The ``Content-Type`` header contains the -``application/octet-stream`` value. The ``Content-MD5`` header -contains an MD5 checksum of the image data. Use this checksum to -verify the integrity of the image data. - -**Preconditions** - -- The image must exist. - -**Synchronous Postconditions** - -- You can download the binary image data in your machine if the - image has image data. - -- If image data exists, the call returns the HTTP ``200`` response code for a - full image download request. - -- If image data exists, the call returns the HTTP ``206`` response code for a - partial download request. - -- If no image data exists, the call returns the HTTP ``204`` (No Content) - response code. - -- If no image record exists, the call returns the HTTP ``404`` response code - for an attempted full image download request. 
- -- For an unsatisfiable partial download request, the call returns the HTTP - ``416`` response code. - -Normal response codes: 200, 204, 206 - -Error response codes: 400, 403, 404, 416 - - -Request -------- - -.. rest_parameters:: images-parameters.yaml - - - image_id: image_id-in-path - - Range: Range - -Response --------- -.. rest_parameters:: images-parameters.yaml - - - Content-Type: Content-Type-data-response - - Content-Md5: Content-Md5 - - Content-Length: Content-Length - - Content-Range: Content-Range diff --git a/api-ref/source/v2/images-images-v2.inc b/api-ref/source/v2/images-images-v2.inc deleted file mode 100644 index b14e35c4..00000000 --- a/api-ref/source/v2/images-images-v2.inc +++ /dev/null @@ -1,697 +0,0 @@ -.. -*- rst -*- - -Images -****** - -Creates, lists, shows, updates, deletes, and performs other operations on -images. - -General information -~~~~~~~~~~~~~~~~~~~ - -**Images** - -An *image* is represented by a JSON Object, that is, as a set of key:value -pairs. Some of these keys are *base properties* that are managed by the -Image service. The remainder are properties put on the image by the operator -or the image owner. - -.. note:: - Another common term for "image properties" is "image metadata" because - what we're talking about here are properties that *describe* the image - data that can be consumed by various OpenStack services (for example, - by the Compute service to boot a server, or by the Volume service to - create a bootable volume). - -Here's some important information about image properties: - -* The base properties are always included in the image representation. A - base property that doesn't have a value is displayed with its value set - to ``null`` (that is, the JSON null data type). - -* Additional properties, whose value is always a string data type, are - only included in the response if they have a value. 
- -* Since version 2.2, the Images API allows an operator to configure - *property protections*, by which the create, read, update, and delete - operations on specific image properties may be restricted to particular - user roles. Consult the documentation of your cloud operator for details. - -* Arguably the most important properties of an image are its *id*, which - uniquely identifies the image, its *status*, which indicates the current - situation of the image (which, in turn, indicates what you can do with the - image), and its *visibility*, which indicates who has access to the image. - -.. note:: - In addition to image properties, there's usually a data payload that is - accessible via the image. In order to give image consumers some guarantees - about the data payload (for example, that the data associated with image - ``06b73bc7-9d62-4d37-ad95-d4708f37734f`` is the same today as it was when - you used it to boot a server yesterday) the Image service controls - particular image properties (for example, ``checksum``) that cannot be - modified. A shorthand way to refer to the way the image data payload is - related to its representation as an *image* in the Images API is to say that - "images are immutable". (This obviously applies to the image data payload, - not its representation in the Image service.) See the :ref:`Image Data - ` section of this document for more information. - -**Image status** - -The possible status values for images are presented in the following table. - -.. list-table:: - :header-rows: 1 - - * - Status - - Description - * - queued - - The Image service reserved an image ID for the image in the catalog - but did not yet upload any image data. - * - saving - - The Image service is in the process of saving the raw data for - the image into the backing store. - * - active - - The image is active and ready for consumption in the Image service. - * - killed - - An image data upload error occurred. 
- * - deleted - - The Image service retains information about the image but the image is - no longer available for use. - * - pending_delete - - Similar to the ``deleted`` status. An image in this state is not - recoverable. - * - deactivated - - The image data is not available for use. - -**Image visibility** - -The possible values for image visibility are presented in the following table. - -.. list-table:: - :header-rows: 1 - - * - Visibility - - Description - * - ``public`` - - Any user may read the image and its data payload. Additionally, the - image appears in the default image list of all users. - * - ``community`` - - Any user may read the image and its data payload, but the image does - *not* appear in the default image list of any user other than the - owner. - - *(This visibility value was added in the Image API v2.5)* - * - ``shared`` - - An image must have this visibility in order for *image members* to be - added to it. Only the owner and the specific image members who have - been added to the image may read the image or its data payload. - - The image appears in the default image list of the owner. It also - appears in the default image list of members who have *accepted* the - image. See the :ref:`Image Sharing ` section of this - document for more information. - - If you do not specify a visibility value when you create an image, - it is assigned this visibility by default. Non-owners, however, will - not have access to the image until they are added as image members. - - *(This visibility value was added in the Image API v2.5)* - * - ``private`` - - Only the owner image may read the image or its data payload. - Additionally, the image appears in the owner's default image list. - - *Since Image API v2.5, an image with private visibility cannot have - members added to it.* - -Note that the descriptions above discuss *read* access to images. Only the -image owner (or an administrator) has write access to image properties and the -image data payload. 
Further, in order to promise image immutability, the Image -service will allow even the owner (or an administrator) only write-once -permissions to specific image properties and the image data payload. - - -Create an image -~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/images - -Creates a catalog record for an operating system disk image. -*(Since Image API v2.0)* - -The ``Location`` response header contains the URI for the image. -The response body contains the new image entity. - -Synchronous Postconditions - -- With correct permissions, you can see the image status as - ``queued`` through API calls. - -Normal response codes: 201 - -Error response codes: 400, 401, 403, 409, 413, 415 - - -Request -------- - -.. rest_parameters:: images-parameters.yaml - - - container_format: container_format-in-request - - disk_format: disk_format-in-request - - id: id-in-request - - min_disk: min_disk-in-request - - min_ram: min_ram-in-request - - name: name-in-request - - protected: protected-in-request - - tags: tags-in-request - - visibility: visibility-in-request - -Additionally, you may include additional properties specified as key:value -pairs, where the value must be a string data type. Keys and values are limited -to 255 chars in length. Available key names may be limited by the cloud's -property protection configuration. - -Request Example ---------------- - -.. literalinclude:: samples/image-create-request.json - :language: json - -Response Parameters -------------------- - -.. 
rest_parameters:: images-parameters.yaml - - - Location: Location - - checksum: checksum - - container_format: container_format - - created_at: created_at - - disk_format: disk_format - - file: file - - id: id - - min_disk: min_disk - - min_ram: min_ram - - name: name - - owner: owner - - protected: protected - - schema: schema-image - - self: self - - size: size - - status: status - - tags: tags - - updated_at: updated_at - - virtual_size: virtual_size - - visibility: visibility - - direct_url: direct_url - - locations: locations - -The response may also include additional properties specified as key:value -pairs if additional properties were specified in the request. - -Response Example ----------------- - -.. literalinclude:: samples/image-create-response.json - :language: json - - -Show image details -~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/images/{image_id} - -Shows details for an image. -*(Since Image API v2.0)* - -The response body contains a single image entity. - -Preconditions - -- The image must exist. - -Normal response codes: 200 - -Error response codes: 400, 401, 403, 404 - - -Request -------- - -.. rest_parameters:: images-parameters.yaml - - - image_id: image_id-in-path - - -Response Parameters -------------------- - -.. rest_parameters:: images-parameters.yaml - - - checksum: checksum - - container_format: container_format - - created_at: created_at - - disk_format: disk_format - - file: file - - id: id - - min_disk: min_disk - - min_ram: min_ram - - name: name - - owner: owner - - protected: protected - - schema: schema-image - - self: self - - size: size - - status: status - - tags: tags - - updated_at: updated_at - - virtual_size: virtual_size - - visibility: visibility - - direct_url: direct_url - - locations: locations - -The response may also include additional properties specified as key:value -pairs if such properties have been added to the image by the owner or an -administrator. - -Response Example ----------------- - -.. 
literalinclude:: samples/image-show-response.json - :language: json - - -Show images -~~~~~~~~~~~ - -.. rest_method:: GET /v2/images - -Lists public virtual machine (VM) images. -*(Since Image API v2.0)* - -**Pagination** - -Returns a subset of the larger collection of images and a link that you can use -to get the next set of images. You should always check for the presence of a -``next`` link and use it as the URI in a subsequent HTTP GET request. You -should follow this pattern until a ``next`` link is no longer provided. - -The ``next`` link preserves any query parameters that you send in your initial -request. You can use the ``first`` link to jump back to the first page of the -collection. If you prefer to paginate through images manually, use the -``limit`` and ``marker`` parameters. - -**Query Filters** - -The list operation accepts query parameters to filter the response. - -A client can provide direct comparison filters by using most image attributes, -such as ``name=Ubuntu``, ``visibility=public``, and so on. - -To filter using image tags, use the filter ``tag`` (note the singular). To -filter on multiple tags, include each tag separately in the query. For -example, to find images with the tag **ready**, include ``tag=ready`` in your -query string. To find images tagged with **ready** and **approved**, include -``tag=ready&tag=approved`` in your query string. (Note that only images -containing *both* tags will be included in the response.) - -A client cannot use any ``link`` in the json-schema, such as self, file, or -schema, to filter the response. - -You can list VM images that have a status of ``active``, ``queued``, or -``saving``. - -**The** ``in`` **Operator** - -As a convenience, you may specify several values for any of the following -fields by using the ``in`` operator: - -* container_format -* disk_format -* id -* name -* status - -For most of these, usage is straight forward. 
For example, to list images -in queued or saving status, use: - -``GET /v2/images?status=in:saving,queued`` - -To find images in a particular list of image IDs, use: - -``GET /v2/images?id=in:3afb79c1-131a-4c38-a87c-bc4b801d14e6,2e011209-660f-44b5-baf2-2eb4babae53d`` - -Using the ``in`` operator with the ``name`` property of images can be a bit -trickier, depending upon how creatively you have named your images. The -general rule is that if an image name contains a comma (``,``), you must -enclose the entire name in quotation marks (``"``). As usual, you must URL -encode any characters that require it. - -For example, to find images named ``glass, darkly`` or ``share me``, you would -use the following filter specification: - -``GET v2/images?name=in:"glass,%20darkly",share%20me`` - -As with regular filtering by name, you must specify the complete name you are -looking for. Thus, for example, the query string ``name=in:glass,share`` will -only match images with the exact name ``glass`` or the exact name ``share``. -It will not find an image named ``glass, darkly`` or an image named ``share -me``. - -**Size Comparison Filters** - -You can use the ``size_min`` and ``size_max`` query parameters to filter images -that are greater than or less than the image size. The size, in bytes, is the -size of an image on disk. - -For example, to filter the container to include only images that are from 1 to -4 MB, set the ``size_min`` query parameter to ``1048576`` and the ``size_max`` -query parameter to ``4194304``. - -.. _v2-comparison-ops: - -**Time Comparison Filters** - -You can use a *comparison operator* along with the ``created_at`` or -``updated_at`` fields to filter your results. Specify the operator first, a -colon (``:``) as a separator, and then the time in `ISO 8601 Format -`_. Available comparison operators -are: - -.. list-table:: - :header-rows: 1 - - * - Operator - - Description - * - ``gt`` - - Return results more recent than the specified time. 
- * - ``gte`` - - Return any results matching the specified time and also any more recent - results. - * - ``eq`` - - Return any results matching the specified time exactly. - * - ``neq`` - - Return any results that do not match the specified time. - * - ``lt`` - - Return results older than the specified time. - * - ``lte`` - - Return any results matching the specified time and also any older - results. - -For example: - -.. code-block:: console - - GET v2/images?created_at=gt:2016-04-18T21:38:54Z - -**Sorting** - -You can use query parameters to sort the results of this operation. - -- ``sort_key``. Sorts by an image attribute. Sorts in the natural - sorting direction of the image attribute. - -- ``sort_dir``. Sorts in a sort direction. - -- ``sort``. Sorts by one or more sets of attribute and sort - direction combinations. If you omit the sort direction in a set, - the default is ``desc``. - -To sort the response, use the ``sort_key`` and ``sort_dir`` query -parameters: - -.. code-block:: console - - GET /v2/images?sort_key=name&sort_dir=asc&sort_key=status&sort_dir=desc - -Alternatively, specify the ``sort`` query parameter: - -.. code-block:: console - - GET /v2/images?sort=name:asc,status:desc - -.. note:: - Although this call has been available since version 2.0 of this API, - it has been enhanced from release to release. The filtering and - sorting functionality and syntax described above apply to the most - recent release (Newton). Not everything described above will be - available in prior releases. - -Normal response codes: 200 - -Error response codes: 400, 401, 403 - - -Request -------- - -.. 
rest_parameters:: images-parameters.yaml - - - limit: limit - - marker: marker - - name: name-in-query - - owner: owner-in-query - - status: status-in-query - - tag: tag-in-query - - visibility: visibility-in-query - - member_status: member_status-in-query - - size_max: size_max - - size_min: size_min - - created_at: created_at-in-query - - updated_at: updated_at-in-query - - sort_dir: sort_dir - - sort_key: sort_key - - sort: sort - - -Response Parameters -------------------- - -.. rest_parameters:: images-parameters.yaml - - - images: images - - first: first - - next: next - - schema: schema-images - - - -Response Example ----------------- - -.. literalinclude:: samples/images-list-response.json - :language: json - - -.. _v2-image-update: - -Update an image -~~~~~~~~~~~~~~~ - -.. rest_method:: PATCH /v2/images/{image_id} - -Updates an image. -*(Since Image API v2.0)* - -Conceptually, you update an image record by patching the JSON representation of -the image, passing a request body conforming to one of the following media -types: - -- ``application/openstack-images-v2.0-json-patch`` *(deprecated)* -- ``application/openstack-images-v2.1-json-patch`` *(since Image API v2.1)* - -Attempting to make a PATCH call using some other media type will provoke a -response code of 415 (Unsupported media type). - -The ``application/openstack-images-v2.1-json-patch`` media type provides a -useful and compatible subset of the functionality defined in JavaScript Object -Notation (JSON) Patch `RFC6902 `_, which -defines the ``application/json-patch+json`` media type. - -.. note:: - The ``application/openstack-images-v2.0-json-patch`` media type is based on - draft 4 of the standard. Its use is deprecated. - -For information about the PATCH method and the available media types, see -`Image API v2 HTTP PATCH media types -`_. 
- -Attempting to modify some image properties will cause the entire request to -fail with a 403 (Forbidden) response code: - -- An attempt to modify any of the "base" image properties that are managed by - the Image Service. These are the properties specified as read only in the - :ref:`Image Schema `. - -- An attempt to create or modify image properties for which you do not have - permission to do so *(since Image API v2.2)*. This depends upon how property - protections are configured in the OpenStack cloud in which you are making the - call. Consult your cloud's documentation for details. - -Attempting to add a location path to an image that is not in ``queued`` or -``active`` state will result in a 409 (Conflict) response code -*(since Image API v2.4)*. - -Normal response codes: 200 - -Error response codes: 400, 401, 403, 404, 409, 413, 415 - - -Request -------- - -.. rest_parameters:: images-parameters.yaml - - - Content-Type: Content-Type-patch - - image_id: image_id-in-path - -The request body must conform to the -``application/openstack-images-v2.1-json-patch`` media type definition (see -above). - - -Request Example ---------------- - -.. literalinclude:: samples/image-update-request.json - :language: json - - -Response Parameters -------------------- - -.. rest_parameters:: images-parameters.yaml - - - checksum: checksum - - container_format: container_format - - created_at: created_at - - disk_format: disk_format - - file: file - - id: id - - min_disk: min_disk - - min_ram: min_ram - - owner: owner - - name: name - - protected: protected - - schema: schema-image - - self: self - - size: size - - status: status - - tags: tags - - updated_at: updated_at - - visibility: visibility - - direct_url: direct_url - - locations: locations - - - -Response Example ----------------- - -.. literalinclude:: samples/image-update-response.json - :language: json - - -Delete an image -~~~~~~~~~~~~~~~ - -.. 
rest_method:: DELETE /v2/images/{image_id} - -(Since Image API v2.0) Deletes an image. - -You cannot delete images with the ``protected`` attribute set to -``true`` (boolean). - -Preconditions - -- You can delete an image in any status except ``deleted``. - -- The ``protected`` attribute of the image cannot be ``true``. - -- You have permission to perform image deletion under the configured image - deletion policy. - -Synchronous Postconditions - -- The response is empty and returns the HTTP ``204`` response code. - -- The API deletes the image from the images index. - -- If the image has associated binary image data in the storage backend, the - OpenStack Image service deletes the data. - -Normal response codes: 204 - -Error response codes: 400, 401, 403, 404, 409 - - -Request -------- - -.. rest_parameters:: images-parameters.yaml - - - image_id: image_id-in-path - - -Deactivate image -~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/images/{image_id}/actions/deactivate - -Deactivates an image. -*(Since Image API v2.3)* - -By default, this operation is restricted to administrators only. - -If you try to download a deactivated image, you will receive a 403 (Forbidden) -response code. Additionally, only administrative users can view image -locations for deactivated images. - -The deactivate operation returns an error if the image status is -not ``active`` or ``deactivated``. - -Preconditions - -- The image must exist. - -Normal response codes: 204 - -Error response codes: 400, 403, 404 - - -Request -------- - -.. rest_parameters:: images-parameters.yaml - - - image_id: image_id-in-path - - -Reactivate image -~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/images/{image_id}/actions/reactivate - -Reactivates an image. -*(Since Image API v2.3)* - -By default, this operation is restricted to administrators only. - -The reactivate operation returns an error if the image status is -not ``active`` or ``deactivated``. - -Preconditions - -- The image must exist. 
- -Normal response codes: 204 - -Error response codes: 400, 403, 404 - - -Request -------- - -.. rest_parameters:: images-parameters.yaml - - - image_id: image_id-in-path diff --git a/api-ref/source/v2/images-parameters-descriptions.inc b/api-ref/source/v2/images-parameters-descriptions.inc deleted file mode 100644 index ef0d3c47..00000000 --- a/api-ref/source/v2/images-parameters-descriptions.inc +++ /dev/null @@ -1,17 +0,0 @@ -.. |p-start| raw:: html - -

- -.. |p-end| raw:: html - -

- -.. |disk_format_description| replace:: - |p-start|\ The format of the disk.\ |p-end| - |p-start|\ Responses may vary based on the configuration available in a - particular OpenStack cloud. See the :ref:`Image Schema ` - response from the cloud itself for the valid values available.\ |p-end| - |p-start|\ Example formats are: ``ami``, ``ari``, ``aki``, ``vhd``, - ``vhdx``, ``vmdk``, ``raw``, ``qcow2``, ``vdi``, or ``iso``.\ |p-end| - |p-start|\ The value might be ``null`` (JSON null data type).\ |p-end| - **Newton changes**: The ``vhdx`` disk format is a supported value. diff --git a/api-ref/source/v2/images-parameters.yaml b/api-ref/source/v2/images-parameters.yaml deleted file mode 100644 index 9054fd6e..00000000 --- a/api-ref/source/v2/images-parameters.yaml +++ /dev/null @@ -1,597 +0,0 @@ -# variables in header -Content-Length: - description: | - The length of the body in octets (8-bit bytes) - in: header - required: true - type: string -Content-Md5: - description: | - The MD5 checksum of the body. - in: header - required: true - type: string -Content-Range: - description: | - The content range of image data. For details, see - `Hypertext Transfer Protocol (HTTP/1.1): Range Requests - `_. - in: header - required: false - type: string -Content-Type-data: - description: | - The media type descriptor for the request body. Use - ``application/octet-stream`` - in: header - required: true - type: string -Content-Type-data-response: - description: | - The media type descriptor of the response body, namely - ``application/octet-stream`` - in: header - required: true - type: string -Content-Type-json: - description: | - The media type descriptor for the request body. Use - ``application/json``. - in: header - required: true - type: string -Content-Type-patch: - description: | - The media type descriptor for the request body. Use - ``application/openstack-images-v2.1-json-patch``. 
(You can also use - ``application/openstack-images-v2.0-json-patch``, but keep in mind that - it's deprecated.) - in: header - required: true - type: string -Location: - description: | - The URL to access the image file from the - external store. - in: header - required: true - type: string -Range: - description: | - The range of image data requested. Note that multi range requests are - not supported. For details, see - `Hypertext Transfer Protocol (HTTP/1.1): Range Requests - `_. - in: header - required: false - type: string - -# variables in path -image_id-in-path: - description: | - The UUID of the image. - in: path - required: true - type: string -member_id-in-path: - description: | - The ID of the image member. An image member is usually the project (also - called the "tenant") with whom the image is shared. - in: path - required: true - type: string -tag-in-path: - description: | - The image tag. A tag is limited to 255 chars in length. You may wish - to use characters that can easily be written in a URL. - in: path - required: true - type: string - -# variables in query -created_at-in-query: - description: | - Specify a *comparison filter* based on the date and time when the resource - was created. (See :ref:`Time Comparison Filters `). - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - - If you omit the time zone, the UTC time zone is assumed. - in: query - required: false - type: string -limit: - description: | - Requests a page size of items. Returns a number of items up to a limit - value. Use the ``limit`` parameter to make an initial limited request and - use the ID of the last-seen item from the response as the ``marker`` - parameter value in a subsequent limited request. - in: query - required: false - type: integer -marker: - description: | - The ID of the last-seen item. 
Use the ``limit`` parameter to make an - initial limited request and use the ID of the last-seen item from the - response as the ``marker`` parameter value in a subsequent limited request. - in: query - required: false - type: string -member_status-in-query: - description: | - Filters the response by a member status. A valid value is ``accepted``, - ``pending``, ``rejected``, or ``all``. Default is ``accepted``. - in: query - required: false - type: string -name-in-query: - description: | - Filters the response by a name, as a string. A valid value is the name of - an image. - in: query - required: false - type: string -owner-in-query: - description: | - Filters the response by a project (also called a "tenant") ID. Shows only - images that are shared with you by the specified owner. - in: query - required: false - type: string -size_max: - description: | - Filters the response by a maximum image size, in - bytes. - in: query - required: false - type: string -size_min: - description: | - Filters the response by a minimum image size, in - bytes. - in: query - required: false - type: string -sort: - description: | - Sorts the response by one or more attribute and sort direction - combinations. You can also set multiple sort keys and directions. - Default direction is ``desc``. - - Use the comma (``,``) character to separate multiple values. For - example: - - .. code-block:: none - - GET /v2/images?sort=name:asc,status:desc - in: query - required: false - type: string -sort_dir: - description: | - Sorts the response by a set of one or more sort - direction and attribute (``sort_key``) combinations. A valid value - for the sort direction is ``asc`` (ascending) or ``desc`` - (descending). If you omit the sort direction in a set, the default - is ``desc``. - in: query - required: false - type: string -sort_key: - description: | - Sorts the response by an attribute, such as - ``name``, ``id``, or ``updated_at``. Default is ``created_at``. 
- The API uses the natural sorting direction of the ``sort_key`` - image attribute. - in: query - required: false - type: string -status-in-query: - description: | - Filters the response by an image status. - in: query - required: false - type: integer -tag-in-query: - description: | - Filters the response by the specified tag value. May be repeated, but keep - in mind that you're making a conjunctive query, so only images containing - *all* the tags specified will appear in the response. - in: query - required: false - type: string -updated_at-in-query: - description: | - Specify a *comparison filter* based on the date and time when the resource - was most recently modified. (See :ref:`Time Comparison Filters - `). - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - - If you omit the time zone, the UTC time zone is assumed. - in: query - required: false - type: string -visibility-in-query: - description: | - Filters the response by an image visibility value. A valid value is - ``public``, ``private``, ``community``, or ``shared``. (Note that if you - filter on ``shared``, the images included in the response will only be - those where your member status is ``accepted`` unless you explicitly - include a ``member_status`` filter in the request.) If you omit this - parameter, the response shows ``public``, ``private``, and those ``shared`` - images with a member status of ``accepted``. - in: query - required: false - type: string - -# variables in body -checksum: - description: | - Hash that is used over the image data. The Image - service uses this value for verification. The value might be - ``null`` (JSON null data type). - in: body - required: true - type: string -container_format: - description: | - Format of the image container. 
A valid value is one of - ``ami``, ``ari``, ``aki``, ``bare``, ``ovf``, ``ova``, or - ``docker``. The value might be ``null`` (JSON null data type). - in: body - required: true - type: enum -container_format-in-request: - description: | - Format of the image container. A valid value is one of - ``ami``, ``ari``, ``aki``, ``bare``, ``ovf``, ``ova``, or - ``docker``. - in: body - required: false - type: enum -created_at: - description: | - The date and time when the resource was created. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. - in: body - required: true - type: string -direct_url: - description: | - The URL to access the image file kept in external store. *It is present - only if the* ``show_image_direct_url`` *option is* ``true`` *in the Image - service's configuration file.* **Because it presents a security risk, this - option is disabled by default.** - in: body - required: false - type: string -disk_format: - description: | - |disk_format_description| - in: body - required: true - type: enum -disk_format-in-request: - description: | - |disk_format_description| - in: body - required: false - type: enum -file: - description: | - The URL for the virtual machine image file. - in: body - required: true - type: string -first: - description: | - The URI for the first page of response. - in: body - required: true - type: string -id: - description: | - A unique, user-defined image UUID, in the format: - - :: - - nnnnnnnn-nnnn-nnnn-nnnn-nnnnnnnnnnnn - - Where **n** is a hexadecimal digit from 0 to f, or F. - - For example: - - :: - - b2173dd3-7ad6-4362-baa6-a68bce3565cb - - If you omit this value, the API generates a UUID for the image. 
- in: body - required: true - type: string -id-in-request: - description: | - A unique, user-defined image UUID, in the format: - - :: - - nnnnnnnn-nnnn-nnnn-nnnn-nnnnnnnnnnnn - - Where **n** is a hexadecimal digit from 0 to f, or F. - - For example: - - :: - - b2173dd3-7ad6-4362-baa6-a68bce3565cb - - If you omit this value, the API generates a UUID for the image. If you - specify a value that has already been assigned, the request fails with - a ``409`` response code. - in: body - required: false - type: string -image_id-in-body: - description: | - The UUID of the image. - in: body - required: true - type: string -images: - description: | - A list of *image* objects, as described by the :ref:`Images Schema - `. - in: body - required: true - type: array -locations: - description: | - A list of objects, each of which describes an image location. Each object - contains a ``url`` key, whose value is a URL specifying a location, and a - ``metadata`` key, whose value is a dict of key:value pairs containing - information appropriate to the use of whatever external store is indicated - by the URL. *This list appears only if the* ``show_multiple_locations`` - *option is set to* ``true`` *in the Image service's configuration file.* - **Because it presents a security risk, this option is disabled by - default.** - in: body - required: false - type: array -member_id: - description: | - The ID of the image member. An image member is usually a project (also - called the "tenant") with whom the image is shared. - in: body - required: true - type: string -member_status: - description: | - The status of this image member. Value is one of ``pending``, - ``accepted``, ``rejected``. - in: body - required: true - type: string -members: - description: | - A list of *member* objects, as described by the :ref:`Image Members Schema - `. Each *member* object describes a member with whom - this image is being shared. 
- in: body - required: true - type: array -min_disk: - description: | - Amount of disk space in GB that is required to boot the image. - The value might be ``null`` (JSON null data type). - in: body - required: true - type: integer -min_disk-in-request: - description: | - Amount of disk space in GB that is required to boot the image. - in: body - required: false - type: integer -min_ram: - description: | - Amount of RAM in MB that is required to boot the image. - The value might be ``null`` (JSON null data type). - in: body - required: true - type: integer -min_ram-in-request: - description: | - Amount of RAM in MB that is required to boot the image. - in: body - required: false - type: integer -name: - description: | - The name of the image. Value might be ``null`` (JSON null data type). - in: body - required: true - type: string -name-in-request: - description: | - The name of the image. - in: body - required: false - type: string -next: - description: | - The URI for the next page of response. Will not be present on the last - page of the response. - in: body - required: true - type: string -owner: - description: | - An identifier for the owner of the image, usually the project (also - called the "tenant") ID. - The value might be ``null`` (JSON null data type). - in: body - required: true - type: string -protected: - description: | - A boolean value that must be ``false`` or the image cannot be deleted. - in: body - required: true - type: boolean -protected-in-request: - description: | - Image protection for deletion. Valid value is ``true`` or ``false``. - Default is ``false``. - in: body - required: false - type: boolean -schema-image: - description: | - The URL for the schema describing a virtual machine image. - in: body - required: true - type: string -schema-images: - description: | - The URL for the schema describing a list of images. 
- in: body - required: true - type: string -schema-member: - description: | - The URL for the schema describing an image member. - in: body - required: true - type: string -schema-members: - description: | - The URL for the schema describing an image member list. - in: body - required: true - type: string -self: - description: | - The URL for the virtual machine image. - in: body - required: true - type: string -size: - description: | - The size of the image data, in bytes. The value - might be ``null`` (JSON null data type). - in: body - required: true - type: integer -status: - description: | - The image status. - in: body - required: true - type: string -tags: - description: | - List of tags for this image, possibly an empty list. - in: body - required: true - type: array -tags-in-request: - description: | - List of tags for this image. Each tag is a string of at most 255 chars. - The maximum number of tags allowed on an image is set by the operator. - in: body - required: false - type: array -updated_at: - description: | - The date and time when the resource was updated. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. In the previous example, the offset value is ``-05:00``. - - If the ``updated_at`` date and time stamp is not set, its value is - ``null``. - in: body - required: true - type: string -url: - description: | - The URL to access the image file kept in external - store. - in: body - required: true - type: string -value: - description: | - Value of image property used in add or replace - operations expressed in JSON notation. For example, you must - enclose strings in quotation marks, and you do not enclose numeric - values in quotation marks. - in: body - required: true - type: string -virtual_size: - description: | - The virtual size of the image. 
The value might - be ``null`` (JSON null data type). - in: body - required: true - type: integer -visibility: - description: | - Image visibility, that is, the access permission for the image. - in: body - required: true - type: string -visibility-in-request: - description: | - Visibility for this image. Valid value is one of: ``public``, ``private``, - ``shared``, or ``community``. - At most sites, only an administrator can make an image ``public``. - Some sites may restrict what users can make an image ``community``. - Some sites may restrict what users can perform member operations on - a ``shared`` image. - *Since the Image API v2.5, the default value is ``shared``.* - in: body - required: false - type: string diff --git a/api-ref/source/v2/images-schemas.inc b/api-ref/source/v2/images-schemas.inc deleted file mode 100644 index 70315dc7..00000000 --- a/api-ref/source/v2/images-schemas.inc +++ /dev/null @@ -1,138 +0,0 @@ -.. -*- rst -*- - -.. note: You can get a 400 on a GET if you pass a request body - (see router.py) - -Image Schemas -************* - -Gets a JSON-schema document that represents the various entities talked -about by the Images v2 API. - -.. _images-schema: - -Show images schema -~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/schemas/images - -*(Since Images v2.0)* - -Shows a JSON schema document that represents an *images* entity. - -An images entity is a container of image entities. - -The following schema is solely an example. Consider only the -response to the API call as authoritative. - -Normal response codes: 200 - -Error response codes: 400, 401 - - -Request -------- - -This operation has no request parameters and does not accept a request body. - - -Response Example ----------------- - -.. literalinclude:: samples/schemas-images-list-response.json - :language: json - -.. _image-schema: - -Show image schema -~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: GET /v2/schemas/image - -*(Since Images v2.0)* - -Shows a JSON schema document that represents an *image* entity. - -The following schema is solely an example. Consider only the -response to the API call as authoritative. - -Normal response codes: 200 - -Error response codes: 400, 401 - - -Request -------- - -This operation has no request parameters and does not accept a request body. - - -Response Example ----------------- - -.. literalinclude:: samples/schemas-image-show-response.json - :language: json - -.. _image-members-schema: - -Show image members schema -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/schemas/members - -*(Since Images v2.1)* - -Shows a JSON schema document that represents an *image members* entity. - -An image members entity is a container of image member entities. - -The following schema is solely an example. Consider only the -response to the API call as authoritative. - -Normal response codes: 200 - -Error response codes: 400, 401 - - -Request -------- - -This operation has no request parameters and does not accept a request body. - - -Response Example ----------------- - -.. literalinclude:: samples/schemas-image-members-list-response.json - :language: json - -.. _image-member-schema: - -Show image member schema -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/schemas/member - -*(Since Images v2.1)* - -Shows a JSON schema document that represents an *image member* entity. - -The following schema is solely an example. Consider only the -response to the API call as authoritative. - -Normal response codes: 200 - -Error response codes: 400, 401 - - -Request -------- - -This operation has no request parameters and does not accept a request body. - - -Response Example ----------------- - -.. 
literalinclude:: samples/schemas-image-member-show-response.json - :language: json diff --git a/api-ref/source/v2/images-sharing-v2.inc b/api-ref/source/v2/images-sharing-v2.inc deleted file mode 100644 index feda75bc..00000000 --- a/api-ref/source/v2/images-sharing-v2.inc +++ /dev/null @@ -1,381 +0,0 @@ -.. -*- rst -*- - -.. _image-sharing: - -Sharing -******* - -Images may be shared among projects by creating *members* on the image. Image -members have read-only privileges on the image. The following calls allow you -to create, list, update, and delete image members. - -.. note:: - - An image member is an identifier for a consumer with whom the image is - shared. In most OpenStack clouds, where the value of the ``owner`` property - of an image is a project ID, the appropriate identifier to use for the - ``member_id`` is the consumer's project ID (also known as the "tenant ID"). - In these clouds, image sharing is project-to-project, and all the individual - users in the consuming project have access to the image. - - * Some deployments may choose instead to have the identifier of the user who - created the image as the value of the ``owner`` property. In such clouds, - the appropriate identifier to use for the ``member_id`` is the user ID of - the person with whom you want to share the image. In these clouds, image - sharing is user-to-user. - - * Note that you, as an image owner, do not have a choice about what value to - use for the ``member_id``. If, like most OpenStack clouds, your cloud - uses the tenant ID for the image ``owner``, sharing will not work if you - use a user ID as the ``member_id`` of an image (and vice-versa). - - * Please consult your cloud's local documentation for details. - -When an image is shared, the member is given immediate access to the image. -In order to prevent spamming other users' image lists, a shared image does not -appear in a member's image list until the member "accepts" the image. 
- -Only the image owner may create members. Only an image member may modify his -or her member status. - -.. TODO(rosmaita): update the following reference when the "narrative" API - docs have a final resting place - -For a conceptual overview of image sharing, including a suggested workflow, -please consult `Image API v2 Sharing`_. - -.. _Image API v2 Sharing: - http://specs.openstack.org/openstack/glance-specs/specs/api/v2/sharing-image-api-v2.html - -.. note:: - - If you don't want to maintain a sharing relationship with particular - image consumers, but instead want to make an image available to *all* - users, you may update your image's ``visibility`` property to - ``community``. - - * In some clouds, the ability to "communitize" an image may be prohibited - or restricted to trusted users. Please consult your cloud's local - documentation for details. - -Create image member -~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/images/{image_id}/members - -Adds a tenant ID as an image member. -*(Since Image API v2.1)* - - -Preconditions - -- The image must exist. - -- The image must have a ``visibility`` value of ``shared``. - -- You must be the owner of the image. - -Synchronous Postconditions - -- With correct permissions, you can see the member status of the - image member as ``pending`` through API calls. - -Troubleshooting - -- Even if you have correct permissions, if the ``visibility`` - attribute is not set to ``shared``, the request returns the HTTP - ``403`` response code. Ensure that you meet the preconditions and - run the request again. If the request fails again, review your - API request. - -- If the member is already a member for the image, the service - returns the ``Conflict (409)`` response code. If you meant to - specify a different member, run the request again. - - -Normal response codes: 200 - -Error response codes: 400, 401, 403, 404, 409, 413 - - -Request -------- - -.. 
rest_parameters:: images-parameters.yaml - - - image_id: image_id-in-path - - member: member_id - -Request Example ---------------- - -.. literalinclude:: samples/image-member-create-request.json - :language: json - - - -Response Parameters -------------------- - -.. rest_parameters:: images-parameters.yaml - - - created_at: created_at - - image_id: image_id-in-body - - member_id: member_id - - schema: schema-member - - status: member_status - - updated_at: updated_at - -Response Example ----------------- - -.. literalinclude:: samples/image-member-create-response.json - :language: json - - -Show image member details -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/images/{image_id}/members/{member_id} - -Shows image member details. -*(Since Image API v2.1)* - -Response body is a single image member entity. - -Preconditions - -- The image must exist. - -- The image must have a ``visibility`` value of ``shared``. - -- You must be the owner or the member of the image who's referenced in the - call. - - -Normal response codes: 200 - -Error response codes: 400, 401, 404 - - -Request -------- - -.. rest_parameters:: images-parameters.yaml - - - image_id: image_id-in-path - - member_id: member_id-in-path - - -Response Parameters -------------------- - -.. rest_parameters:: images-parameters.yaml - - - created_at: created_at - - image_id: image_id-in-body - - member_id: member_id - - schema: schema-member - - status: member_status - - updated_at: updated_at - - -Response Example ----------------- - -.. literalinclude:: samples/image-member-details-response.json - :language: json - - -List image members -~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/images/{image_id}/members - -Lists the tenants that share this image. -*(Since Image API v2.1)* - -If the image owner makes this call, the complete member list is -returned. - -If a user who is an image member makes this call, the member list -contains only information for that user. 
- -If a user who is not an image member makes this call, the call -returns the HTTP ``404`` response code. - -Preconditions - -- The image must exist. - -- The image must have a ``visibility`` value of ``shared``. - -- You must be the owner or a member of the image. - - -Normal response codes: 200 - -Error response codes: 400, 401, 403, 404 - - -Request -------- - -.. rest_parameters:: images-parameters.yaml - - - image_id: image_id-in-path - - -Response Parameters -------------------- - -.. rest_parameters:: images-parameters.yaml - - - members: members - - schema: schema-members - - -Response Example ----------------- - -.. literalinclude:: samples/image-members-list-response.json - :language: json - - -Update image member -~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v2/images/{image_id}/members/{member_id} - -Sets the status for an image member. -*(Since Image API v2.1)* - -This call allows an image member to change his or her *member status*. - -When an image is shared with you, you have immediate access to the image. What -updating your member status on the image does for you is that it affects -whether the image will appear in your image list response. - -- When an image is shared with you, your member_status is ``pending``. You - won't see the image unless you go looking for it, either by making a show - image detail request using the image's ID, or by making an image list call - specifically looking for a shared image in member status ``pending``. This - way, other users cannot "spam" your image list with images you may not want - to see. - -- If you want to see a particular shared image in your image list, then you - must use this call to change your member status on the image to ``accepted``. - -- The image owner can see what your member status is on an image, but the owner - *cannot* change the status. Only you (or an administrator) can do that. - -- There are three member status values: ``pending``, ``accepted``, and - ``rejected``. 
The ``pending`` and ``rejected`` statuses are functionally - identical. The difference is that ``pending`` indicates to the owner that - you haven't updated the image, so perhaps you aren't aware that it's been - shared with you. The ``rejected`` status indicates that you are aware that - the image exists and you specifically decided that you don't want to see it - in your image list response. - -For a more detailed discussion of image sharing, please consult `Image API v2 -Sharing`_. - -Preconditions - -- The image must exist. - -- The image must have a ``visibility`` value of ``shared``. - -- You must be the member of the image referenced in the call. - -Synchronous Postconditions - -- If you update the member status to ``accepted`` and have the - correct permissions, you see the image in list images responses. - -- With correct permissions, you can make API calls to see the - updated member status of the image. - - -Normal response codes: 200 - -Error response codes: 400, 401, 404, 403 - - -Request -------- - -.. rest_parameters:: images-parameters.yaml - - - image_id: image_id-in-path - - member_id: member_id-in-path - - status: member_status - -Request Example ---------------- - -.. literalinclude:: samples/image-member-update-request.json - :language: json - - - -Response Parameters -------------------- - -.. rest_parameters:: images-parameters.yaml - - - created_at: created_at - - image_id: image_id-in-body - - member_id: member_id - - schema: schema-member - - status: member_status - - updated_at: updated_at - - -Response Example ----------------- - -.. literalinclude:: samples/image-member-update-response.json - :language: json - - -Delete image member -~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v2/images/{image_id}/members/{member_id} - -Deletes a tenant ID from the member list of an image. -*(Since Image API v2.1)* - -Preconditions - -- The image must exist. - -- The image must have a ``visibility`` value of ``shared``. 
- -- You must be the owner of the image. - -Synchronous Postconditions - -- The API removes the member from the image members. - -Troubleshooting - -- Even if you have correct permissions, if you are not the owner of - the image or you specify an incorrect image ID or member ID, the - call returns the HTTP ``403`` or ``404`` response code. Ensure - that you meet the preconditions and run the request again. If the - request fails again, review your API request URI. - -Normal response codes: 204 - -Error response codes: 400, 401, 403, 404 - - -Request -------- - -.. rest_parameters:: images-parameters.yaml - - - image_id: image_id-in-path - - member_id: member_id-in-path diff --git a/api-ref/source/v2/images-tags.inc b/api-ref/source/v2/images-tags.inc deleted file mode 100644 index 9affb62e..00000000 --- a/api-ref/source/v2/images-tags.inc +++ /dev/null @@ -1,52 +0,0 @@ -.. -*- rst -*- - -Image tags -********** - -Adds and deletes image tags. - -Image tags may also be modfied by the :ref:`v2-image-update` call. - - -Add image tag -~~~~~~~~~~~~~ - -.. rest_method:: PUT /v2/images/{image_id}/tags/{tag} - -Adds a tag to an image. -*(Since Image API v2.0)* - -Normal response codes: 204 - -Error response codes: 400, 401, 403, 404, 413 - - -Request -------- - -.. rest_parameters:: images-parameters.yaml - - - image_id: image_id-in-path - - tag: tag-in-path - - -Delete image tag -~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v2/images/{image_id}/tags/{tag} - -Deletes a tag from an image. -*(Since Image API v2.0)* - -Normal response codes: 204 - -Error response codes: 400, 401, 403, 404 - - -Request -------- - -.. rest_parameters:: images-parameters.yaml - - - image_id: image_id-in-path - - tag: tag-in-path diff --git a/api-ref/source/v2/index.rst b/api-ref/source/v2/index.rst deleted file mode 100644 index 40c718ef..00000000 --- a/api-ref/source/v2/index.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. - Copyright 2010 OpenStack Foundation - All Rights Reserved. 
- - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -:tocdepth: 3 - -============================== -Image Service API v2 (CURRENT) -============================== - -.. rest_expand_all:: - -.. include:: images-parameters-descriptions.inc -.. include:: images-images-v2.inc -.. include:: images-sharing-v2.inc -.. include:: images-tags.inc -.. include:: images-schemas.inc -.. include:: images-data.inc -.. include:: tasks.inc -.. include:: tasks-schemas.inc diff --git a/api-ref/source/v2/metadefs-index.rst b/api-ref/source/v2/metadefs-index.rst deleted file mode 100644 index 39128f6d..00000000 --- a/api-ref/source/v2/metadefs-index.rst +++ /dev/null @@ -1,64 +0,0 @@ -.. - Copyright 2010 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -:tocdepth: 3 - -============================================= -Metadata Definitions Service API v2 (CURRENT) -============================================= - -.. 
rest_expand_all:: - -Metadefs -******** - -General information -~~~~~~~~~~~~~~~~~~~ - -The Metadata Definitions Service ("metadefs", for short) provides a common API -for vendors, operators, administrators, services, and users to meaningfully -define available key:value pairs that can be used on different types of cloud -resources (for example, images, artifacts, volumes, flavors, aggregates, -and other resources). - -To get you started, Glance contains a default catalog of metadefs that may be -installed at your site; see the `README -`_ in the -code repository for details. - -Once a common catalog of metadata definitions has been created, the catalog is -available for querying through the API. Note that this service stores only the -*catalog*, because metadefs are meta-metadata. Metadefs provide information -*about* resource metadata, but do not themselves serve as actual metadata. - -Actual key:value pairs are stored on the resources to which they apply using -the metadata facilities provided by the appropriate API. (For example, the -Images API would be used to put specific key:value pairs on a virtual machine -image.) - -A metadefs definition includes a property’s key, its description, its -constraints, and the resource types to which it can be associated. See -`Metadata Definition Concepts -`_ in the -Glance Developer documentation for more information. - - -.. include:: metadefs-namespaces.inc -.. include:: metadefs-resourcetypes.inc -.. include:: metadefs-namespaces-objects.inc -.. include:: metadefs-namespaces-properties.inc -.. include:: metadefs-namespaces-tags.inc -.. include:: metadefs-schemas.inc diff --git a/api-ref/source/v2/metadefs-namespaces-objects.inc b/api-ref/source/v2/metadefs-namespaces-objects.inc deleted file mode 100644 index 4c697556..00000000 --- a/api-ref/source/v2/metadefs-namespaces-objects.inc +++ /dev/null @@ -1,280 +0,0 @@ -.. 
-*- rst -*- - -Metadata definition objects -*************************** - -Creates, lists, shows details for, updates, and deletes metadata definition -objects. - -*Since API v2.2* - -Create object -~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/metadefs/namespaces/{namespace_name}/objects - -Creates an object definition in a namespace. - -Normal response codes: 201 - -Error response codes: 400, 401, 403, 404, 409 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - - name: object-name - - description: object-description-in-request - - properties: object-properties-in-request - - required: object-required-in-request - -Request Example ---------------- - -.. literalinclude:: samples/metadef-object-create-request.json - :language: json - - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - created_at: created_at - - description: object-description - - name: object-name - - properties: object-properties - - required: object-required - - schema: object-schema - - self: self - - updated_at: updated_at - -Response Example ----------------- - -.. literalinclude:: samples/metadef-object-create-response.json - :language: json - - -List objects -~~~~~~~~~~~~ - -.. rest_method:: GET /v2/metadefs/namespaces/{namespace_name}/objects - -Lists object definitions in a namespace. - -Returns a subset of the larger collection of namespaces and a link -that you can use to get the next set of namespaces. You should -always check for the presence of a ``next`` link and use it as the -URI in a subsequent HTTP GET request. You should follow this -pattern until a ``next`` link is no longer provided. The next link -preserves any query parameters that you send in your initial -request. You can use the ``first`` link to jump back to the first -page of the collection. If you prefer to paginate through -namespaces manually, use the ``limit`` and ``marker`` parameters. 
- -Use the ``resource_types`` and ``visibility`` query parameters to -filter the response. - -For example, set the ``resource_types`` query parameter to -``OS::Glance::Image,OS::Nova::Flavor`` to filter the response to -include only namespaces that are associated with the given resource -types. - -You can sort the results of this operation by using the -``sort_key`` and ``sort_dir`` parameters. The API uses the natural -sorting of whatever namespace attribute is provided as the -``sort_key``. - - -Normal response codes: 200 - -Error response codes: 401, 403, 404 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - - visibility: visibility-in-query - - resource_types: resource_types-in-query - - sort_key: sort_key - - sort_dir: sort_dir - - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - display_name: display_name - - description: description - - namespace: namespace - - visibility: visibility - - protected: protected - - namespaces: namespaces - - resource_type_associations: resource_type_associations - - -Response Example ----------------- - -.. literalinclude:: samples/metadef-objects-list-response.json - :language: json - - - -Show object -~~~~~~~~~~~ - -.. rest_method:: GET /v2/metadefs/namespaces/{namespace_name}/objects/{object_name} - -Shows the definition for an object. - -The response body shows a single object entity. - -Normal response codes: 200 - -.. yep, 400 if the request includes a body - -Error response codes: 400, 401, 403, 404 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - - object_name: object_name - -There is no request body. - - -Response Parameters -------------------- - -.. 
rest_parameters:: metadefs-parameters.yaml - - - created_at: created_at - - description: object-description - - name: object-name - - properties: object-properties - - required: object-required - - schema: object-schema - - self: self - - updated_at: updated_at - - -Response Example ----------------- - -.. literalinclude:: samples/metadef-object-details-response.json - :language: json - - - -Update object -~~~~~~~~~~~~~ - -.. rest_method:: PUT /v2/metadefs/namespaces/{namespace_name}/objects/{object_name} - -Updates an object definition in a namespace. - -The object resource is completely replaced by what you specify in the request -body. Thus, if you leave out any of the optional parameters, and they exist in -the current object, they will be eliminated by this call. - -It is possible to change the name of the object with this call; if you do, note -that the URL for the object (specified by the ``self`` field) will change. - -Normal response codes: 200 - -Error response codes: 400, 401, 403, 404, 409 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - - object_name: object_name - - name: object-name - - description: object-description-in-request - - properties: object-properties-in-request - - required: object-required-in-request - - -Request Example ---------------- - -.. literalinclude:: samples/metadef-object-update-request.json - :language: json - - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - created_at: created_at - - description: object-description - - name: object-name - - properties: object-properties - - required: object-required - - schema: object-schema - - self: self - - updated_at: updated_at - - -Response Example ----------------- - -.. literalinclude:: samples/metadef-object-update-response.json - :language: json - - - -Delete object -~~~~~~~~~~~~~ - -.. 
rest_method:: DELETE /v2/metadefs/namespaces/{namespace_name}/objects/{object_name} - -Deletes an object definition from a namespace. - -.. note:: - If the namespace containing the object is protected, that is, if the - ``protected`` attribute of the namespace is ``true``, then you must first - set the ``protected`` attribute to ``false`` on the namespace before you - will be permitted to delete the object. - - * If you try to delete an object from a protected namespace, the call - returns the ``403`` response code. - * To change the ``protected`` attribute of a namespace, use the - :ref:`Update namespace ` call. - -When you successfully delete an object from a namespace, the -response is empty and the response code is ``204``. - -Normal response codes: 204 - -Error response codes: 400, 401, 403, 404 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - - object_name: object_name - -There is no request body. - -There is no response body. diff --git a/api-ref/source/v2/metadefs-namespaces-properties.inc b/api-ref/source/v2/metadefs-namespaces-properties.inc deleted file mode 100644 index 4059cbcf..00000000 --- a/api-ref/source/v2/metadefs-namespaces-properties.inc +++ /dev/null @@ -1,306 +0,0 @@ -.. -*- rst -*- - -Metadata definition properties -****************************** - -Creates, lists, shows details for, updates, and deletes metadata definition -properties. - -*Since API v2.2* - -Create property -~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/metadefs/namespaces/{namespace_name}/properties - -Creates a property definition in a namespace. - -The schema is a subset of the JSON property definition schema. - -Normal response codes: 201 - -Error response codes: 404 - - -Request -------- - -.. 
rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - - name: name - - title: title - - type: type - - additionalItems: additionalItems - - description: property-description-in-request - - default: default - - items: items - - operators: operators - - enum: enum - - maximum: maximum - - minItems: minItems - - readonly: readonly - - minimum: minimum - - maxItems: maxItems - - maxLength: maxLength - - uniqueItems: uniqueItems - - pattern: pattern - - minLength: minLength - -Request Example ---------------- - -.. literalinclude:: samples/metadef-property-create-request.json - :language: json - - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - additionalItems: additionalItems - - description: property-description - - title: title - - default: default - - items: items - - operators: operators - - enum: enum - - maximum: maximum - - minItems: minItems - - readonly: readonly - - minimum: minimum - - maxItems: maxItems - - maxLength: maxLength - - uniqueItems: uniqueItems - - pattern: pattern - - type: type - - minLength: minLength - - name: name - -Response Example ----------------- - -.. literalinclude:: samples/metadef-property-create-response.json - :language: json - - -List properties -~~~~~~~~~~~~~~~ - -.. rest_method:: - GET /v2/metadefs/namespaces/{namespace_name}/properties - -Lists property definitions in a namespace. - -Normal response codes: 200 - -Error response codes: 400, 401, 403, 404 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - -There is no request body. - - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - properties: properties-dict - - -Response Example ----------------- - -.. literalinclude:: samples/metadef-properties-list-response.json - :language: json - - -Show property definition -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: - GET /v2/metadefs/namespaces/{namespace_name}/properties/{property_name} - -Shows the definition for a property. - -If you use the ``resource_type`` query parameter, the API removes the prefix of -the resource type from the property name before it submits the query. This -enables you to look for a property name that starts with a prefix from an -associated resource type. - -The response body shows a single property entity. - - -Normal response codes: 200 - -Error response codes: 401, 403, 404 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - property_name: property_name - - namespace_name: namespace_name - - resource_type: resource_type-in-query - - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - additionalItems: additionalItems - - description: property-description - - title: title - - default: default - - items: items - - operators: operators - - enum: enum - - maximum: maximum - - minItems: minItems - - readonly: readonly - - minimum: minimum - - maxItems: maxItems - - maxLength: maxLength - - uniqueItems: uniqueItems - - pattern: pattern - - type: type - - minLength: minLength - - name: name - - - -Response Example ----------------- - -.. literalinclude:: samples/metadef-property-details-response.json - :language: json - - - - -Update property definition -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: - PUT /v2/metadefs/namespaces/{namespace_name}/properties/{property_name} - -Updates a property definition. - -Normal response codes: 200 - -Error response codes: 400, 401, 403, 404, 409 - - -Request -------- - -.. 
rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - - property_name: property_name - - name: name-property - - title: title - - type: type - - additionalItems: additionalItems - - description: description - - default: default - - items: items - - operators: operators - - enum: enum - - maximum: maximum - - minItems: minItems - - readonly: readonly - - minimum: minimum - - maxItems: maxItems - - maxLength: maxLength - - uniqueItems: uniqueItems - - pattern: pattern - - minLength: minLength - -Request Example ---------------- - -.. literalinclude:: samples/metadef-property-create-request.json - :language: json - - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - additionalItems: additionalItems - - description: description - - title: title - - default: default - - items: items - - operators: operators - - enum: enum - - maximum: maximum - - minItems: minItems - - readonly: readonly - - minimum: minimum - - maxItems: maxItems - - maxLength: maxLength - - uniqueItems: uniqueItems - - pattern: pattern - - type: type - - minLength: minLength - - name: name-property - - - -Response Example ----------------- - -.. literalinclude:: samples/metadef-property-update-response.json - :language: json - - - -Remove property definition -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: - DELETE /v2/metadefs/namespaces/{namespace_name}/properties/{property_name} - -Removes a property definition from a namespace. - -.. note:: - If the namespace containing the property is protected, that is, if the - ``protected`` attribute of the namespace is ``true``, then you must first - set the ``protected`` attribute to ``false`` on the namespace before you - will be permitted to delete the property. - - * If you try to delete a property from a protected namespace, the call - returns the ``403`` response code. - * To change the ``protected`` attribute of a namespace, use the - :ref:`Update namespace ` call. 
- -When you successfully delete a property from a namespace, the -response is empty and the response code is ``204``. - -Normal response codes: 204 - -Error response codes: 401, 403, 404 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - property_name: property_name - - namespace_name: namespace_name - diff --git a/api-ref/source/v2/metadefs-namespaces-tags.inc b/api-ref/source/v2/metadefs-namespaces-tags.inc deleted file mode 100644 index 6d52fad5..00000000 --- a/api-ref/source/v2/metadefs-namespaces-tags.inc +++ /dev/null @@ -1,300 +0,0 @@ -.. -*- rst -*- - -Metadata definition tags -************************ - -Creates, lists, shows details for, updates, and deletes metadata -definition tags. - -*Since API v2.2* - - -Create tag definition -~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: - POST /v2/metadefs/namespaces/{namespace_name}/tags/{tag_name} - -Adds a tag to the list of namespace tag definitions. - -Normal response codes: 201 - -Error response codes: 400, 401, 403, 404, 409 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - - tag_name: tag_name - -There is no request body. - - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - created_at: created_at - - name: name-tag - - updated_at: updated_at - - -Response Example ----------------- - -.. literalinclude:: samples/metadef-tag-create-response.json - :language: json - - -Get tag definition -~~~~~~~~~~~~~~~~~~ - -.. rest_method:: - GET /v2/metadefs/namespaces/{namespace_name}/tags/{tag_name} - -Gets a definition for a tag. - -The response body shows a single tag entity. - -Normal response codes: 200 - -Error response codes: 400, 401, 403, 404 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - tag_name: tag_name - - namespace_name: namespace_name - -There is no request body. - -Response Parameters -------------------- - -.. 
rest_parameters:: metadefs-parameters.yaml - - - created_at: created_at - - name: name-tag - - updated_at: updated_at - - -Response Example ----------------- - -.. literalinclude:: samples/metadef-tag-details-response.json - :language: json - - -Update tag definition -~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: - PUT /v2/metadefs/namespaces/{namespace_name}/tags/{tag_name} - -Renames a tag definition. - -Normal response codes: 200 - -Error response codes: 400, 401, 403, 404, 409 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - tag_name: tag_name - - namespace_name: namespace_name - - name: name-tag - -Request Example ---------------- - -.. literalinclude:: samples/metadef-tag-update-request.json - :language: json - - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - created_at: created_at - - name: name-tag - - updated_at: updated_at - - -Response Example ----------------- - -.. literalinclude:: samples/metadef-tag-update-response.json - :language: json - - -Delete tag definition -~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: - DELETE /v2/metadefs/namespaces/{namespace_name}/tags/{tag_name} - -Deletes a tag definition within a namespace. - -.. note:: - If the namespace containing the tag is protected, that is, if the - ``protected`` attribute of the namespace is ``true``, then you must first - set the ``protected`` attribute to ``false`` on the namespace before you - will be permitted to delete the tag. - - * If you try to delete a tag from a protected namespace, the call returns - the ``403`` response code. - * To change the ``protected`` attribute of a namespace, use the - :ref:`Update namespace ` call. - -When you successfully delete a tag from a namespace, the response is empty and -the response code is ``204``. - -Normal response codes: 204 - -Error response codes: 400, 401, 403, 404 - - -Request -------- - -.. 
rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - - tag_name: tag_name - - -Create tags -~~~~~~~~~~~ - -.. rest_method:: POST /v2/metadefs/namespaces/{namespace_name}/tags - -Creates one or more tag definitions in a namespace. - -Normal response codes: 201 - -Error response codes: 400, 401, 403, 404, 409 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - - tags: tags - -Request Example ---------------- - -.. literalinclude:: samples/metadef-tags-create-request.json - :language: json - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - name: name - - tags: tags - -Response Example ----------------- - -.. literalinclude:: samples/metadef-tag-create-response.json - :language: json - - - -List tags -~~~~~~~~~ - -.. rest_method:: - GET /v2/metadefs/namespaces/{namespace_name}/tags - -Lists the tag definitions within a namespace. - -To manually paginate through the list of tags, use the ``limit`` -and ``marker`` parameters. - -To sort the results of this operation use the ``sort_key`` and -``sort_dir`` parameters. The API uses the natural sort order of the -tag attribute of the ``sort_key`` parameter. - - -Normal response codes: 200 - -Error response codes: 401, 403, 404 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - - limit: limit-tags - - marker: marker-tags - - sort_key: sort_key-tags - - sort_dir: sort_dir - -There is no request body. - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - tags: tags - - -Response Example ----------------- - -.. literalinclude:: samples/metadef-tags-list-response.json - :language: json - - -Delete all tag definitions -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: - DELETE /v2/metadefs/namespaces/{namespace_name}/tags - -Deletes all tag definitions within a namespace. - -.. 
note:: - If the namespace containing the tags is protected, that is, if the - ``protected`` attribute of the namespace is ``true``, then you must first - set the ``protected`` attribute to ``false`` on the namespace before you - will be permitted to delete the tags. If you try to delete the tags - from a protected namespace, the call returns the ``403`` response code. - -When you successfully delete the tags from a namespace, the -response is empty and the response code is ``204``. - -Normal response codes: 204 - -Error response codes: 403, 404 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - -There is no request body. - -There is no response body. diff --git a/api-ref/source/v2/metadefs-namespaces.inc b/api-ref/source/v2/metadefs-namespaces.inc deleted file mode 100644 index 70a5f8af..00000000 --- a/api-ref/source/v2/metadefs-namespaces.inc +++ /dev/null @@ -1,337 +0,0 @@ -.. -*- rst -*- - -Metadata definition namespaces -****************************** - -Creates, lists, shows details for, updates, and deletes metadata -definition namespaces. Defines namespaces that can contain property -definitions, object definitions, and resource type associations. - -*Since API v2.2* - - -Create namespace -~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/metadefs/namespaces - -Creates a namespace. - -A namespace must be unique across all users. Attempting to create an already -existing namespace will result in a 409 (Conflict) response. - -The ``Location`` response header contains the newly-created URI for -the namespace. - -Normal response codes: 201 - -Error response codes: 400, 401, 403, 409 - - -Request -------- - -.. 
rest_parameters:: metadefs-parameters.yaml - - - namespace: namespace - - display_name: display_name - - description: description - - visibility: visibility-in-request - - protected: protected-in-request - -The request body may also contain properties, objects, and resource type -associations, or these can be added later by the :ref:`v2-update-namespace` -call. - -Request Example ---------------- - -.. literalinclude:: samples/metadef-namespace-create-request-simple.json - :language: json - - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - Location: Location - - created_at: created_at - - description: description - - display_name: display_name - - namespace: namespace - - owner: owner - - protected: protected - - schema: schema-namespace - - self: self - - updated_at: updated_at - - visibility: visibility - -If the request body contained properties, objects, or resource type -associations, these will be included in the response. - - -Response Example ----------------- - -.. code-block:: console - - HTTP/1.1 201 Created - Content-Length: 427 - Content-Type: application/json; charset=UTF-8 - Location: http://glance.openstack.example.org/v2/metadefs/namespaces/FredCo::SomeCategory::Example - X-Openstack-Request-Id: req-6d4a8ad2-c018-4bfc-8fe5-1a36c23c43eb - Date: Thu, 19 May 2016 16:05:48 GMT - -.. literalinclude:: samples/metadef-namespace-create-response-simple.json - :language: json - - -List namespaces -~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/metadefs/namespaces - -Lists available namespaces. - -Returns a list of namespaces to which the authenticated user has access. If -the list is too large to fit in a single response, either because of operator -configuration or because you've included a ``limit`` query parameter in the -request to restrict the response size, the response will contain a link that -you can use to get the next page of namespaces. 
Check for the presence of a -``next`` link and use it as the URI in a subsequent HTTP GET request. Follow -this pattern until a ``next`` link is no longer provided. - -The ``next`` link preserves any query parameters that you send in your initial -request. You can use the ``first`` link to return to the first page in the -collection. If you prefer to paginate through namespaces manually, use the -``limit`` and ``marker`` parameters. - -The list operation accepts the ``resource_types`` and ``visibility`` query -parameters, which you can use to filter the response. - -To sort the results of this operation, use the ``sort_key`` and ``sort_dir`` -parameters. The API uses the natural sorting order in the namespace attribute -that you provide as the ``sort_key`` parameter. - -Normal response codes: 200 - -Error response codes: 401, 403, 404 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - limit: limit - - marker: marker - - visibility: visibility-in-query - - resource_types: resource_types-in-query - - sort_key: sort_key - - sort_dir: sort_dir - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - first: first - - namespaces: namespaces - - next: next - - schema: schema-namespaces - -Response Example ----------------- - -.. literalinclude:: samples/metadef-namespaces-list-response.json - :language: json - - -Get namespace details -~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/metadefs/namespaces/{namespace_name} - -Gets details for a namespace. - -The response body shows a single namespace entity with all details -including properties, objects, and resource type associations. - -If the namespace contains a resource type association that specifies a prefix, -you may optionally include the name of the resource type as a query parameter. -In that case, the prefix will be applied to all property names in the response. -(See below for an example.) - -Normal response codes: 200 - -.. 
returns 400 if a request body is sent - -Error response codes: 400, 401, 403, 404 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - - resource_type: resource_type-in-query-namespace-detail - -The request does not take a body. - - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - created_at: created_at - - description: description - - display_name: display_name - - namespace: namespace - - objects: objects - - owner: owner - - properties: properties-dict - - protected: protected - - resource_type_associations: resource_type_associations - - schema: schema-namespace - - self: self - - visibility: visibility - - -Response Example ----------------- - -.. literalinclude:: samples/metadef-namespace-details-response.json - :language: json - -Response Example (with resource_type query parameter) ------------------------------------------------------ - -This is the result of the following request: - -``GET /v2/metadefs/namespaces/OS::Compute::Libvirt?resource_type=OS::Glance::Image`` - -Note that the name of each property has had the appropriate prefix applied to -it. - -.. literalinclude:: samples/metadef-namespace-details-with-rt-response.json - :language: json - - -.. _v2-update-namespace: - -Update namespace -~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v2/metadefs/namespaces/{namespace_name} - -Updates a namespace. - -.. note:: - Be careful using this call, especially when all you want to do is change the - ``protected`` value so that you can delete some objects, properties, or - resource type associations in the namespace. - - While only the ``namespace`` is required in the request body, if this call - is made with *only* the ``namespace`` in request body, the other attributes - listed below will be set to their default values -- which in the case of - ``description`` and ``display_name``, is null. 
- - So if you want to change *only* the ``protected`` value with this call, be - sure to also include the current values of the following parameters in the - request body: - - - ``description`` - - ``display_name`` - - ``namespace`` - - ``visibility`` - - The objects, properties, and resource type associations in a namespace - are unaffected by this call. - -Normal response codes: 200 - -Error response codes: 400, 401, 403, 404, 409 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - - description: description - - display_name: display_name - - namespace: namespace - - protected: protected-in-request - - visibility: visibility-in-request - -Request Example ---------------- - -.. literalinclude:: samples/metadef-namespace-update-request.json - :language: json - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - created_at: created_at - - description: description - - display_name: display_name - - namespace: namespace - - owner: owner - - protected: protected - - schema: schema-namespace - - self: self - - updated_at: updated_at - - visibility: visibility - - -Response Example ----------------- - -.. literalinclude:: samples/metadef-namespace-update-response.json - :language: json - - -Delete namespace -~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v2/metadefs/namespaces/{namespace_name} - -Deletes a namespace and its properties, objects, and any resource type -associations. - -.. note:: - - If the namespace is protected, that is, if the ``protected`` attribute of - the namespace is ``true``, then you must first set the ``protected`` - attribute to ``false`` on the namespace before you will be permitted to - delete it. - - * If you try to delete a protected namespace, the call returns the ``403`` - response code. - * To change the ``protected`` attribute of a namespace, use the - :ref:`Update namespace ` call. 
- -A successful operation returns the HTTP ``204`` (No Content) response code. - -Normal response codes: 204 - -Error response codes: 400, 401, 403, 404 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - -The request does not take a body. - -The request does not return a body. diff --git a/api-ref/source/v2/metadefs-parameters.yaml b/api-ref/source/v2/metadefs-parameters.yaml deleted file mode 100644 index 96a32646..00000000 --- a/api-ref/source/v2/metadefs-parameters.yaml +++ /dev/null @@ -1,527 +0,0 @@ -# variables in header -Content-Type-json: - description: | - The media type descriptor for the request body. Use - ``application/json``. - in: header - required: true - type: string -Location: - description: | - The newly-created URI for the namespace. - in: header - required: true - type: string - -# variables in path -name: - description: | - Name of the resource type. - in: path - required: true - type: string -namespace_name: - description: | - The name of the namespace whose details you want to see. (The name is the - value of a namespace's ``namespace`` field.) - in: path - required: true - type: string -object_name: - description: | - The name of the object. - in: path - required: true - type: string -property_name: - description: | - The name of the property. - in: path - required: true - type: string -resource_type_name: - description: | - The name of the resource type. - in: path - required: true - type: string -tag_name: - description: | - The name of the tag. - in: path - required: true - type: string - -# variables in query -limit: - description: | - Requests a page size of items. Returns a number of items up to a limit - value. Use the ``limit`` parameter to make an initial limited request and - use the ID of the last-seen item from the response as the ``marker`` - parameter value in a subsequent limited request. 
- in: query - required: false - type: integer -limit-tags: - description: | - Requests a page size of tags. Returns a number of tags up to a limit - value. Use the ``limit`` parameter to make an initial limited request and - use the name of the last-seen tag from the response as the ``marker`` - parameter value in a subsequent limited request. - in: query - required: false - type: integer -marker: - description: | - Allows specification of a *namespace identifier*. When present, only - namespaces occurring after that namespace will be listed, that is, - those namespaces having a ``sort_key`` later than that of the marker - in the ``sort_dir`` direction. - in: query - required: false - type: string -marker-tags: - description: | - Allows specification of a tag name. When present, only tags occurring - *after* the named tag will be listed, that is, those namespaces having a - ``sort_key`` later than that of the marker in the ``sort_dir`` direction. - in: query - required: false - type: string -resource_type-in-query: - description: | - Filters the response by property names that start with a prefix from an - associated resource type. The API removes the prefix of the resource type - from the property name in the response. - in: query - required: false - type: string -resource_type-in-query-namespace-detail: - description: | - Apply the prefix for the specified resource type to the names of the - properties listed in the response. If the resource type specified does not - have an association with this namespace, or if the resource type is - associated but does not have a prefix defined in this namespace, this - parameter is ignored. - in: query - required: false - type: string -resource_types-in-query: - description: | - Filters the response to include only those namespaces that contain the - specified resource type or types as resource type associations. Use the - comma (``,``) character to separate multiple values. 
For example, - ``OS::Glance::Image,OS::Nova::Flavor`` shows only namespaces associated - with these resource types. - in: query - required: false - type: integer -sort_dir: - description: | - Sorts the response. Use ``asc`` for ascending or ``desc`` for descending - order. The default is ``desc``. - in: query - required: false - type: string -sort_key: - description: | - Sorts the response by an attribute. Accepted values are ``namespace``, - ``created_at``, and ``updated_at``. Default is ``created_at``. - in: query - required: false - type: string -sort_key-tags: - description: | - Sorts the response by an attribute. Accepted values are ``name``, - ``created_at``, and ``updated_at``. Default is ``created_at``. - in: query - required: false - type: string -visibility-in-query: - description: | - Filters the response by a namespace visibility value. A valid value is - ``public`` or ``private``. If you omit this parameter, the response shows - both ``public`` and ``private`` namespaces. - in: query - required: false - type: string - -# variables in body -additionalItems: - description: | - Describes extra items, if you use tuple typing. If the value of ``items`` - is an array (tuple typing) and the instance is longer than the list of - schemas in ``items``, the additional items are described by the schema in - this property. If this value is ``false``, the instance cannot be longer - than the list of schemas in ``items``. If this value is ``true``, that is - equivalent to the empty schema (anything goes). - in: body - required: false - type: string -created_at: - description: | - The date and time when the resource was created. - - The date and time stamp format is `ISO 8601 - `_. - in: body - required: true - type: string -default: - description: | - Default property description. - in: body - required: false - type: string -description: - description: | - The description of the namespace. 
- in: body - required: false - type: string -display_name: - description: | - User-friendly name to use in a UI to display the namespace name. - in: body - required: false - type: string -enum: - description: | - Enumerated list of property values. - in: body - required: true - type: array -enum-in-request: - description: | - Enumerated list of property values. - in: body - required: false - type: array -first: - description: | - The URI for the first page of response. - in: body - required: true - type: string -hypervisor_type: - description: | - Hypervisor type of property values. - in: body - required: true - type: object -items: - description: | - Schema for the items in an array. - in: body - required: false - type: string -maximum: - description: | - Maximum allowed numerical value. - in: body - required: false - type: string -maxItems: - description: | - Maximum length of an array. - in: body - required: false - type: string -maxLength: - description: | - Maximum allowed string length. - in: body - required: false - type: string -minimum: - description: | - Minimum allowed numerical value. - in: body - required: false - type: string -minItems: - description: | - Minimum length of an array. - in: body - required: false - type: string -minLength: - description: | - Minimum allowed string length. - in: body - required: false - type: string -name-property: - description: | - The name of the property. - in: body - required: true - type: string -name-resource-type: - description: | - Name of the resource type. - in: body - required: true - type: string -name-tag: - description: | - The name of the tag. - in: body - required: true - type: string -namespace: - description: | - An identifier (a name) for the namespace. The value must be unique across - all users. - in: body - required: true - type: string -namespaces: - description: | - A list of ``namespace`` objects. 
- in: body - required: true - type: array -next: - description: | - The URI for the next page of response. Will not be present on the last - page of the response. - in: body - required: true - type: string -object-description: - description: | - Detailed description of the object. - in: body - required: true - type: string -object-description-in-request: - description: | - Detailed description of the object. - in: body - required: false - type: string -object-name: - description: | - The name of the object, suitable for use as an identifier. - in: body - required: true - type: string -object-properties: - description: | - A set of key:value pairs, where each value is a *property* entity. - in: body - required: true - type: object -object-properties-in-request: - description: | - A set of key:value pairs, where each value is a *property* entity. - in: body - required: false - type: object -object-required: - description: | - A list of the names of properties that are required on this object. - in: body - required: true - type: array -object-required-in-request: - description: | - A list of the names of properties that are required on this object. - in: body - required: false - type: array -object-schema: - description: | - The URI of the JSON schema describing an *object*. - in: body - required: true - type: string -objects: - description: | - One or more object definitions of the namespace. - in: body - required: true - type: string -objects-namespace: - description: | - Namespace object definitions, if any. - in: body - required: false - type: object -operators: - description: | - Operators property description. - in: body - required: false - type: string -owner: - description: | - An identifier for the owner of this resource, usually the tenant ID. - in: body - required: true - type: string -pattern: - description: | - A regular expression ( `ECMA 262 - `_ ) - that a string value must match. 
- in: body - required: false - type: string -prefix: - description: | - Prefix for any properties in the namespace that you want to apply to the - resource type. If you specify a prefix, you must append a prefix separator, - such as the colon (``:``) character. - in: body - required: false - type: string -properties-dict: - description: | - A dictionary of key:value pairs, where each value is a *property* object as - defined by the :ref:`Metadefs Property Schema `. - in: body - required: true - type: object -properties-nonempty: - description: | - One or more property definitions for the - namespace. - in: body - required: true - type: object -properties-nullable: - description: | - Namespace property definitions, if any. - in: body - required: false - type: object -properties_target: - description: | - Some resource types allow more than one key and value pair for each - instance. For example, the Image service allows both user and image - metadata on volumes. The ``properties_target`` parameter enables a - namespace target to remove the ambiguity. - in: body - required: false - type: string -property-description: - description: | - Detailed description of the property. - in: body - required: true - type: string -property-description-in-request: - description: | - Detailed description of the property. - in: body - required: false - type: string -protected: - description: | - Namespace protection for deletion, either ``true`` or ``false``. - in: body - required: true - type: boolean -protected-in-request: - description: | - Namespace protection for deletion. A valid value is ``true`` or - ``false``. Default is ``false``. - in: body - required: false - type: boolean -readonly: - description: | - Indicates whether this is a read-only property. - in: body - required: false - type: boolean -resource_type_associations: - description: | - A list, each element of which is described by the :ref:`Metadefs Resource - Type Association Schema `. 
- in: body - required: true - type: array -resource_types-list: - description: | - A list of abbreviated *resource type* JSON objects, where each object - contains the ``name`` of the resource type and its ``created_at`` - and ``updated_at`` timestamps in `ISO 8601 Format - `_. - in: body - required: true - type: array -schema-namespace: - description: | - The URI of the JSON schema describing a *namespace*. - in: body - required: true - type: string -schema-namespaces: - description: | - The URI of the JSON schema describing a *namespaces* entity, that is, an - entity consisting of a list of abbreviated namespace objects. - in: body - required: true - type: string -self: - description: | - The URI for this resource. - in: body - required: true - type: string -tag-name: - description: | - The name of the tag. - in: body - required: true - type: string -tags: - description: | - A list of *tag* objects, where each object is defined by the - :ref:`Metadefs Tag Schema `. - in: body - required: true - type: array -title: - description: | - The title of the property. - in: body - required: true - type: string -type: - description: | - The property type. - in: body - required: true - type: string -uniqueItems: - description: | - Indicates whether all values in the array must be distinct. - in: body - required: false - type: string -updated_at: - description: | - The date and time when the resource was last updated. - - The date and time stamp format is `ISO 8601 - `_. - in: body - required: true - type: string -visibility: - description: | - The namespace visibility, either ``public`` or ``private``. - in: body - required: true - type: enum -visibility-in-request: - description: | - The namespace visibility. A valid value is ``public`` or ``private``. - Default is ``private``. 
- in: body - required: false - type: enum diff --git a/api-ref/source/v2/metadefs-resourcetypes.inc b/api-ref/source/v2/metadefs-resourcetypes.inc deleted file mode 100644 index ac37b2c9..00000000 --- a/api-ref/source/v2/metadefs-resourcetypes.inc +++ /dev/null @@ -1,169 +0,0 @@ -.. -*- rst -*- - -Metadata definition resource types -********************************** - -Lists resource types. Also, creates, lists, and removes resource type -associations in a namespace. - -*Since API v2.2* - - -List resource types -~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/metadefs/resource_types - -Lists all available resource types. - -Using the other API calls in this section, you can create and maintain -*resource type associations* between metadata definition namespaces and the -resource types that are returned by this call. - -Normal response codes: 200 - -Error response codes: 400, 401, 404 - - -Request -------- - -There are no request parameters. - - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - resource_types: resource_types-list - - -Response Example ----------------- - -.. literalinclude:: samples/metadef-resource-types-list-response.json - :language: json - - -Create resource type association -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: - POST /v2/metadefs/namespaces/{namespace_name}/resource_types - -Creates a resource type association between a namespace and the resource -type specified in the body of the request. - -.. note:: - If the resource type name specified does not name an existing resource type, - a new resource type will be created as a side effect of this operation. - -Normal response codes: 201 - -Error response codes: 400, 401, 403, 404, 409 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - - name: name - - prefix: prefix - - properties_target: properties_target - - -Request Example ---------------- - -.. 
literalinclude:: samples/metadef-resource-type-create-request.json - :language: json - - - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - created_at: created_at - - prefix: prefix - - properties_target: properties_target - - name: name - - updated_at: updated_at - - -List resource type associations -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/metadefs/namespaces/{namespace_name}/resource_types - -Lists resource type associations in a namespace. - -Normal response codes: 200 - -Error response codes: 400, 401, 403, 404 - - -Request -------- - -.. rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - -There is no request body. - - -Response Parameters -------------------- - -.. rest_parameters:: metadefs-parameters.yaml - - - resource_type_associations: resource_type_associations - - - -Response Example ----------------- - -.. literalinclude:: samples/metadef-resource-types-list-response.json - :language: json - - -Remove resource type association -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: - DELETE /v2/metadefs/namespaces/{namespace_name}/resource_types/{name} - -Removes a resource type association in a namespace. - -.. note:: - If the namespace containing the association is protected, that is, if the - ``protected`` attribute of the namespace is ``true``, then you must first - set the ``protected`` attribute to ``false`` on the namespace before you - will be permitted to remove the resource type association. - - * If you try to delete a resource type association from a protected - namespace, the call returns the ``403`` response code. - * To change the ``protected`` attribute of a namespace, use the - :ref:`Update namespace ` call. - -When you successfully delete a resource type association from a namespace, the -response is empty and the response code is ``204``. - -Normal response codes: 204 - -Error response codes: 400, 401, 403, 404 - - -Request -------- - -.. 
rest_parameters:: metadefs-parameters.yaml - - - namespace_name: namespace_name - - name: resource_type_name diff --git a/api-ref/source/v2/metadefs-schemas.inc b/api-ref/source/v2/metadefs-schemas.inc deleted file mode 100644 index b1b20724..00000000 --- a/api-ref/source/v2/metadefs-schemas.inc +++ /dev/null @@ -1,326 +0,0 @@ -.. -*- rst -*- - -Metadata definition schemas -*************************** - -Gets a JSON-schema document that represents a metadata definition -entity. - -*(Since API v2.2)* - - -Show metadata definition namespace schema -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/schemas/metadefs/namespace - -Shows a JSON schema document that represents a metadata definition *namespace* entity. - -The following schema document is an example. The authoritative response is the -actual response to the API call. - - -Normal response codes: 200 - -Error response codes: 400, 401 - - -Request -------- - -There are no request parameters. The call does not take a request body. - - -Response Example ----------------- - -.. literalinclude:: samples/schemas-metadef-namespace-show-response.json - :language: json - - - -Show metadata definition namespaces schema -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/schemas/metadefs/namespaces - -Shows a JSON schema document that represents a metadata definition *namespaces* -entity. - -A namespaces entity is a container for *namespace* entities. - -The following schema document is an example. The authoritative response is the -actual response to the API call. - - -Normal response codes: 200 - -Error response codes: 400, 401 - - -Request -------- - -There are no request parameters. The call does not take a request body. - - -Response Example ----------------- - -.. literalinclude:: samples/schemas-metadef-namespaces-list-response.json - :language: json - - -.. 
_md-schema-rt-assoc: - -Show metadata definition namespace resource type association schema -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/schemas/metadefs/resource_type - -Shows a JSON schema document that represents a metadata definition namespace -*resource type association* entity. - -The following schema document is an example. The authoritative response is the -actual response to the API call. - - -Normal response codes: 200 - -Error response codes: 400, 401 - - -Request -------- - -There are no request parameters. The call does not take a request body. - - -Response Example ----------------- - -.. literalinclude:: samples/schemas-metadef-resource-type-association-show-response.json - :language: json - - -Show metadata definition namespace resource type associations schema -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/schemas/metadefs/resource_types - -Shows a JSON schema document that represents a metadata definition namespace -*resource type associations* entity. - -A resource type associations entity is a container for *resource type -association* entities. - -The following schema document is an example. The authoritative response is the -actual response to the API call. - - -Normal response codes: 200 - -Error response codes: 400, 401 - - -Request -------- - -There are no request parameters. The call does not take a request body. - - -Response Example ----------------- - -.. literalinclude:: samples/schemas-metadef-resource-type-associations-list-response.json - :language: json - - -Show metadata definition object schema -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/schemas/metadefs/object - -Shows a JSON schema document that represents a metadata definition *object* -entity. - -The following schema document is an example. The authoritative response is the -actual response to the API call. 
- - -Normal response codes: 200 - -Error response codes: 400, 401 - - -Request -------- - -There are no request parameters. The call does not take a request body. - - -Response Example ----------------- - -.. literalinclude:: samples/schemas-metadef-object-show-response.json - :language: json - - -Show metadata definition objects schema -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/schemas/metadefs/objects - -Shows a JSON schema document that represents a metadata definition *objects* -entity. - -An objects entity is a container for *object* entities. - -The following schema document is an example. The authoritative response is the -actual response to the API call. - - -Normal response codes: 200 - -Error response codes: 400, 401 - - -Request -------- - -There are no request parameters. The call does not take a request body. - - -Response Example ----------------- - -.. literalinclude:: samples/schemas-metadef-objects-list-response.json - :language: json - - -.. _md-schema-property: - -Show metadata definition property schema -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/schemas/metadefs/property - -Shows a JSON schema document that represents a metadata definition *property* -entity. - -The following schema document is an example. The authoritative response is the -actual response to the API call. - - -Normal response codes: 200 - -Error response codes: 400, 401 - - -Request -------- - -There are no request parameters. The call does not take a request body. - - -Response Example ----------------- - -.. literalinclude:: samples/schemas-metadef-property-show-response.json - :language: json - - - -Show metadata definition properties schema -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/schemas/metadefs/properties - -Shows a JSON schema document that represents a metadata definition *properties* entity. - -A properties entity is a container for *property* entities. 
- -The following schema document is an example. The authoritative -response is the actual response to the API call. - - -Normal response codes: 200 -Error response codes: 400, 401 - - -Request -------- - -There are no request parameters. The call does not take a request body. - - -Response Example ----------------- - -.. literalinclude:: samples/schemas-metadef-properties-list-response.json - :language: json - - - -.. _md-schema-tag: - -Show metadata definition tag schema -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/schemas/metadefs/tag - -Shows a JSON schema document that represents a metadata definition *tag* entity. - -The following schema document is an example. The authoritative response is the -actual response to the API call. - - -Normal response codes: 200 - -Error response codes: 400, 401 - - -Request -------- - -There are no request parameters. The call does not take a request body. - - -Response Example ----------------- - -.. literalinclude:: samples/schemas-metadef-tag-show-response.json - :language: json - - - -Show metadata definition tags schema -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/schemas/metadefs/tags - -Shows a JSON schema document that represents a metadata definition *tags* -entity. - -A tags entity is a container for *tag* entities. - -The following schema document is an example. The authoritative response is the -actual response to the API call. - - -Normal response codes: 200 - -Error response codes: 400, 401 - - -Request -------- - -There are no request parameters. The call does not take a request body. - - -Response Example ----------------- - -.. 
literalinclude:: samples/schemas-metadef-tags-list-response.json - :language: json diff --git a/api-ref/source/v2/samples/image-create-request.json b/api-ref/source/v2/samples/image-create-request.json deleted file mode 100644 index bcb480b6..00000000 --- a/api-ref/source/v2/samples/image-create-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "container_format": "bare", - "disk_format": "raw", - "name": "Ubuntu", - "id": "b2173dd3-7ad6-4362-baa6-a68bce3565cb" -} diff --git a/api-ref/source/v2/samples/image-create-response.json b/api-ref/source/v2/samples/image-create-response.json deleted file mode 100644 index dd2289ee..00000000 --- a/api-ref/source/v2/samples/image-create-response.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "status": "queued", - "name": "Ubuntu", - "tags": [], - "container_format": "bare", - "created_at": "2015-11-29T22:21:42Z", - "size": null, - "disk_format": "raw", - "updated_at": "2015-11-29T22:21:42Z", - "visibility": "private", - "locations": [], - "self": "/v2/images/b2173dd3-7ad6-4362-baa6-a68bce3565cb", - "min_disk": 0, - "protected": false, - "id": "b2173dd3-7ad6-4362-baa6-a68bce3565cb", - "file": "/v2/images/b2173dd3-7ad6-4362-baa6-a68bce3565cb/file", - "checksum": null, - "owner": "bab7d5c60cd041a0a36f7c4b6e1dd978", - "virtual_size": null, - "min_ram": 0, - "schema": "/v2/schemas/image" -} diff --git a/api-ref/source/v2/samples/image-details-deactivate-response.json b/api-ref/source/v2/samples/image-details-deactivate-response.json deleted file mode 100644 index 43d41c33..00000000 --- a/api-ref/source/v2/samples/image-details-deactivate-response.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "status": "deactivated", - "name": "cirros-0.3.2-x86_64-disk", - "tags": [], - "container_format": "bare", - "created_at": "2014-05-05T17:15:10Z", - "disk_format": "qcow2", - "updated_at": "2014-05-05T17:15:11Z", - "visibility": "public", - "self": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27", - "min_disk": 0, - "protected": false, - "id": 
"1bea47ed-f6a9-463b-b423-14b9cca9ad27", - "file": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27/file", - "checksum": "64d7c1cd2b6f60c92c14662941cb7913", - "owner": "5ef70662f8b34079a6eddb8da9d75fe8", - "size": 13167616, - "min_ram": 0, - "schema": "/v2/schemas/image", - "virtual_size": null -} diff --git a/api-ref/source/v2/samples/image-member-create-request.json b/api-ref/source/v2/samples/image-member-create-request.json deleted file mode 100644 index 06ad8fe9..00000000 --- a/api-ref/source/v2/samples/image-member-create-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "member": "8989447062e04a818baf9e073fd04fa7" -} diff --git a/api-ref/source/v2/samples/image-member-create-response.json b/api-ref/source/v2/samples/image-member-create-response.json deleted file mode 100644 index d6946dd9..00000000 --- a/api-ref/source/v2/samples/image-member-create-response.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "created_at": "2013-09-20T19:22:19Z", - "image_id": "a96be11e-8536-4910-92cb-de50aa19dfe6", - "member_id": "8989447062e04a818baf9e073fd04fa7", - "schema": "/v2/schemas/member", - "status": "pending", - "updated_at": "2013-09-20T19:25:31Z" -} diff --git a/api-ref/source/v2/samples/image-member-details-response.json b/api-ref/source/v2/samples/image-member-details-response.json deleted file mode 100644 index 870417fb..00000000 --- a/api-ref/source/v2/samples/image-member-details-response.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "status": "pending", - "created_at": "2013-11-26T07:21:21Z", - "updated_at": "2013-11-26T07:21:21Z", - "image_id": "0ae74cc5-5147-4239-9ce2-b0c580f7067e", - "member_id": "8989447062e04a818baf9e073fd04fa7", - "schema": "/v2/schemas/member" -} diff --git a/api-ref/source/v2/samples/image-member-update-request.json b/api-ref/source/v2/samples/image-member-update-request.json deleted file mode 100644 index 4ec147f0..00000000 --- a/api-ref/source/v2/samples/image-member-update-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "status": "accepted" -} diff 
--git a/api-ref/source/v2/samples/image-member-update-response.json b/api-ref/source/v2/samples/image-member-update-response.json deleted file mode 100644 index 75ed4bed..00000000 --- a/api-ref/source/v2/samples/image-member-update-response.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "created_at": "2013-09-20T19:22:19Z", - "image_id": "a96be11e-8536-4910-92cb-de50aa19dfe6", - "member_id": "8989447062e04a818baf9e073fd04fa7", - "schema": "/v2/schemas/member", - "status": "accepted", - "updated_at": "2013-09-20T20:15:31Z" -} diff --git a/api-ref/source/v2/samples/image-members-list-response.json b/api-ref/source/v2/samples/image-members-list-response.json deleted file mode 100644 index 3b55ad0f..00000000 --- a/api-ref/source/v2/samples/image-members-list-response.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "members": [ - { - "created_at": "2013-10-07T17:58:03Z", - "image_id": "dbc999e3-c52f-4200-bedd-3b18fe7f87fe", - "member_id": "123456789", - "schema": "/v2/schemas/member", - "status": "pending", - "updated_at": "2013-10-07T17:58:03Z" - }, - { - "created_at": "2013-10-07T17:58:55Z", - "image_id": "dbc999e3-c52f-4200-bedd-3b18fe7f87fe", - "member_id": "987654321", - "schema": "/v2/schemas/member", - "status": "accepted", - "updated_at": "2013-10-08T12:08:55Z" - } - ], - "schema": "/v2/schemas/members" -} diff --git a/api-ref/source/v2/samples/image-show-response.json b/api-ref/source/v2/samples/image-show-response.json deleted file mode 100644 index 07055355..00000000 --- a/api-ref/source/v2/samples/image-show-response.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "status": "active", - "name": "cirros-0.3.2-x86_64-disk", - "tags": [], - "container_format": "bare", - "created_at": "2014-05-05T17:15:10Z", - "disk_format": "qcow2", - "updated_at": "2014-05-05T17:15:11Z", - "visibility": "public", - "self": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27", - "min_disk": 0, - "protected": false, - "id": "1bea47ed-f6a9-463b-b423-14b9cca9ad27", - "file": 
"/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27/file", - "checksum": "64d7c1cd2b6f60c92c14662941cb7913", - "owner": "5ef70662f8b34079a6eddb8da9d75fe8", - "size": 13167616, - "min_ram": 0, - "schema": "/v2/schemas/image", - "virtual_size": null -} diff --git a/api-ref/source/v2/samples/image-update-request.json b/api-ref/source/v2/samples/image-update-request.json deleted file mode 100644 index 9bbc50b2..00000000 --- a/api-ref/source/v2/samples/image-update-request.json +++ /dev/null @@ -1,15 +0,0 @@ -[ - { - "op": "replace", - "path": "/name", - "value": "Fedora 17" - }, - { - "op": "replace", - "path": "/tags", - "value": [ - "fedora", - "beefy" - ] - } -] diff --git a/api-ref/source/v2/samples/image-update-response.json b/api-ref/source/v2/samples/image-update-response.json deleted file mode 100644 index c337290d..00000000 --- a/api-ref/source/v2/samples/image-update-response.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "checksum": "710544e7f0c828b42f51207342622d33", - "container_format": "ovf", - "created_at": "2016-06-29T16:13:07Z", - "disk_format": "vhd", - "file": "/v2/images/2b61ed2b-f800-4da0-99ff-396b742b8646/file", - "id": "2b61ed2b-f800-4da0-99ff-396b742b8646", - "min_disk": 20, - "min_ram": 512, - "name": "Fedora 17", - "owner": "02a7fb2dd4ef434c8a628c511dcbbeb6", - "protected": false, - "schema": "/v2/schemas/image", - "self": "/v2/images/2b61ed2b-f800-4da0-99ff-396b742b8646", - "size": 21909, - "status": "active", - "tags": [ - "beefy", - "fedora" - ], - "updated_at": "2016-07-25T14:48:18Z", - "virtual_size": null, - "visibility": "private" -} diff --git a/api-ref/source/v2/samples/images-list-response.json b/api-ref/source/v2/samples/images-list-response.json deleted file mode 100644 index 8e2097a0..00000000 --- a/api-ref/source/v2/samples/images-list-response.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "images": [ - { - "status": "active", - "name": "cirros-0.3.2-x86_64-disk", - "tags": [], - "container_format": "bare", - "created_at": 
"2014-11-07T17:07:06Z", - "disk_format": "qcow2", - "updated_at": "2014-11-07T17:19:09Z", - "visibility": "public", - "self": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27", - "min_disk": 0, - "protected": false, - "id": "1bea47ed-f6a9-463b-b423-14b9cca9ad27", - "file": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27/file", - "checksum": "64d7c1cd2b6f60c92c14662941cb7913", - "owner": "5ef70662f8b34079a6eddb8da9d75fe8", - "size": 13167616, - "min_ram": 0, - "schema": "/v2/schemas/image", - "virtual_size": null - }, - { - "status": "active", - "name": "F17-x86_64-cfntools", - "tags": [], - "container_format": "bare", - "created_at": "2014-10-30T08:23:39Z", - "disk_format": "qcow2", - "updated_at": "2014-11-03T16:40:10Z", - "visibility": "public", - "self": "/v2/images/781b3762-9469-4cec-b58d-3349e5de4e9c", - "min_disk": 0, - "protected": false, - "id": "781b3762-9469-4cec-b58d-3349e5de4e9c", - "file": "/v2/images/781b3762-9469-4cec-b58d-3349e5de4e9c/file", - "checksum": "afab0f79bac770d61d24b4d0560b5f70", - "owner": "5ef70662f8b34079a6eddb8da9d75fe8", - "size": 476704768, - "min_ram": 0, - "schema": "/v2/schemas/image", - "virtual_size": null - } - ], - "schema": "/v2/schemas/images", - "first": "/v2/images" -} diff --git a/api-ref/source/v2/samples/metadef-namespace-create-request-simple.json b/api-ref/source/v2/samples/metadef-namespace-create-request-simple.json deleted file mode 100644 index 6a60f7b7..00000000 --- a/api-ref/source/v2/samples/metadef-namespace-create-request-simple.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "namespace": "FredCo::SomeCategory::Example", - "display_name": "An Example Namespace", - "description": "A metadata definitions namespace for example use.", - "visibility": "public", - "protected": true -} diff --git a/api-ref/source/v2/samples/metadef-namespace-create-request.json b/api-ref/source/v2/samples/metadef-namespace-create-request.json deleted file mode 100644 index 3c272929..00000000 --- 
a/api-ref/source/v2/samples/metadef-namespace-create-request.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "description": "Choose capabilities that should be provided by the Compute Host. This provides the ability to fine tune the hardware specification required when a new vm is requested.", - "display_name": "Hypervisor Selection", - "namespace": "OS::Compute::Hypervisor", - "properties": { - "hypervisor_type": { - "description": "The hypervisor type.", - "enum": [ - "xen", - "qemu", - "kvm", - "lxc", - "uml", - "vmware", - "hyperv" - ], - "title": "Hypervisor Type", - "type": "string" - }, - "vm_mode": { - "description": "The virtual machine mode.", - "enum": [ - "hvm", - "xen", - "uml", - "exe" - ], - "title": "VM Mode", - "type": "string" - } - }, - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image" - } - ], - "visibility": "public" -} diff --git a/api-ref/source/v2/samples/metadef-namespace-create-response-simple.json b/api-ref/source/v2/samples/metadef-namespace-create-response-simple.json deleted file mode 100644 index 0e0cd431..00000000 --- a/api-ref/source/v2/samples/metadef-namespace-create-response-simple.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "created_at": "2016-05-19T16:05:48Z", - "description": "A metadata definitions namespace for example use.", - "display_name": "An Example Namespace", - "namespace": "FredCo::SomeCategory::Example", - "owner": "c60b1d57c5034e0d86902aedf8c49be0", - "protected": true, - "schema": "/v2/schemas/metadefs/namespace", - "self": "/v2/metadefs/namespaces/FredCo::SomeCategory::Example", - "updated_at": "2016-05-19T16:05:48Z", - "visibility": "public" -} diff --git a/api-ref/source/v2/samples/metadef-namespace-create-response.json b/api-ref/source/v2/samples/metadef-namespace-create-response.json deleted file mode 100644 index 24384535..00000000 --- a/api-ref/source/v2/samples/metadef-namespace-create-response.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "description": "Choose capabilities that 
should be provided by the Compute Host. This provides the ability to fine tune the hardware specification required when a new vm is requested.", - "display_name": "Hypervisor Selection", - "namespace": "OS::Compute::Hypervisor", - "properties": { - "hypervisor_type": { - "description": "The hypervisor type.", - "enum": [ - "xen", - "qemu", - "kvm", - "lxc", - "uml", - "vmware", - "hyperv" - ], - "title": "Hypervisor Type", - "type": "string" - }, - "vm_mode": { - "description": "The virtual machine mode.", - "enum": [ - "hvm", - "xen", - "uml", - "exe" - ], - "title": "VM Mode", - "type": "string" - } - }, - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image" - } - ], - "schema": "/v2/schemas/metadefs/namespace", - "self": "/v2/metadefs/namespaces/OS::Compute::Hypervisor", - "visibility": "public" -} diff --git a/api-ref/source/v2/samples/metadef-namespace-details-response.json b/api-ref/source/v2/samples/metadef-namespace-details-response.json deleted file mode 100644 index 51c4fb81..00000000 --- a/api-ref/source/v2/samples/metadef-namespace-details-response.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "created_at": "2016-06-28T14:57:10Z", - "description": "The libvirt compute driver options.", - "display_name": "libvirt Driver Options", - "namespace": "OS::Compute::Libvirt", - "owner": "admin", - "properties": { - "boot_menu": { - "description": "If true, enables the BIOS bootmenu.", - "enum": [ - "true", - "false" - ], - "title": "Boot Menu", - "type": "string" - }, - "serial_port_count": { - "description": "Specifies the count of serial ports.", - "minimum": 0, - "title": "Serial Port Count", - "type": "integer" - } - }, - "protected": true, - "resource_type_associations": [ - { - "created_at": "2016-06-28T14:57:10Z", - "name": "OS::Glance::Image", - "prefix": "hw_" - }, - { - "created_at": "2016-06-28T14:57:10Z", - "name": "OS::Nova::Flavor", - "prefix": "hw:" - } - ], - "schema": "/v2/schemas/metadefs/namespace", - "self": 
"/v2/metadefs/namespaces/OS::Compute::Libvirt", - "visibility": "public" -} diff --git a/api-ref/source/v2/samples/metadef-namespace-details-with-rt-response.json b/api-ref/source/v2/samples/metadef-namespace-details-with-rt-response.json deleted file mode 100644 index 6fd3a96e..00000000 --- a/api-ref/source/v2/samples/metadef-namespace-details-with-rt-response.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "created_at": "2016-06-28T14:57:10Z", - "description": "The libvirt compute driver options.", - "display_name": "libvirt Driver Options", - "namespace": "OS::Compute::Libvirt", - "owner": "admin", - "properties": { - "hw_boot_menu": { - "description": "If true, enables the BIOS bootmenu.", - "enum": [ - "true", - "false" - ], - "title": "Boot Menu", - "type": "string" - }, - "hw_serial_port_count": { - "description": "Specifies the count of serial ports.", - "minimum": 0, - "title": "Serial Port Count", - "type": "integer" - } - }, - "protected": true, - "resource_type_associations": [ - { - "created_at": "2016-06-28T14:57:10Z", - "name": "OS::Glance::Image", - "prefix": "hw_" - }, - { - "created_at": "2016-06-28T14:57:10Z", - "name": "OS::Nova::Flavor", - "prefix": "hw:" - } - ], - "schema": "/v2/schemas/metadefs/namespace", - "self": "/v2/metadefs/namespaces/OS::Compute::Libvirt", - "visibility": "public" -} diff --git a/api-ref/source/v2/samples/metadef-namespace-update-request.json b/api-ref/source/v2/samples/metadef-namespace-update-request.json deleted file mode 100644 index d06a41c4..00000000 --- a/api-ref/source/v2/samples/metadef-namespace-update-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "description": "Choose capabilities that should be provided by the Compute Host. 
This provides the ability to fine tune the hardware specification required when a new vm is requested.", - "display_name": "Hypervisor Selection", - "namespace": "OS::Compute::Hypervisor", - "protected": false, - "visibility": "public" -} diff --git a/api-ref/source/v2/samples/metadef-namespace-update-response.json b/api-ref/source/v2/samples/metadef-namespace-update-response.json deleted file mode 100644 index e56e29d5..00000000 --- a/api-ref/source/v2/samples/metadef-namespace-update-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "created_at": "2014-09-19T13:31:37Z", - "description": "Choose capabilities that should be provided by the Compute Host. This provides the ability to fine tune the hardware specification required when a new vm is requested.", - "display_name": "Hypervisor Selection", - "namespace": "OS::Compute::Hypervisor", - "owner": "7ec22942411e427692e8a3436be1031a", - "protected": false, - "schema": "/v2/schemas/metadefs/namespace", - "self": "/v2/metadefs/namespaces/OS::Compute::Hypervisor", - "updated_at": "2014-09-19T13:31:37Z", - "visibility": "public" -} diff --git a/api-ref/source/v2/samples/metadef-namespaces-list-response.json b/api-ref/source/v2/samples/metadef-namespaces-list-response.json deleted file mode 100644 index 44462d4d..00000000 --- a/api-ref/source/v2/samples/metadef-namespaces-list-response.json +++ /dev/null @@ -1,96 +0,0 @@ -{ - "first": "/v2/metadefs/namespaces?sort_key=created_at&sort_dir=asc", - "namespaces": [ - { - "created_at": "2014-08-28T17:13:06Z", - "description": "The libvirt compute driver options. These are properties specific to compute drivers. 
For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", - "display_name": "libvirt Driver Options", - "namespace": "OS::Compute::Libvirt", - "owner": "admin", - "protected": true, - "resource_type_associations": [ - { - "created_at": "2014-08-28T17:13:06Z", - "name": "OS::Glance::Image", - "updated_at": "2014-08-28T17:13:06Z" - } - ], - "schema": "/v2/schemas/metadefs/namespace", - "self": "/v2/metadefs/namespaces/OS::Compute::Libvirt", - "updated_at": "2014-08-28T17:13:06Z", - "visibility": "public" - }, - { - "created_at": "2014-08-28T17:13:06Z", - "description": "Compute drivers may enable quotas on CPUs available to a VM, disk tuning, bandwidth I/O, and instance VIF traffic control. See: http://docs.openstack.org/admin-guide-cloud/compute-flavors.html", - "display_name": "Flavor Quota", - "namespace": "OS::Compute::Quota", - "owner": "admin", - "protected": true, - "resource_type_associations": [ - { - "created_at": "2014-08-28T17:13:06Z", - "name": "OS::Nova::Flavor", - "updated_at": "2014-08-28T17:13:06Z" - } - ], - "schema": "/v2/schemas/metadefs/namespace", - "self": "/v2/metadefs/namespaces/OS::Compute::Quota", - "updated_at": "2014-08-28T17:13:06Z", - "visibility": "public" - }, - { - "created_at": "2014-08-28T17:13:06Z", - "description": "Trusted compute pools with Intel\u00ae Trusted Execution Technology (Intel\u00ae TXT) support IT compliance by protecting virtualized data centers - private, public, and hybrid clouds against attacks toward hypervisor and BIOS, firmware, and other pre-launch software components.", - "display_name": "Trusted Compute Pools (Intel\u00ae TXT)", - "namespace": "OS::Compute::Trust", - "owner": "admin", - "protected": true, - "resource_type_associations": [ - { - "created_at": "2014-08-28T17:13:06Z", - "name": "OS::Nova::Flavor", - "updated_at": "2014-08-28T17:13:06Z" - } - ], - "schema": "/v2/schemas/metadefs/namespace", - "self": "/v2/metadefs/namespaces/OS::Compute::Trust", - 
"updated_at": "2014-08-28T17:13:06Z", - "visibility": "public" - }, - { - "created_at": "2014-08-28T17:13:06Z", - "description": "This provides the preferred socket/core/thread counts for the virtual CPU instance exposed to guests. This enables the ability to avoid hitting limitations on vCPU topologies that OS vendors place on their products. See also: http://git.openstack.org/cgit/openstack/nova-specs/tree/specs/juno/virt-driver-vcpu-topology.rst", - "display_name": "Virtual CPU Topology", - "namespace": "OS::Compute::VirtCPUTopology", - "owner": "admin", - "protected": true, - "resource_type_associations": [ - { - "created_at": "2014-08-28T17:13:06Z", - "name": "OS::Glance::Image", - "prefix": "hw_", - "updated_at": "2014-08-28T17:13:06Z" - }, - { - "created_at": "2014-08-28T17:13:06Z", - "name": "OS::Cinder::Volume", - "prefix": "hw_", - "properties_target": "image", - "updated_at": "2014-08-28T17:13:06Z" - }, - { - "created_at": "2014-08-28T17:13:06Z", - "name": "OS::Nova::Flavor", - "prefix": "hw:", - "updated_at": "2014-08-28T17:13:06Z" - } - ], - "schema": "/v2/schemas/metadefs/namespace", - "self": "/v2/metadefs/namespaces/OS::Compute::VirtCPUTopology", - "updated_at": "2014-08-28T17:13:06Z", - "visibility": "public" - } - ], - "schema": "/v2/schemas/metadefs/namespaces" -} diff --git a/api-ref/source/v2/samples/metadef-object-create-request.json b/api-ref/source/v2/samples/metadef-object-create-request.json deleted file mode 100644 index 2aefab80..00000000 --- a/api-ref/source/v2/samples/metadef-object-create-request.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "description": "You can configure the CPU limits with control parameters.", - "name": "CPU Limits", - "properties": { - "quota:cpu_period": { - "description": "Specifies the enforcement interval (unit: microseconds) for QEMU and LXC hypervisors. Within a period, each VCPU of the domain is not allowed to consume more than the quota worth of runtime. The value should be in range [1000, 1000000]. 
A period with value 0 means no value.", - "maximum": 1000000, - "minimum": 1000, - "title": "Quota: CPU Period", - "type": "integer" - }, - "quota:cpu_quota": { - "description": "Specifies the maximum allowed bandwidth (unit: microseconds). A domain with a negative-value quota indicates that the domain has infinite bandwidth, which means that it is not bandwidth controlled. The value should be in range [1000, 18446744073709551] or less than 0. A quota with value 0 means no value. You can use this feature to ensure that all vCPUs run at the same speed.", - "title": "Quota: CPU Quota", - "type": "integer" - }, - "quota:cpu_shares": { - "description": "Specifies the proportional weighted share for the domain. If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", - "title": "Quota: CPU Shares", - "type": "integer" - } - }, - "required": [] -} diff --git a/api-ref/source/v2/samples/metadef-object-create-response.json b/api-ref/source/v2/samples/metadef-object-create-response.json deleted file mode 100644 index d0184bd9..00000000 --- a/api-ref/source/v2/samples/metadef-object-create-response.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "created_at": "2014-09-19T18:20:56Z", - "description": "You can configure the CPU limits with control parameters.", - "name": "CPU Limits", - "properties": { - "quota:cpu_period": { - "description": "Specifies the enforcement interval (unit: microseconds) for QEMU and LXC hypervisors. Within a period, each VCPU of the domain is not allowed to consume more than the quota worth of runtime. The value should be in range [1000, 1000000]. 
A period with value 0 means no value.", - "maximum": 1000000, - "minimum": 1000, - "title": "Quota: CPU Period", - "type": "integer" - }, - "quota:cpu_quota": { - "description": "Specifies the maximum allowed bandwidth (unit: microseconds). A domain with a negative-value quota indicates that the domain has infinite bandwidth, which means that it is not bandwidth controlled. The value should be in range [1000, 18446744073709551] or less than 0. A quota with value 0 means no value. You can use this feature to ensure that all vCPUs run at the same speed.", - "title": "Quota: CPU Quota", - "type": "integer" - }, - "quota:cpu_shares": { - "description": "Specifies the proportional weighted share for the domain. If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", - "title": "Quota: CPU Shares", - "type": "integer" - } - }, - "required": [], - "schema": "/v2/schemas/metadefs/object", - "self": "/v2/metadefs/namespaces/OS::Compute::Quota/objects/CPU Limits", - "updated_at": "2014-09-19T18:20:56Z" -} diff --git a/api-ref/source/v2/samples/metadef-object-details-response.json b/api-ref/source/v2/samples/metadef-object-details-response.json deleted file mode 100644 index d0184bd9..00000000 --- a/api-ref/source/v2/samples/metadef-object-details-response.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "created_at": "2014-09-19T18:20:56Z", - "description": "You can configure the CPU limits with control parameters.", - "name": "CPU Limits", - "properties": { - "quota:cpu_period": { - "description": "Specifies the enforcement interval (unit: microseconds) for QEMU and LXC hypervisors. Within a period, each VCPU of the domain is not allowed to consume more than the quota worth of runtime. The value should be in range [1000, 1000000]. 
A period with value 0 means no value.", - "maximum": 1000000, - "minimum": 1000, - "title": "Quota: CPU Period", - "type": "integer" - }, - "quota:cpu_quota": { - "description": "Specifies the maximum allowed bandwidth (unit: microseconds). A domain with a negative-value quota indicates that the domain has infinite bandwidth, which means that it is not bandwidth controlled. The value should be in range [1000, 18446744073709551] or less than 0. A quota with value 0 means no value. You can use this feature to ensure that all vCPUs run at the same speed.", - "title": "Quota: CPU Quota", - "type": "integer" - }, - "quota:cpu_shares": { - "description": "Specifies the proportional weighted share for the domain. If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", - "title": "Quota: CPU Shares", - "type": "integer" - } - }, - "required": [], - "schema": "/v2/schemas/metadefs/object", - "self": "/v2/metadefs/namespaces/OS::Compute::Quota/objects/CPU Limits", - "updated_at": "2014-09-19T18:20:56Z" -} diff --git a/api-ref/source/v2/samples/metadef-object-update-request.json b/api-ref/source/v2/samples/metadef-object-update-request.json deleted file mode 100644 index 35b41e34..00000000 --- a/api-ref/source/v2/samples/metadef-object-update-request.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "description": "You can configure the CPU limits with control parameters.", - "name": "CPU Limits", - "properties": { - "quota:cpu_shares": { - "description": "Specifies the proportional weighted share for the domain. If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. 
For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", - "title": "Quota: CPU Shares", - "type": "integer" - } - }, - "required": [] -} diff --git a/api-ref/source/v2/samples/metadef-object-update-response.json b/api-ref/source/v2/samples/metadef-object-update-response.json deleted file mode 100644 index 79db6272..00000000 --- a/api-ref/source/v2/samples/metadef-object-update-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "created_at": "2014-09-19T19:20:56Z", - "description": "You can configure the CPU limits with control parameters.", - "name": "CPU Limits", - "properties": { - "quota:cpu_shares": { - "description": "Specifies the proportional weighted share for the domain. If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", - "title": "Quota: CPU Shares", - "type": "integer" - } - }, - "required": [], - "schema": "/v2/schemas/metadefs/object", - "self": "/v2/metadefs/namespaces/OS::Compute::Quota/objects/CPU Limits", - "updated_at": "2014-09-19T19:20:56Z" -} diff --git a/api-ref/source/v2/samples/metadef-objects-list-response.json b/api-ref/source/v2/samples/metadef-objects-list-response.json deleted file mode 100644 index 06746022..00000000 --- a/api-ref/source/v2/samples/metadef-objects-list-response.json +++ /dev/null @@ -1,112 +0,0 @@ -{ - "objects": [ - { - "created_at": "2014-09-18T18:16:35Z", - "description": "You can configure the CPU limits with control parameters.", - "name": "CPU Limits", - "properties": { - "quota:cpu_period": { - "description": "Specifies the enforcement interval (unit: microseconds) for QEMU and LXC hypervisors. Within a period, each VCPU of the domain is not allowed to consume more than the quota worth of runtime. 
The value should be in range [1000, 1000000]. A period with value 0 means no value.", - "maximum": 1000000, - "minimum": 1000, - "title": "Quota: CPU Period", - "type": "integer" - }, - "quota:cpu_quota": { - "description": "Specifies the maximum allowed bandwidth (unit: microseconds). A domain with a negative-value quota indicates that the domain has infinite bandwidth, which means that it is not bandwidth controlled. The value should be in range [1000, 18446744073709551] or less than 0. A quota with value 0 means no value. You can use this feature to ensure that all vCPUs run at the same speed.", - "title": "Quota: CPU Quota", - "type": "integer" - }, - "quota:cpu_shares": { - "description": "Specifies the proportional weighted share for the domain. If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", - "title": "Quota: CPU Shares", - "type": "integer" - } - }, - "required": [], - "schema": "/v2/schemas/metadefs/object", - "self": "/v2/metadefs/namespaces/OS::Compute::Quota/objects/CPU Limits" - }, - { - "created_at": "2014-09-18T18:16:35Z", - "description": "Using disk I/O quotas, you can set maximum disk write to 10 MB per second for a VM user.", - "name": "Disk QoS", - "properties": { - "quota:disk_read_bytes_sec": { - "description": "Sets disk I/O quota for disk read bytes / sec.", - "title": "Quota: Disk read bytes / sec", - "type": "integer" - }, - "quota:disk_read_iops_sec": { - "description": "Sets disk I/O quota for disk read IOPS / sec.", - "title": "Quota: Disk read IOPS / sec", - "type": "integer" - }, - "quota:disk_total_bytes_sec": { - "description": "Sets disk I/O quota for total disk bytes / sec.", - "title": "Quota: Disk Total Bytes / sec", - "type": "integer" - }, - "quota:disk_total_iops_sec": { - "description": "Sets 
disk I/O quota for disk total IOPS / sec.", - "title": "Quota: Disk Total IOPS / sec", - "type": "integer" - }, - "quota:disk_write_bytes_sec": { - "description": "Sets disk I/O quota for disk write bytes / sec.", - "title": "Quota: Disk Write Bytes / sec", - "type": "integer" - }, - "quota:disk_write_iops_sec": { - "description": "Sets disk I/O quota for disk write IOPS / sec.", - "title": "Quota: Disk Write IOPS / sec", - "type": "integer" - } - }, - "required": [], - "schema": "/v2/schemas/metadefs/object", - "self": "/v2/metadefs/namespaces/OS::Compute::Quota/objects/Disk QoS" - }, - { - "created_at": "2014-09-18T18:16:35Z", - "description": "Bandwidth QoS tuning for instance virtual interfaces (VIFs) may be specified with these properties. Incoming and outgoing traffic can be shaped independently. If not specified, no quality of service (QoS) is applied on that traffic direction. So, if you want to shape only the network's incoming traffic, use inbound only (and vice versa). The OpenStack Networking service abstracts the physical implementation of the network, allowing plugins to configure and manage physical resources. Virtual Interfaces (VIF) in the logical model are analogous to physical network interface cards (NICs). VIFs are typically owned a managed by an external service; for instance when OpenStack Networking is used for building OpenStack networks, VIFs would be created, owned, and managed in Nova. VIFs are connected to OpenStack Networking networks via ports. A port is analogous to a port on a network switch, and it has an administrative state. When a VIF is attached to a port the OpenStack Networking API creates an attachment object, which specifies the fact that a VIF with a given identifier is plugged into the port.", - "name": "Virtual Interface QoS", - "properties": { - "quota:vif_inbound_average": { - "description": "Network Virtual Interface (VIF) inbound average in kilobytes per second. 
Specifies average bit rate on the interface being shaped.", - "title": "Quota: VIF Inbound Average", - "type": "integer" - }, - "quota:vif_inbound_burst": { - "description": "Network Virtual Interface (VIF) inbound burst in total kilobytes. Specifies the amount of bytes that can be burst at peak speed.", - "title": "Quota: VIF Inbound Burst", - "type": "integer" - }, - "quota:vif_inbound_peak": { - "description": "Network Virtual Interface (VIF) inbound peak in kilobytes per second. Specifies maximum rate at which an interface can receive data.", - "title": "Quota: VIF Inbound Peak", - "type": "integer" - }, - "quota:vif_outbound_average": { - "description": "Network Virtual Interface (VIF) outbound average in kilobytes per second. Specifies average bit rate on the interface being shaped.", - "title": "Quota: VIF Outbound Average", - "type": "integer" - }, - "quota:vif_outbound_burst": { - "description": "Network Virtual Interface (VIF) outbound burst in total kilobytes. Specifies the amount of bytes that can be burst at peak speed.", - "title": "Quota: VIF Outbound Burst", - "type": "integer" - }, - "quota:vif_outbound_peak": { - "description": "Network Virtual Interface (VIF) outbound peak in kilobytes per second. 
Specifies maximum rate at which an interface can send data.", - "title": "Quota: VIF Outbound Burst", - "type": "integer" - } - }, - "required": [], - "schema": "/v2/schemas/metadefs/object", - "self": "/v2/metadefs/namespaces/OS::Compute::Quota/objects/Virtual Interface QoS" - } - ], - "schema": "v2/schemas/metadefs/objects" -} diff --git a/api-ref/source/v2/samples/metadef-properties-list-response.json b/api-ref/source/v2/samples/metadef-properties-list-response.json deleted file mode 100644 index aeb4a5da..00000000 --- a/api-ref/source/v2/samples/metadef-properties-list-response.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "properties": { - "hw_disk_bus": { - "description": "Specifies the type of disk controller to attach disk devices to.", - "enum": [ - "scsi", - "virtio", - "uml", - "xen", - "ide", - "usb" - ], - "title": "Disk Bus", - "type": "string" - }, - "hw_machine_type": { - "description": "Enables booting an ARM system using the specified machine type. By default, if an ARM image is used and its type is not specified, Compute uses vexpress-a15 (for ARMv7) or virt (for AArch64) machine types. Valid types can be viewed by using the virsh capabilities command (machine types are displayed in the machine tag).", - "title": "Machine Type", - "type": "string" - }, - "hw_qemu_guest_agent": { - "description": "It is a daemon program running inside the domain which is supposed to help management applications with executing functions which need assistance of the guest OS. For example, freezing and thawing filesystems, entering suspend. However, guest agent (GA) is not bullet proof, and hostile guest OS can send spurious replies.", - "enum": [ - "yes", - "no" - ], - "title": "QEMU Guest Agent", - "type": "string" - }, - "hw_rng_model": { - "default": "virtio", - "description": "Adds a random-number generator device to the image's instances. The cloud administrator can enable and control device behavior by configuring the instance's flavor. 
By default: The generator device is disabled. /dev/random is used as the default entropy source. To specify a physical HW RNG device, use the following option in the nova.conf file: rng_dev_path=/dev/hwrng", - "title": "Random Number Generator Device", - "type": "string" - }, - "hw_scsi_model": { - "default": "virtio-scsi", - "description": "Enables the use of VirtIO SCSI (virtio-scsi) to provide block device access for compute instances; by default, instances use VirtIO Block (virtio-blk). VirtIO SCSI is a para-virtualized SCSI controller device that provides improved scalability and performance, and supports advanced SCSI hardware.", - "title": "SCSI Model", - "type": "string" - }, - "hw_video_model": { - "description": "The video image driver used.", - "enum": [ - "vga", - "cirrus", - "vmvga", - "xen", - "qxl" - ], - "title": "Video Model", - "type": "string" - }, - "hw_video_ram": { - "description": "Maximum RAM for the video image. Used only if a hw_video:ram_max_mb value has been set in the flavor's extra_specs and that value is higher than the value set in hw_video_ram.", - "title": "Max Video Ram", - "type": "integer" - }, - "hw_vif_model": { - "description": "Specifies the model of virtual network interface device to use. The valid options depend on the configured hypervisor. KVM and QEMU: e1000, ne2k_pci, pcnet, rtl8139, and virtio. VMware: e1000, e1000e, VirtualE1000, VirtualE1000e, VirtualPCNet32, VirtualSriovEthernetCard, and VirtualVmxnet. Xen: e1000, netfront, ne2k_pci, pcnet, and rtl8139.", - "enum": [ - "e1000", - "ne2k_pci", - "pcnet", - "rtl8139", - "virtio", - "e1000", - "e1000e", - "VirtualE1000", - "VirtualE1000e", - "VirtualPCNet32", - "VirtualSriovEthernetCard", - "VirtualVmxnet", - "netfront", - "ne2k_pci" - ], - "title": "Virtual Network Interface", - "type": "string" - }, - "os_command_line": { - "description": "The kernel command line to be used by the libvirt driver, instead of the default. 
For linux containers (LXC), the value is used as arguments for initialization. This key is valid only for Amazon kernel, ramdisk, or machine images (aki, ari, or ami).", - "title": "Kernel Command Line", - "type": "string" - } - } -} diff --git a/api-ref/source/v2/samples/metadef-property-create-request.json b/api-ref/source/v2/samples/metadef-property-create-request.json deleted file mode 100644 index 0b4b314a..00000000 --- a/api-ref/source/v2/samples/metadef-property-create-request.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "description": "The hypervisor type. It may be used by the host properties filter for scheduling. The ImagePropertiesFilter filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties. Image properties are contained in the image dictionary in the request_spec.", - "enum": [ - "xen", - "qemu", - "kvm", - "lxc", - "uml", - "vmware", - "hyperv" - ], - "name": "hypervisor_type", - "title": "Hypervisor Type", - "type": "string" -} diff --git a/api-ref/source/v2/samples/metadef-property-create-response.json b/api-ref/source/v2/samples/metadef-property-create-response.json deleted file mode 100644 index 0b4b314a..00000000 --- a/api-ref/source/v2/samples/metadef-property-create-response.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "description": "The hypervisor type. It may be used by the host properties filter for scheduling. The ImagePropertiesFilter filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties. 
Image properties are contained in the image dictionary in the request_spec.", - "enum": [ - "xen", - "qemu", - "kvm", - "lxc", - "uml", - "vmware", - "hyperv" - ], - "name": "hypervisor_type", - "title": "Hypervisor Type", - "type": "string" -} diff --git a/api-ref/source/v2/samples/metadef-property-details-response.json b/api-ref/source/v2/samples/metadef-property-details-response.json deleted file mode 100644 index 0b4b314a..00000000 --- a/api-ref/source/v2/samples/metadef-property-details-response.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "description": "The hypervisor type. It may be used by the host properties filter for scheduling. The ImagePropertiesFilter filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties. Image properties are contained in the image dictionary in the request_spec.", - "enum": [ - "xen", - "qemu", - "kvm", - "lxc", - "uml", - "vmware", - "hyperv" - ], - "name": "hypervisor_type", - "title": "Hypervisor Type", - "type": "string" -} diff --git a/api-ref/source/v2/samples/metadef-property-update-request.json b/api-ref/source/v2/samples/metadef-property-update-request.json deleted file mode 100644 index 0b4b314a..00000000 --- a/api-ref/source/v2/samples/metadef-property-update-request.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "description": "The hypervisor type. It may be used by the host properties filter for scheduling. The ImagePropertiesFilter filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties. 
Image properties are contained in the image dictionary in the request_spec.", - "enum": [ - "xen", - "qemu", - "kvm", - "lxc", - "uml", - "vmware", - "hyperv" - ], - "name": "hypervisor_type", - "title": "Hypervisor Type", - "type": "string" -} diff --git a/api-ref/source/v2/samples/metadef-property-update-response.json b/api-ref/source/v2/samples/metadef-property-update-response.json deleted file mode 100644 index 0b4b314a..00000000 --- a/api-ref/source/v2/samples/metadef-property-update-response.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "description": "The hypervisor type. It may be used by the host properties filter for scheduling. The ImagePropertiesFilter filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties. Image properties are contained in the image dictionary in the request_spec.", - "enum": [ - "xen", - "qemu", - "kvm", - "lxc", - "uml", - "vmware", - "hyperv" - ], - "name": "hypervisor_type", - "title": "Hypervisor Type", - "type": "string" -} diff --git a/api-ref/source/v2/samples/metadef-resource-type-assoc-create-response.json b/api-ref/source/v2/samples/metadef-resource-type-assoc-create-response.json deleted file mode 100644 index 9176e9a2..00000000 --- a/api-ref/source/v2/samples/metadef-resource-type-assoc-create-response.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "created_at": "2014-09-19T16:09:13Z", - "name": "OS::Cinder::Volume", - "prefix": "hw_", - "properties_target": "image", - "updated_at": "2014-09-19T16:09:13Z" -} diff --git a/api-ref/source/v2/samples/metadef-resource-type-create-request.json b/api-ref/source/v2/samples/metadef-resource-type-create-request.json deleted file mode 100644 index ec5225ec..00000000 --- a/api-ref/source/v2/samples/metadef-resource-type-create-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "name": "OS::Cinder::Volume", - "prefix": "hw_", - "properties_target": "image" -} diff --git 
a/api-ref/source/v2/samples/metadef-resource-types-list-response.json b/api-ref/source/v2/samples/metadef-resource-types-list-response.json deleted file mode 100644 index d2dc34a9..00000000 --- a/api-ref/source/v2/samples/metadef-resource-types-list-response.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "resource_types": [ - { - "created_at": "2014-08-28T18:13:04Z", - "name": "OS::Glance::Image", - "updated_at": "2014-08-28T18:13:04Z" - }, - { - "created_at": "2014-08-28T18:13:04Z", - "name": "OS::Cinder::Volume", - "updated_at": "2014-08-28T18:13:04Z" - }, - { - "created_at": "2014-08-28T18:13:04Z", - "name": "OS::Nova::Flavor", - "updated_at": "2014-08-28T18:13:04Z" - }, - { - "created_at": "2014-08-28T18:13:04Z", - "name": "OS::Nova::Aggregate", - "updated_at": "2014-08-28T18:13:04Z" - }, - { - "created_at": "2014-08-28T18:13:04Z", - "name": "OS::Nova::Instance", - "updated_at": "2014-08-28T18:13:04Z" - } - ] -} diff --git a/api-ref/source/v2/samples/metadef-tag-create-response.json b/api-ref/source/v2/samples/metadef-tag-create-response.json deleted file mode 100644 index 298e344b..00000000 --- a/api-ref/source/v2/samples/metadef-tag-create-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "created_at": "2015-05-09T01:12:31Z", - "name": "added-sample-tag", - "updated_at": "2015-05-09T01:12:31Z" -} diff --git a/api-ref/source/v2/samples/metadef-tag-details-response.json b/api-ref/source/v2/samples/metadef-tag-details-response.json deleted file mode 100644 index 0fc7197e..00000000 --- a/api-ref/source/v2/samples/metadef-tag-details-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "created_at": "2015-05-06T23:16:12Z", - "name": "sample-tag2", - "updated_at": "2015-05-06T23:16:12Z" -} diff --git a/api-ref/source/v2/samples/metadef-tag-update-request.json b/api-ref/source/v2/samples/metadef-tag-update-request.json deleted file mode 100644 index f2edbd4a..00000000 --- a/api-ref/source/v2/samples/metadef-tag-update-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "name": 
"new-tag-name" -} diff --git a/api-ref/source/v2/samples/metadef-tag-update-response.json b/api-ref/source/v2/samples/metadef-tag-update-response.json deleted file mode 100644 index 5fe767e7..00000000 --- a/api-ref/source/v2/samples/metadef-tag-update-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "created_at": "2016-05-21T18:49:38Z", - "name": "new-tag-name", - "updated_at": "2016-05-21T19:04:22Z" -} diff --git a/api-ref/source/v2/samples/metadef-tags-create-request.json b/api-ref/source/v2/samples/metadef-tags-create-request.json deleted file mode 100644 index facd34af..00000000 --- a/api-ref/source/v2/samples/metadef-tags-create-request.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tags": [ - { - "name": "sample-tag1" - }, - { - "name": "sample-tag2" - }, - { - "name": "sample-tag3" - } - ] -} diff --git a/api-ref/source/v2/samples/metadef-tags-create-response.json b/api-ref/source/v2/samples/metadef-tags-create-response.json deleted file mode 100644 index facd34af..00000000 --- a/api-ref/source/v2/samples/metadef-tags-create-response.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tags": [ - { - "name": "sample-tag1" - }, - { - "name": "sample-tag2" - }, - { - "name": "sample-tag3" - } - ] -} diff --git a/api-ref/source/v2/samples/metadef-tags-list-response.json b/api-ref/source/v2/samples/metadef-tags-list-response.json deleted file mode 100644 index facd34af..00000000 --- a/api-ref/source/v2/samples/metadef-tags-list-response.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tags": [ - { - "name": "sample-tag1" - }, - { - "name": "sample-tag2" - }, - { - "name": "sample-tag3" - } - ] -} diff --git a/api-ref/source/v2/samples/schemas-image-member-show-response.json b/api-ref/source/v2/samples/schemas-image-member-show-response.json deleted file mode 100644 index 146cc22d..00000000 --- a/api-ref/source/v2/samples/schemas-image-member-show-response.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "member", - "properties": { - "created_at": { - "description": "Date and time of 
image member creation", - "type": "string" - }, - "image_id": { - "description": "An identifier for the image", - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "type": "string" - }, - "member_id": { - "description": "An identifier for the image member (tenantId)", - "type": "string" - }, - "schema": { - "readOnly": true, - "type": "string" - }, - "status": { - "description": "The status of this image member", - "enum": [ - "pending", - "accepted", - "rejected" - ], - "type": "string" - }, - "updated_at": { - "description": "Date and time of last modification of image member", - "type": "string" - } - } -} diff --git a/api-ref/source/v2/samples/schemas-image-members-list-response.json b/api-ref/source/v2/samples/schemas-image-members-list-response.json deleted file mode 100644 index 41aa5ade..00000000 --- a/api-ref/source/v2/samples/schemas-image-members-list-response.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "links": [ - { - "href": "{schema}", - "rel": "describedby" - } - ], - "name": "members", - "properties": { - "members": { - "items": { - "name": "member", - "properties": { - "created_at": { - "description": "Date and time of image member creation", - "type": "string" - }, - "image_id": { - "description": "An identifier for the image", - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "type": "string" - }, - "member_id": { - "description": "An identifier for the image member (tenantId)", - "type": "string" - }, - "schema": { - "readOnly": true, - "type": "string" - }, - "status": { - "description": "The status of this image member", - "enum": [ - "pending", - "accepted", - "rejected" - ], - "type": "string" - }, - "updated_at": { - "description": "Date and time of last modification of image member", - "type": "string" - } - } - }, - "type": "array" - }, - "schema": { - "type": "string" - } - } -} diff --git 
a/api-ref/source/v2/samples/schemas-image-show-response.json b/api-ref/source/v2/samples/schemas-image-show-response.json deleted file mode 100644 index 83825496..00000000 --- a/api-ref/source/v2/samples/schemas-image-show-response.json +++ /dev/null @@ -1,236 +0,0 @@ -{ - "additionalProperties": { - "type": "string" - }, - "links": [ - { - "href": "{self}", - "rel": "self" - }, - { - "href": "{file}", - "rel": "enclosure" - }, - { - "href": "{schema}", - "rel": "describedby" - } - ], - "name": "image", - "properties": { - "architecture": { - "description": "Operating system architecture as specified in https://docs.openstack.org/cli-reference/glance-property-keys.html", - "is_base": false, - "type": "string" - }, - "checksum": { - "description": "md5 hash of image contents.", - "maxLength": 32, - "readOnly": true, - "type": [ - "null", - "string" - ] - }, - "container_format": { - "description": "Format of the container", - "enum": [ - null, - "ami", - "ari", - "aki", - "bare", - "ovf", - "ova", - "docker" - ], - "type": [ - "null", - "string" - ] - }, - "created_at": { - "description": "Date and time of image registration", - "readOnly": true, - "type": "string" - }, - "direct_url": { - "description": "URL to access the image file kept in external store", - "readOnly": true, - "type": "string" - }, - "disk_format": { - "description": "Format of the disk", - "enum": [ - null, - "ami", - "ari", - "aki", - "vhd", - "vhdx", - "vmdk", - "raw", - "qcow2", - "vdi", - "iso", - "ploop" - ], - "type": [ - "null", - "string" - ] - }, - "file": { - "description": "An image file url", - "readOnly": true, - "type": "string" - }, - "id": { - "description": "An identifier for the image", - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "type": "string" - }, - "instance_uuid": { - "description": "Metadata which can be used to record which instance this image is associated with. 
(Informational only, does not create an instance snapshot.)", - "is_base": false, - "type": "string" - }, - "kernel_id": { - "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image.", - "is_base": false, - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "type": [ - "null", - "string" - ] - }, - "locations": { - "description": "A set of URLs to access the image file kept in external store", - "items": { - "properties": { - "metadata": { - "type": "object" - }, - "url": { - "maxLength": 255, - "type": "string" - } - }, - "required": [ - "url", - "metadata" - ], - "type": "object" - }, - "type": "array" - }, - "min_disk": { - "description": "Amount of disk space (in GB) required to boot image.", - "type": "integer" - }, - "min_ram": { - "description": "Amount of ram (in MB) required to boot image.", - "type": "integer" - }, - "name": { - "description": "Descriptive name for the image", - "maxLength": 255, - "type": [ - "null", - "string" - ] - }, - "os_distro": { - "description": "Common name of operating system distribution as specified in https://docs.openstack.org/cli-reference/glance-property-keys.html", - "is_base": false, - "type": "string" - }, - "os_version": { - "description": "Operating system version as specified by the distributor", - "is_base": false, - "type": "string" - }, - "owner": { - "description": "Owner of the image", - "maxLength": 255, - "type": [ - "null", - "string" - ] - }, - "protected": { - "description": "If true, image will not be deletable.", - "type": "boolean" - }, - "ramdisk_id": { - "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image.", - "is_base": false, - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "type": [ - "null", - "string" - ] - }, - "schema": { - "description": "An image schema url", - "readOnly": 
true, - "type": "string" - }, - "self": { - "description": "An image self url", - "readOnly": true, - "type": "string" - }, - "size": { - "description": "Size of image file in bytes", - "readOnly": true, - "type": [ - "null", - "integer" - ] - }, - "status": { - "description": "Status of the image", - "enum": [ - "queued", - "saving", - "active", - "killed", - "deleted", - "pending_delete", - "deactivated" - ], - "readOnly": true, - "type": "string" - }, - "tags": { - "description": "List of strings related to the image", - "items": { - "maxLength": 255, - "type": "string" - }, - "type": "array" - }, - "updated_at": { - "description": "Date and time of the last image modification", - "readOnly": true, - "type": "string" - }, - "virtual_size": { - "description": "Virtual size of image in bytes", - "readOnly": true, - "type": [ - "null", - "integer" - ] - }, - "visibility": { - "description": "Scope of image accessibility", - "enum": [ - "public", - "private" - ], - "type": "string" - } - } -} diff --git a/api-ref/source/v2/samples/schemas-images-list-response.json b/api-ref/source/v2/samples/schemas-images-list-response.json deleted file mode 100644 index 6f544957..00000000 --- a/api-ref/source/v2/samples/schemas-images-list-response.json +++ /dev/null @@ -1,267 +0,0 @@ -{ - "links": [ - { - "href": "{first}", - "rel": "first" - }, - { - "href": "{next}", - "rel": "next" - }, - { - "href": "{schema}", - "rel": "describedby" - } - ], - "name": "images", - "properties": { - "first": { - "type": "string" - }, - "images": { - "items": { - "additionalProperties": { - "type": "string" - }, - "links": [ - { - "href": "{self}", - "rel": "self" - }, - { - "href": "{file}", - "rel": "enclosure" - }, - { - "href": "{schema}", - "rel": "describedby" - } - ], - "name": "image", - "properties": { - "architecture": { - "description": "Operating system architecture as specified in https://docs.openstack.org/cli-reference/glance-property-keys.html", - "is_base": false, - "type": 
"string" - }, - "checksum": { - "description": "md5 hash of image contents.", - "maxLength": 32, - "readOnly": true, - "type": [ - "null", - "string" - ] - }, - "container_format": { - "description": "Format of the container", - "enum": [ - null, - "ami", - "ari", - "aki", - "bare", - "ovf", - "ova", - "docker" - ], - "type": [ - "null", - "string" - ] - }, - "created_at": { - "description": "Date and time of image registration", - "readOnly": true, - "type": "string" - }, - "direct_url": { - "description": "URL to access the image file kept in external store", - "readOnly": true, - "type": "string" - }, - "disk_format": { - "description": "Format of the disk", - "enum": [ - null, - "ami", - "ari", - "aki", - "vhd", - "vhdx", - "vmdk", - "raw", - "qcow2", - "vdi", - "iso", - "ploop" - ], - "type": [ - "null", - "string" - ] - }, - "file": { - "description": "An image file url", - "readOnly": true, - "type": "string" - }, - "id": { - "description": "An identifier for the image", - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "type": "string" - }, - "instance_uuid": { - "description": "Metadata which can be used to record which instance this image is associated with. 
(Informational only, does not create an instance snapshot.)", - "is_base": false, - "type": "string" - }, - "kernel_id": { - "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image.", - "is_base": false, - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "type": [ - "null", - "string" - ] - }, - "locations": { - "description": "A set of URLs to access the image file kept in external store", - "items": { - "properties": { - "metadata": { - "type": "object" - }, - "url": { - "maxLength": 255, - "type": "string" - } - }, - "required": [ - "url", - "metadata" - ], - "type": "object" - }, - "type": "array" - }, - "min_disk": { - "description": "Amount of disk space (in GB) required to boot image.", - "type": "integer" - }, - "min_ram": { - "description": "Amount of ram (in MB) required to boot image.", - "type": "integer" - }, - "name": { - "description": "Descriptive name for the image", - "maxLength": 255, - "type": [ - "null", - "string" - ] - }, - "os_distro": { - "description": "Common name of operating system distribution as specified in https://docs.openstack.org/cli-reference/glance-property-keys.html", - "is_base": false, - "type": "string" - }, - "os_version": { - "description": "Operating system version as specified by the distributor", - "is_base": false, - "type": "string" - }, - "owner": { - "description": "Owner of the image", - "maxLength": 255, - "type": [ - "null", - "string" - ] - }, - "protected": { - "description": "If true, image will not be deletable.", - "type": "boolean" - }, - "ramdisk_id": { - "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image.", - "is_base": false, - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "type": [ - "null", - "string" - ] - }, - "schema": { - "description": "An image schema url", - "readOnly": 
true, - "type": "string" - }, - "self": { - "description": "An image self url", - "readOnly": true, - "type": "string" - }, - "size": { - "description": "Size of image file in bytes", - "readOnly": true, - "type": [ - "null", - "integer" - ] - }, - "status": { - "description": "Status of the image", - "enum": [ - "queued", - "saving", - "active", - "killed", - "deleted", - "pending_delete", - "deactivated" - ], - "readOnly": true, - "type": "string" - }, - "tags": { - "description": "List of strings related to the image", - "items": { - "maxLength": 255, - "type": "string" - }, - "type": "array" - }, - "updated_at": { - "description": "Date and time of the last image modification", - "readOnly": true, - "type": "string" - }, - "virtual_size": { - "description": "Virtual size of image in bytes", - "readOnly": true, - "type": [ - "null", - "integer" - ] - }, - "visibility": { - "description": "Scope of image accessibility", - "enum": [ - "public", - "private" - ], - "type": "string" - } - } - }, - "type": "array" - }, - "next": { - "type": "string" - }, - "schema": { - "type": "string" - } - } -} diff --git a/api-ref/source/v2/samples/schemas-metadef-namespace-show-response.json b/api-ref/source/v2/samples/schemas-metadef-namespace-show-response.json deleted file mode 100644 index b61ebc23..00000000 --- a/api-ref/source/v2/samples/schemas-metadef-namespace-show-response.json +++ /dev/null @@ -1,234 +0,0 @@ -{ - "additionalProperties": false, - "definitions": { - "positiveInteger": { - "minimum": 0, - "type": "integer" - }, - "positiveIntegerDefault0": { - "allOf": [ - { - "$ref": "#/definitions/positiveInteger" - }, - { - "default": 0 - } - ] - }, - "property": { - "additionalProperties": { - "properties": { - "additionalItems": { - "type": "boolean" - }, - "default": {}, - "description": { - "type": "string" - }, - "enum": { - "type": "array" - }, - "items": { - "properties": { - "enum": { - "type": "array" - }, - "type": { - "enum": [ - "array", - "boolean", - 
"integer", - "number", - "object", - "string", - null - ], - "type": "string" - } - }, - "type": "object" - }, - "maxItems": { - "$ref": "#/definitions/positiveInteger" - }, - "maxLength": { - "$ref": "#/definitions/positiveInteger" - }, - "maximum": { - "type": "number" - }, - "minItems": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minLength": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minimum": { - "type": "number" - }, - "name": { - "maxLength": 255, - "type": "string" - }, - "operators": { - "items": { - "type": "string" - }, - "type": "array" - }, - "pattern": { - "format": "regex", - "type": "string" - }, - "readonly": { - "type": "boolean" - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "title": { - "type": "string" - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - null - ], - "type": "string" - }, - "uniqueItems": { - "default": false, - "type": "boolean" - } - }, - "required": [ - "title", - "type" - ], - "type": "object" - }, - "type": "object" - }, - "stringArray": { - "items": { - "type": "string" - }, - "type": "array", - "uniqueItems": true - } - }, - "name": "namespace", - "properties": { - "created_at": { - "description": "Date and time of namespace creation", - "format": "date-time", - "readOnly": true, - "type": "string" - }, - "description": { - "description": "Provides a user friendly description of the namespace.", - "maxLength": 500, - "type": "string" - }, - "display_name": { - "description": "The user friendly name for the namespace. 
Used by UI if available.", - "maxLength": 80, - "type": "string" - }, - "namespace": { - "description": "The unique namespace text.", - "maxLength": 80, - "type": "string" - }, - "objects": { - "items": { - "properties": { - "description": { - "type": "string" - }, - "name": { - "type": "string" - }, - "properties": { - "$ref": "#/definitions/property" - }, - "required": { - "$ref": "#/definitions/stringArray" - } - }, - "type": "object" - }, - "type": "array" - }, - "owner": { - "description": "Owner of the namespace.", - "maxLength": 255, - "type": "string" - }, - "properties": { - "$ref": "#/definitions/property" - }, - "protected": { - "description": "If true, namespace will not be deletable.", - "type": "boolean" - }, - "resource_type_associations": { - "items": { - "properties": { - "name": { - "type": "string" - }, - "prefix": { - "type": "string" - }, - "properties_target": { - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "schema": { - "readOnly": true, - "type": "string" - }, - "self": { - "readOnly": true, - "type": "string" - }, - "tags": { - "items": { - "properties": { - "name": { - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "updated_at": { - "description": "Date and time of the last namespace modification", - "format": "date-time", - "readOnly": true, - "type": "string" - }, - "visibility": { - "description": "Scope of namespace accessibility.", - "enum": [ - "public", - "private" - ], - "type": "string" - } - }, - "required": [ - "namespace" - ] -} diff --git a/api-ref/source/v2/samples/schemas-metadef-namespaces-list-response.json b/api-ref/source/v2/samples/schemas-metadef-namespaces-list-response.json deleted file mode 100644 index 39e34909..00000000 --- a/api-ref/source/v2/samples/schemas-metadef-namespaces-list-response.json +++ /dev/null @@ -1,265 +0,0 @@ -{ - "definitions": { - "positiveInteger": { - "minimum": 0, - "type": "integer" - }, - "positiveIntegerDefault0": { - "allOf": [ 
- { - "$ref": "#/definitions/positiveInteger" - }, - { - "default": 0 - } - ] - }, - "property": { - "additionalProperties": { - "properties": { - "additionalItems": { - "type": "boolean" - }, - "default": {}, - "description": { - "type": "string" - }, - "enum": { - "type": "array" - }, - "items": { - "properties": { - "enum": { - "type": "array" - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - null - ], - "type": "string" - } - }, - "type": "object" - }, - "maxItems": { - "$ref": "#/definitions/positiveInteger" - }, - "maxLength": { - "$ref": "#/definitions/positiveInteger" - }, - "maximum": { - "type": "number" - }, - "minItems": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minLength": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minimum": { - "type": "number" - }, - "name": { - "maxLength": 255, - "type": "string" - }, - "operators": { - "items": { - "type": "string" - }, - "type": "array" - }, - "pattern": { - "format": "regex", - "type": "string" - }, - "readonly": { - "type": "boolean" - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "title": { - "type": "string" - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - null - ], - "type": "string" - }, - "uniqueItems": { - "default": false, - "type": "boolean" - } - }, - "required": [ - "title", - "type" - ], - "type": "object" - }, - "type": "object" - }, - "stringArray": { - "items": { - "type": "string" - }, - "type": "array", - "uniqueItems": true - } - }, - "links": [ - { - "href": "{first}", - "rel": "first" - }, - { - "href": "{next}", - "rel": "next" - }, - { - "href": "{schema}", - "rel": "describedby" - } - ], - "name": "namespaces", - "properties": { - "first": { - "type": "string" - }, - "namespaces": { - "items": { - "additionalProperties": false, - "name": "namespace", - "properties": { - "created_at": { - "description": "Date and time of namespace 
creation", - "format": "date-time", - "readOnly": true, - "type": "string" - }, - "description": { - "description": "Provides a user friendly description of the namespace.", - "maxLength": 500, - "type": "string" - }, - "display_name": { - "description": "The user friendly name for the namespace. Used by UI if available.", - "maxLength": 80, - "type": "string" - }, - "namespace": { - "description": "The unique namespace text.", - "maxLength": 80, - "type": "string" - }, - "objects": { - "items": { - "properties": { - "description": { - "type": "string" - }, - "name": { - "type": "string" - }, - "properties": { - "$ref": "#/definitions/property" - }, - "required": { - "$ref": "#/definitions/stringArray" - } - }, - "type": "object" - }, - "type": "array" - }, - "owner": { - "description": "Owner of the namespace.", - "maxLength": 255, - "type": "string" - }, - "properties": { - "$ref": "#/definitions/property" - }, - "protected": { - "description": "If true, namespace will not be deletable.", - "type": "boolean" - }, - "resource_type_associations": { - "items": { - "properties": { - "name": { - "type": "string" - }, - "prefix": { - "type": "string" - }, - "properties_target": { - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "schema": { - "readOnly": true, - "type": "string" - }, - "self": { - "readOnly": true, - "type": "string" - }, - "tags": { - "items": { - "properties": { - "name": { - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "updated_at": { - "description": "Date and time of the last namespace modification", - "format": "date-time", - "readOnly": true, - "type": "string" - }, - "visibility": { - "description": "Scope of namespace accessibility.", - "enum": [ - "public", - "private" - ], - "type": "string" - } - }, - "required": [ - "namespace" - ] - }, - "type": "array" - }, - "next": { - "type": "string" - }, - "schema": { - "type": "string" - } - } -} diff --git 
a/api-ref/source/v2/samples/schemas-metadef-object-show-response.json b/api-ref/source/v2/samples/schemas-metadef-object-show-response.json deleted file mode 100644 index ae7ffff3..00000000 --- a/api-ref/source/v2/samples/schemas-metadef-object-show-response.json +++ /dev/null @@ -1,164 +0,0 @@ -{ - "additionalProperties": false, - "definitions": { - "positiveInteger": { - "minimum": 0, - "type": "integer" - }, - "positiveIntegerDefault0": { - "allOf": [ - { - "$ref": "#/definitions/positiveInteger" - }, - { - "default": 0 - } - ] - }, - "property": { - "additionalProperties": { - "properties": { - "additionalItems": { - "type": "boolean" - }, - "default": {}, - "description": { - "type": "string" - }, - "enum": { - "type": "array" - }, - "items": { - "properties": { - "enum": { - "type": "array" - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - null - ], - "type": "string" - } - }, - "type": "object" - }, - "maxItems": { - "$ref": "#/definitions/positiveInteger" - }, - "maxLength": { - "$ref": "#/definitions/positiveInteger" - }, - "maximum": { - "type": "number" - }, - "minItems": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minLength": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minimum": { - "type": "number" - }, - "name": { - "maxLength": 255, - "type": "string" - }, - "operators": { - "items": { - "type": "string" - }, - "type": "array" - }, - "pattern": { - "format": "regex", - "type": "string" - }, - "readonly": { - "type": "boolean" - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "title": { - "type": "string" - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - null - ], - "type": "string" - }, - "uniqueItems": { - "default": false, - "type": "boolean" - } - }, - "required": [ - "title", - "type" - ], - "type": "object" - }, - "type": "object" - }, - "stringArray": { - "items": { - "type": "string" - }, - 
"type": "array", - "uniqueItems": true - } - }, - "name": "object", - "properties": { - "created_at": { - "description": "Date and time of object creation", - "format": "date-time", - "readOnly": true, - "type": "string" - }, - "description": { - "type": "string" - }, - "name": { - "maxLength": 255, - "type": "string" - }, - "properties": { - "$ref": "#/definitions/property" - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "schema": { - "readOnly": true, - "type": "string" - }, - "self": { - "readOnly": true, - "type": "string" - }, - "updated_at": { - "description": "Date and time of the last object modification", - "format": "date-time", - "readOnly": true, - "type": "string" - } - }, - "required": [ - "name" - ] -} diff --git a/api-ref/source/v2/samples/schemas-metadef-objects-list-response.json b/api-ref/source/v2/samples/schemas-metadef-objects-list-response.json deleted file mode 100644 index 872a8afe..00000000 --- a/api-ref/source/v2/samples/schemas-metadef-objects-list-response.json +++ /dev/null @@ -1,195 +0,0 @@ -{ - "definitions": { - "positiveInteger": { - "minimum": 0, - "type": "integer" - }, - "positiveIntegerDefault0": { - "allOf": [ - { - "$ref": "#/definitions/positiveInteger" - }, - { - "default": 0 - } - ] - }, - "property": { - "additionalProperties": { - "properties": { - "additionalItems": { - "type": "boolean" - }, - "default": {}, - "description": { - "type": "string" - }, - "enum": { - "type": "array" - }, - "items": { - "properties": { - "enum": { - "type": "array" - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - null - ], - "type": "string" - } - }, - "type": "object" - }, - "maxItems": { - "$ref": "#/definitions/positiveInteger" - }, - "maxLength": { - "$ref": "#/definitions/positiveInteger" - }, - "maximum": { - "type": "number" - }, - "minItems": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minLength": { - "$ref": 
"#/definitions/positiveIntegerDefault0" - }, - "minimum": { - "type": "number" - }, - "name": { - "maxLength": 255, - "type": "string" - }, - "operators": { - "items": { - "type": "string" - }, - "type": "array" - }, - "pattern": { - "format": "regex", - "type": "string" - }, - "readonly": { - "type": "boolean" - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "title": { - "type": "string" - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - null - ], - "type": "string" - }, - "uniqueItems": { - "default": false, - "type": "boolean" - } - }, - "required": [ - "title", - "type" - ], - "type": "object" - }, - "type": "object" - }, - "stringArray": { - "items": { - "type": "string" - }, - "type": "array", - "uniqueItems": true - } - }, - "links": [ - { - "href": "{first}", - "rel": "first" - }, - { - "href": "{next}", - "rel": "next" - }, - { - "href": "{schema}", - "rel": "describedby" - } - ], - "name": "objects", - "properties": { - "first": { - "type": "string" - }, - "next": { - "type": "string" - }, - "objects": { - "items": { - "additionalProperties": false, - "name": "object", - "properties": { - "created_at": { - "description": "Date and time of object creation", - "format": "date-time", - "readOnly": true, - "type": "string" - }, - "description": { - "type": "string" - }, - "name": { - "maxLength": 255, - "type": "string" - }, - "properties": { - "$ref": "#/definitions/property" - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "schema": { - "readOnly": true, - "type": "string" - }, - "self": { - "readOnly": true, - "type": "string" - }, - "updated_at": { - "description": "Date and time of the last object modification", - "format": "date-time", - "readOnly": true, - "type": "string" - } - }, - "required": [ - "name" - ] - }, - "type": "array" - }, - "schema": { - "type": "string" - } - } -} diff --git a/api-ref/source/v2/samples/schemas-metadef-properties-list-response.json 
b/api-ref/source/v2/samples/schemas-metadef-properties-list-response.json deleted file mode 100644 index 59dfbfd5..00000000 --- a/api-ref/source/v2/samples/schemas-metadef-properties-list-response.json +++ /dev/null @@ -1,152 +0,0 @@ -{ - "definitions": { - "positiveInteger": { - "minimum": 0, - "type": "integer" - }, - "positiveIntegerDefault0": { - "allOf": [ - { - "$ref": "#/definitions/positiveInteger" - }, - { - "default": 0 - } - ] - }, - "stringArray": { - "items": { - "type": "string" - }, - "minItems": 1, - "type": "array", - "uniqueItems": true - } - }, - "links": [ - { - "href": "{first}", - "rel": "first" - }, - { - "href": "{next}", - "rel": "next" - }, - { - "href": "{schema}", - "rel": "describedby" - } - ], - "name": "properties", - "properties": { - "first": { - "type": "string" - }, - "next": { - "type": "string" - }, - "properties": { - "additionalProperties": { - "additionalProperties": false, - "name": "property", - "properties": { - "additionalItems": { - "type": "boolean" - }, - "default": {}, - "description": { - "type": "string" - }, - "enum": { - "type": "array" - }, - "items": { - "properties": { - "enum": { - "type": "array" - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - null - ], - "type": "string" - } - }, - "type": "object" - }, - "maxItems": { - "$ref": "#/definitions/positiveInteger" - }, - "maxLength": { - "$ref": "#/definitions/positiveInteger" - }, - "maximum": { - "type": "number" - }, - "minItems": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minLength": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minimum": { - "type": "number" - }, - "name": { - "maxLength": 255, - "type": "string" - }, - "operators": { - "items": { - "type": "string" - }, - "type": "array" - }, - "pattern": { - "format": "regex", - "type": "string" - }, - "readonly": { - "type": "boolean" - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "title": { - 
"type": "string" - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - null - ], - "type": "string" - }, - "uniqueItems": { - "default": false, - "type": "boolean" - } - }, - "required": [ - "type", - "title" - ] - }, - "type": "object" - }, - "schema": { - "type": "string" - } - } -} diff --git a/api-ref/source/v2/samples/schemas-metadef-property-show-response.json b/api-ref/source/v2/samples/schemas-metadef-property-show-response.json deleted file mode 100644 index f9e9bb31..00000000 --- a/api-ref/source/v2/samples/schemas-metadef-property-show-response.json +++ /dev/null @@ -1,122 +0,0 @@ -{ - "additionalProperties": false, - "definitions": { - "positiveInteger": { - "minimum": 0, - "type": "integer" - }, - "positiveIntegerDefault0": { - "allOf": [ - { - "$ref": "#/definitions/positiveInteger" - }, - { - "default": 0 - } - ] - }, - "stringArray": { - "items": { - "type": "string" - }, - "minItems": 1, - "type": "array", - "uniqueItems": true - } - }, - "name": "property", - "properties": { - "additionalItems": { - "type": "boolean" - }, - "default": {}, - "description": { - "type": "string" - }, - "enum": { - "type": "array" - }, - "items": { - "properties": { - "enum": { - "type": "array" - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - null - ], - "type": "string" - } - }, - "type": "object" - }, - "maxItems": { - "$ref": "#/definitions/positiveInteger" - }, - "maxLength": { - "$ref": "#/definitions/positiveInteger" - }, - "maximum": { - "type": "number" - }, - "minItems": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minLength": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minimum": { - "type": "number" - }, - "name": { - "maxLength": 255, - "type": "string" - }, - "operators": { - "items": { - "type": "string" - }, - "type": "array" - }, - "pattern": { - "format": "regex", - "type": "string" - }, - "readonly": { - "type": 
"boolean" - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "title": { - "type": "string" - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - null - ], - "type": "string" - }, - "uniqueItems": { - "default": false, - "type": "boolean" - } - }, - "required": [ - "type", - "title", - "name" - ] -} diff --git a/api-ref/source/v2/samples/schemas-metadef-resource-type-association-show-response.json b/api-ref/source/v2/samples/schemas-metadef-resource-type-association-show-response.json deleted file mode 100644 index cf939917..00000000 --- a/api-ref/source/v2/samples/schemas-metadef-resource-type-association-show-response.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "additionalProperties": false, - "name": "resource_type_association", - "properties": { - "created_at": { - "description": "Date and time of resource type association", - "format": "date-time", - "readOnly": true, - "type": "string" - }, - "name": { - "description": "Resource type names should be aligned with Heat resource types whenever possible: http://docs.openstack.org/developer/heat/template_guide/openstack.html", - "maxLength": 80, - "type": "string" - }, - "prefix": { - "description": "Specifies the prefix to use for the given resource type. Any properties in the namespace should be prefixed with this prefix when being applied to the specified resource type. Must include prefix separator (e.g. a colon :).", - "maxLength": 80, - "type": "string" - }, - "properties_target": { - "description": "Some resource types allow more than one key / value pair per instance. For example, Cinder allows user and image metadata on volumes. Only the image properties metadata is evaluated by Nova (scheduling or drivers). 
This property allows a namespace target to remove the ambiguity.", - "maxLength": 80, - "type": "string" - }, - "updated_at": { - "description": "Date and time of the last resource type association modification", - "format": "date-time", - "readOnly": true, - "type": "string" - } - }, - "required": [ - "name" - ] -} diff --git a/api-ref/source/v2/samples/schemas-metadef-resource-type-associations-list-response.json b/api-ref/source/v2/samples/schemas-metadef-resource-type-associations-list-response.json deleted file mode 100644 index 4d967449..00000000 --- a/api-ref/source/v2/samples/schemas-metadef-resource-type-associations-list-response.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "links": [ - { - "href": "{first}", - "rel": "first" - }, - { - "href": "{next}", - "rel": "next" - }, - { - "href": "{schema}", - "rel": "describedby" - } - ], - "name": "resource_type_associations", - "properties": { - "first": { - "type": "string" - }, - "next": { - "type": "string" - }, - "resource_type_associations": { - "items": { - "additionalProperties": false, - "name": "resource_type_association", - "properties": { - "created_at": { - "description": "Date and time of resource type association", - "format": "date-time", - "readOnly": true, - "type": "string" - }, - "name": { - "description": "Resource type names should be aligned with Heat resource types whenever possible: http://docs.openstack.org/developer/heat/template_guide/openstack.html", - "maxLength": 80, - "type": "string" - }, - "prefix": { - "description": "Specifies the prefix to use for the given resource type. Any properties in the namespace should be prefixed with this prefix when being applied to the specified resource type. Must include prefix separator (e.g. a colon :).", - "maxLength": 80, - "type": "string" - }, - "properties_target": { - "description": "Some resource types allow more than one key / value pair per instance. For example, Cinder allows user and image metadata on volumes. 
Only the image properties metadata is evaluated by Nova (scheduling or drivers). This property allows a namespace target to remove the ambiguity.", - "maxLength": 80, - "type": "string" - }, - "updated_at": { - "description": "Date and time of the last resource type association modification", - "format": "date-time", - "readOnly": true, - "type": "string" - } - }, - "required": [ - "name" - ] - }, - "type": "array" - }, - "schema": { - "type": "string" - } - } -} diff --git a/api-ref/source/v2/samples/schemas-metadef-tag-show-response.json b/api-ref/source/v2/samples/schemas-metadef-tag-show-response.json deleted file mode 100644 index a99a511d..00000000 --- a/api-ref/source/v2/samples/schemas-metadef-tag-show-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "additionalProperties": false, - "name": "tag", - "properties": { - "created_at": { - "description": "Date and time of tag creation", - "format": "date-time", - "readOnly": true, - "type": "string" - }, - "name": { - "maxLength": 255, - "type": "string" - }, - "updated_at": { - "description": "Date and time of the last tag modification", - "format": "date-time", - "readOnly": true, - "type": "string" - } - }, - "required": [ - "name" - ] -} diff --git a/api-ref/source/v2/samples/schemas-metadef-tags-list-response.json b/api-ref/source/v2/samples/schemas-metadef-tags-list-response.json deleted file mode 100644 index 07006b96..00000000 --- a/api-ref/source/v2/samples/schemas-metadef-tags-list-response.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "links": [ - { - "href": "{first}", - "rel": "first" - }, - { - "href": "{next}", - "rel": "next" - }, - { - "href": "{schema}", - "rel": "describedby" - } - ], - "name": "tags", - "properties": { - "first": { - "type": "string" - }, - "next": { - "type": "string" - }, - "schema": { - "type": "string" - }, - "tags": { - "items": { - "additionalProperties": false, - "name": "tag", - "properties": { - "created_at": { - "description": "Date and time of tag creation", - 
"format": "date-time", - "readOnly": true, - "type": "string" - }, - "name": { - "maxLength": 255, - "type": "string" - }, - "updated_at": { - "description": "Date and time of the last tag modification", - "format": "date-time", - "readOnly": true, - "type": "string" - } - }, - "required": [ - "name" - ] - }, - "type": "array" - } - } -} diff --git a/api-ref/source/v2/samples/schemas-task-show-response.json b/api-ref/source/v2/samples/schemas-task-show-response.json deleted file mode 100644 index 044f48c0..00000000 --- a/api-ref/source/v2/samples/schemas-task-show-response.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "name": "task", - "properties": { - "created_at": { - "description": "Datetime when this resource was created", - "type": "string" - }, - "expires_at": { - "description": "Datetime when this resource would be subject to removal", - "type": [ - "null", - "string" - ] - }, - "id": { - "description": "An identifier for the task", - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "type": "string" - }, - "input": { - "description": "The parameters required by task, JSON blob", - "type": [ - "null", - "object" - ] - }, - "message": { - "description": "Human-readable informative message only included when appropriate (usually on failure)", - "type": "string" - }, - "owner": { - "description": "An identifier for the owner of this task", - "type": "string" - }, - "result": { - "description": "The result of current task, JSON blob", - "type": [ - "null", - "object" - ] - }, - "schema": { - "readOnly": true, - "type": "string" - }, - "self": { - "readOnly": true, - "type": "string" - }, - "status": { - "description": "The current status of this task", - "enum": [ - "pending", - "processing", - "success", - "failure" - ], - "type": "string" - }, - "type": { - "description": "The type of task represented by this content", - "enum": [ - "import" - ], - "type": "string" - }, - "updated_at": { - "description": 
"Datetime when this resource was updated", - "type": "string" - } - } -} diff --git a/api-ref/source/v2/samples/schemas-tasks-list-response.json b/api-ref/source/v2/samples/schemas-tasks-list-response.json deleted file mode 100644 index 660dbe1e..00000000 --- a/api-ref/source/v2/samples/schemas-tasks-list-response.json +++ /dev/null @@ -1,71 +0,0 @@ -{ - "links": [ - { - "href": "{schema}", - "rel": "describedby" - } - ], - "name": "tasks", - "properties": { - "schema": { - "type": "string" - }, - "tasks": { - "items": { - "name": "task", - "properties": { - "created_at": { - "description": "Datetime when this resource was created", - "type": "string" - }, - "expires_at": { - "description": "Datetime when this resource would be subject to removal", - "type": [ - "null", - "string" - ] - }, - "id": { - "description": "An identifier for the task", - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "type": "string" - }, - "owner": { - "description": "An identifier for the owner of this task", - "type": "string" - }, - "schema": { - "readOnly": true, - "type": "string" - }, - "self": { - "readOnly": true, - "type": "string" - }, - "status": { - "description": "The current status of this task", - "enum": [ - "pending", - "processing", - "success", - "failure" - ], - "type": "string" - }, - "type": { - "description": "The type of task represented by this content", - "enum": [ - "import" - ], - "type": "string" - }, - "updated_at": { - "description": "Datetime when this resource was updated", - "type": "string" - } - } - }, - "type": "array" - } - } -} diff --git a/api-ref/source/v2/samples/task-create-request.json b/api-ref/source/v2/samples/task-create-request.json deleted file mode 100644 index 625cd681..00000000 --- a/api-ref/source/v2/samples/task-create-request.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "type": "import", - "input": { - "import_from": "http://app-catalog.openstack.example.org/groovy-image", - 
"import_from_format": "qcow2", - "image_properties": { - "disk_format": "vhd", - "container_format": "ovf" - } - } -} diff --git a/api-ref/source/v2/samples/task-create-response.json b/api-ref/source/v2/samples/task-create-response.json deleted file mode 100644 index bd06544e..00000000 --- a/api-ref/source/v2/samples/task-create-response.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "created_at": "2016-06-24T14:57:19Z", - "id": "bb480de2-7077-4ea9-bbe9-be1891290d3e", - "input": { - "image_properties": { - "container_format": "ovf", - "disk_format": "vhd" - }, - "import_from": "http://app-catalog.openstack.example.org/groovy-image", - "import_from_format": "qcow2" - }, - "message": "", - "owner": "fa6c8c1600f4444281658a23ee6da8e8", - "result": null, - "schema": "/v2/schemas/task", - "self": "/v2/tasks/bb480de2-7077-4ea9-bbe9-be1891290d3e", - "status": "pending", - "type": "import", - "updated_at": "2016-06-24T14:57:19Z" -} diff --git a/api-ref/source/v2/samples/task-show-failure-response.json b/api-ref/source/v2/samples/task-show-failure-response.json deleted file mode 100644 index 9063c15e..00000000 --- a/api-ref/source/v2/samples/task-show-failure-response.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "created_at": "2016-06-24T14:57:20Z", - "expires_at": "2016-06-26T14:57:20Z", - "id": "bb480de2-7077-4ea9-bbe9-be1891290d3e", - "input": { - "image_properties": { - "container_format": "ovf", - "disk_format": "vhd" - }, - "import_from": "http://app-catalog.openstack.example.org/groovy-image", - "import_from_format": "qcow2" - }, - "message": "Task failed due to Internal Error", - "owner": "fa6c8c1600f4444281658a23ee6da8e8", - "result": null, - "schema": "/v2/schemas/task", - "self": "/v2/tasks/bb480de2-7077-4ea9-bbe9-be1891290d3e", - "status": "failure", - "type": "import", - "updated_at": "2016-06-24T14:57:20Z" -} diff --git a/api-ref/source/v2/samples/task-show-processing-response.json b/api-ref/source/v2/samples/task-show-processing-response.json deleted file mode 100644 
index 313b8fd9..00000000 --- a/api-ref/source/v2/samples/task-show-processing-response.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "created_at": "2016-06-24T14:40:19Z", - "id": "231c311d-3557-4e23-afc4-6d98af1419e7", - "input": { - "image_properties": { - "container_format": "ovf", - "disk_format": "vhd" - }, - "import_from": "http://example.com", - "import_from_format": "qcow2" - }, - "message": "", - "owner": "fa6c8c1600f4444281658a23ee6da8e8", - "result": null, - "schema": "/v2/schemas/task", - "self": "/v2/tasks/231c311d-3557-4e23-afc4-6d98af1419e7", - "status": "processing", - "type": "import", - "updated_at": "2016-06-24T14:40:20Z" -} diff --git a/api-ref/source/v2/samples/task-show-success-response.json b/api-ref/source/v2/samples/task-show-success-response.json deleted file mode 100644 index b422525b..00000000 --- a/api-ref/source/v2/samples/task-show-success-response.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "created_at": "2016-06-29T16:13:07Z", - "expires_at": "2016-07-01T16:13:07Z", - "id": "805f47d2-8814-4cd7-bef3-37037389a998", - "input": { - "image_properties": { - "container_format": "ovf", - "disk_format": "vhd" - }, - "import_from": "https://apps.openstack.org/excellent-image", - "import_from_format": "qcow2" - }, - "message": "", - "owner": "02a7fb2dd4ef434c8a628c511dcbbeb6", - "result": { - "image_id": "2b61ed2b-f800-4da0-99ff-396b742b8646" - }, - "schema": "/v2/schemas/task", - "self": "/v2/tasks/805f47d2-8814-4cd7-bef3-37037389a998", - "status": "success", - "type": "import", - "updated_at": "2016-06-29T16:13:07Z" -} diff --git a/api-ref/source/v2/samples/tasks-list-response.json b/api-ref/source/v2/samples/tasks-list-response.json deleted file mode 100644 index 2bf39df6..00000000 --- a/api-ref/source/v2/samples/tasks-list-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "first": "/v2/tasks", - "schema": "/v2/schemas/tasks", - "tasks": [ - { - "created_at": "2016-06-24T14:44:19Z", - "id": "08b7e1c8-3821-4f54-b3b8-d6655d178cdf", - "owner": 
"fa6c8c1600f4444281658a23ee6da8e8", - "schema": "/v2/schemas/task", - "self": "/v2/tasks/08b7e1c8-3821-4f54-b3b8-d6655d178cdf", - "status": "processing", - "type": "import", - "updated_at": "2016-06-24T14:44:19Z" - }, - { - "created_at": "2016-06-24T14:40:19Z", - "id": "231c311d-3557-4e23-afc4-6d98af1419e7", - "owner": "fa6c8c1600f4444281658a23ee6da8e8", - "schema": "/v2/schemas/task", - "self": "/v2/tasks/231c311d-3557-4e23-afc4-6d98af1419e7", - "status": "processing", - "type": "import", - "updated_at": "2016-06-24T14:40:20Z" - } - ] -} diff --git a/api-ref/source/v2/tasks-parameters.yaml b/api-ref/source/v2/tasks-parameters.yaml deleted file mode 100644 index 6b5a6a58..00000000 --- a/api-ref/source/v2/tasks-parameters.yaml +++ /dev/null @@ -1,195 +0,0 @@ -# variables in header -Content-Type-json: - description: | - The media type descriptor for the request body. Use ``application/json``. - in: header - required: true - type: string - -# variables in path -task_id: - description: | - The identifier for the task, a UUID. - in: path - required: true - type: string - -# variables in query -limit: - description: | - Requests a page size of items. Returns a number of items up to a limit - value. Use the ``limit`` parameter to make an initial limited request and - use the ID of the last-seen item from the response as the ``marker`` - parameter value in a subsequent limited request. - in: query - required: false - type: integer -marker: - description: | - The ID of the last-seen item. Use the ``limit`` parameter to make an - initial limited request and use the ID of the last-seen item from the - response as the ``marker`` parameter value in a subsequent limited request. - in: query - required: false - type: string -sort_dir: - description: | - Sorts the response by a set of one or more sort direction and attribute - (``sort_key``) combinations. A valid value for the sort direction is - ``asc`` (ascending) or ``desc`` (descending). 
If you omit the sort - direction in a set, the default is ``desc``. - in: query - required: false - type: string -sort_key: - description: | - Sorts the response by one of the following attributes: ``created_at``, - ``expires_at``, ``status``, ``type``, ``updated_at``. Default is - ``created_at``. - in: query - required: false - type: string -status-in-query: - description: | - Filters the response by a task status. A valid value is ``pending``, - ``processing``, ``success``, or ``failure``. - in: query - required: false - type: string -type-in-query: - description: | - Filters the response by a task type. A valid value is ``import``. - in: query - required: false - type: string - -# variables in body -created_at: - description: | - The date and time when the task was created. - - The date and time stamp format is `ISO 8601 - `_. - in: body - required: true - type: string -expires_at: - description: | - The date and time when the task is subject to removal. While the *task - object*, that is, the record describing the task is subject to deletion, - the result of the task (for example, an imported image) still exists. - - The date and time stamp format is `ISO 8601 - `_. - - This value is only set when the task reaches status ``success`` or - ``failure``. Otherwise its value is ``null``. It may not appear in - the response when its value is ``null``. - in: body - required: true - type: string -first: - description: | - The URI for the first page of response. - in: body - required: true - type: string -id: - description: | - The UUID of the task. - in: body - required: true - type: string -input: - description: | - A JSON object specifying the input parameters to the task. Consult your - cloud provider's documentation for details. - in: body - required: true - type: object -message: - description: | - Human-readable text, possibly an empty string, usually displayed in an - error situation to provide more information about what has occurred. 
- in: body - required: true - type: string -next: - description: | - The URI for the next page of response. Will not be present on the last - page of the response. - in: body - required: true - type: string -owner: - description: | - An identifier for the owner of the task, usually the tenant ID. - in: body - required: true - type: string -result: - description: | - A JSON object specifying information about the ultimate outcome of the - task. Consult your cloud provider's documentation for details. - in: body - required: true - type: object -schema-task: - description: | - The URI for the schema describing an image task. - in: body - required: true - type: string -schema-tasks: - description: | - The URI for the schema describing an image task list. - in: body - required: true - type: string -self: - description: | - A URI for this task. - in: body - required: true - type: string -status: - description: | - The current status of this task. The value can be ``pending``, - ``processing``, ``success`` or ``failure``. - in: body - required: true - type: string -tasks: - description: | - A list of sparse *task* objects. Each object contains the following - fields: - - - ``created_at`` - - ``id`` - - ``owner`` - - ``schema`` - - ``self`` - - ``status`` - - ``type`` - - ``updated_at`` - in: body - required: true - type: array -type: - description: | - The type of task represented by this content. - in: body - required: true - type: string -updated_at: - description: | - The date and time when the task was updated. - - The date and time stamp format is `ISO 8601 - `_. - - If the ``updated_at`` date and time stamp is not set, its value is - ``null``. - in: body - required: true - type: string diff --git a/api-ref/source/v2/tasks-schemas.inc b/api-ref/source/v2/tasks-schemas.inc deleted file mode 100644 index 5b777f92..00000000 --- a/api-ref/source/v2/tasks-schemas.inc +++ /dev/null @@ -1,72 +0,0 @@ -.. 
-*- rst -*- - -Task Schemas -************ - -Gets a JSON-schema document that represents an individual task and a -list of tasks. - -.. _tasks-schema: - -Show tasks schema -~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/schemas/tasks - -*(Since Images v2.2)* - -Shows a JSON schema document that represents a list of *tasks*. - -An tasks list entity is a container of entities containing abbreviated -information about individual tasks. - -The following schema is solely an example. Consider only the -response to the API call as authoritative. - -Normal response codes: 200 - -Error response codes: 401 - - -Request -------- - -This operation has no request parameters and does not accept a request body. - - -Response Example ----------------- - -.. literalinclude:: samples/schemas-tasks-list-response.json - :language: json - -.. _task-schema: - -Show task schema -~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/schemas/task - -*(Since Images v2.2)* - -Shows a JSON schema document that represents an *task* entity. - -The following schema is solely an example. Consider only the -response to the API call as authoritative. - -Normal response codes: 200 - -Error response codes: 401 - - -Request -------- - -This operation has no request parameters and does not accept a request body. - - -Response Example ----------------- - -.. literalinclude:: samples/schemas-task-show-response.json - :language: json diff --git a/api-ref/source/v2/tasks.inc b/api-ref/source/v2/tasks.inc deleted file mode 100644 index be9cda0d..00000000 --- a/api-ref/source/v2/tasks.inc +++ /dev/null @@ -1,198 +0,0 @@ -.. -*- rst -*- - -Tasks -***** - -Creates, lists, and shows details for tasks. - -*(Since API v2.2)* - - -General Information -~~~~~~~~~~~~~~~~~~~ - -**API Status** - -This API was made admin-only by default in the OpenStack Mitaka release. Thus -the following calls may not be available to end users in your cloud. Please -consult your cloud provider's documentation for more information. 
- -**Conceptual Overview** - -Please see the `Tasks `_ -section of the Glance Developers Documentation for a conceptual overview of -tasks. - -**Task Status** - -The possible status values for tasks are presented in the following table. - -.. list-table:: - :header-rows: 1 - - * - Status - - Description - * - pending - - The task is waiting for execution. - * - processing - - Execution of the task is underway. - * - success - - The task completed successfully. The ``result`` element should be - populated. - * - failure - - The task failed to complete. The ``message`` element should be a - non-empty string. - - -Create task -~~~~~~~~~~~ - -.. rest_method:: POST /v2/tasks - -Creates a task. - -Normal response codes: 201 - -Error response codes: 401, 413, 415 - - -Request -------- - -.. rest_parameters:: tasks-parameters.yaml - - - type: type - - input: input - - -Request Example ---------------- - -.. literalinclude:: samples/task-create-request.json - :language: json - - -Response Parameters -------------------- - -.. rest_parameters:: tasks-parameters.yaml - - - created_at: created_at - - id: id - - input: input - - message: message - - owner: owner - - result: result - - schema: schema-task - - self: self - - status: status - - type: type - - updated_at: updated_at - -Response Example ----------------- - -.. literalinclude:: samples/task-create-response.json - :language: json - - -List tasks -~~~~~~~~~~ - -.. rest_method:: GET /v2/tasks - -Lists tasks. - -Normal response codes: 200 - -Error response codes: 403, 404, 413 - - -Request -------- - -.. rest_parameters:: tasks-parameters.yaml - - - limit: limit - - marker: marker - - sort_dir: sort_dir - - sort_key: sort_key - - status: status-in-query - - type: type-in-query - - -Response Parameters -------------------- - -.. rest_parameters:: tasks-parameters.yaml - - - first: first - - next: next - - schema: schema-tasks - - tasks: tasks - - -Response Example ----------------- - -.. 
literalinclude:: samples/tasks-list-response.json - :language: json - - -Show task details -~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/tasks/{task_id} - -Shows details for a task. - - -Normal response codes: 200 - -Error response codes: 404 - - -Request -------- - -.. rest_parameters:: tasks-parameters.yaml - - - task_id: task_id - - -Response Parameters -------------------- - -.. rest_parameters:: tasks-parameters.yaml - - - created_at: created_at - - expires_at: expires_at - - id: id - - input: input - - message: message - - owner: owner - - result: result - - schema: schema-task - - self: self - - status: status - - type: type - - updated_at: updated_at - - -Response Example (task status: processing) ------------------------------------------- - -.. literalinclude:: samples/task-show-processing-response.json - :language: json - -Response Example (task status: success) ------------------------------------------- - -.. literalinclude:: samples/task-show-success-response.json - :language: json - -Response Example (task status: failure) ---------------------------------------- - -.. literalinclude:: samples/task-show-failure-response.json - :language: json diff --git a/api-ref/source/versions/index.rst b/api-ref/source/versions/index.rst deleted file mode 100644 index b2fe30f3..00000000 --- a/api-ref/source/versions/index.rst +++ /dev/null @@ -1,65 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. 
- -:tocdepth: 2 - -====================== -Image Service Versions -====================== - -.. rest_expand_all:: - -.. include:: versions.inc - -Version History -*************** - -**Ocata changes** - -- version 2.5 is CURRENT -- version 2.4 is SUPPORTED - -**Newton changes** - -- version 2.4 is CURRENT -- version 2.3 is SUPPORTED -- version 1.1 is DEPRECATED -- version 1.0 is DEPRECATED - -**Kilo changes** - -- version 2.3 is CURRENT -- version 1.1 is SUPPORTED - -**Havana changes** - -- version 2.2 is CURRENT -- version 2.1 is SUPPORTED - -**Grizzly changes** - -- version 2.1 is CURRENT -- version 2.0 is SUPPORTED - -**Folson changes** - -- version 2.0 is CURRENT - -**Diablo changes** - -- version 1.1 is CURRENT -- version 1.0 is SUPPORTED - -**Bexar changes** - -- version 1.0 is CURRENT diff --git a/api-ref/source/versions/samples/image-versions-response.json b/api-ref/source/versions/samples/image-versions-response.json deleted file mode 100644 index df42d877..00000000 --- a/api-ref/source/versions/samples/image-versions-response.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "versions": [ - { - "id": "v2.5", - "links": [ - { - "href": "http://glance.openstack.example.org/v2/", - "rel": "self" - } - ], - "status": "CURRENT" - }, - { - "id": "v2.4", - "links": [ - { - "href": "http://glance.openstack.example.org/v2/", - "rel": "self" - } - ], - "status": "SUPPORTED" - }, - { - "id": "v2.3", - "links": [ - { - "href": "http://glance.openstack.example.org/v2/", - "rel": "self" - } - ], - "status": "SUPPORTED" - }, - { - "id": "v2.2", - "links": [ - { - "href": "http://glance.openstack.example.org/v2/", - "rel": "self" - } - ], - "status": "SUPPORTED" - }, - { - "id": "v2.1", - "links": [ - { - "href": "http://glance.openstack.example.org/v2/", - "rel": "self" - } - ], - "status": "SUPPORTED" - }, - { - "id": "v2.0", - "links": [ - { - "href": "http://glance.openstack.example.org/v2/", - "rel": "self" - } - ], - "status": "SUPPORTED" - }, - { - "id": "v1.1", - "links": 
[ - { - "href": "http://glance.openstack.example.org/v1/", - "rel": "self" - } - ], - "status": "DEPRECATED" - }, - { - "id": "v1.0", - "links": [ - { - "href": "http://glance.openstack.example.org/v1/", - "rel": "self" - } - ], - "status": "DEPRECATED" - } - ] -} diff --git a/api-ref/source/versions/versions.inc b/api-ref/source/versions/versions.inc deleted file mode 100644 index 6e77818c..00000000 --- a/api-ref/source/versions/versions.inc +++ /dev/null @@ -1,56 +0,0 @@ -.. -*- rst -*- - -API versions -************ - - -List API versions -~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /versions - -Lists information about all Image service API versions supported by this -deployment, including the URIs. - - -Normal response codes: 200 - - -Request -------- - -There are no request parameters. - - - -Response Example ----------------- - -.. literalinclude:: samples/image-versions-response.json - :language: json - - -List API versions -~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET / - -Lists information about all Image service API versions supported by this -deployment, including the URIs. - - - -Normal response codes: 300 - - -Request -------- - -There are no request parameters. - - -Response Example ----------------- - -.. 
literalinclude:: samples/image-versions-response.json - :language: json diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index efceab81..00000000 --- a/babel.cfg +++ /dev/null @@ -1 +0,0 @@ -[python: **.py] diff --git a/bandit.yaml b/bandit.yaml deleted file mode 100644 index 2e7b1871..00000000 --- a/bandit.yaml +++ /dev/null @@ -1,245 +0,0 @@ -# optional: after how many files to update progress -#show_progress_every: 100 - -# optional: plugins directory name -#plugins_dir: 'plugins' - -# optional: plugins discovery name pattern -plugin_name_pattern: '*.py' - -# optional: terminal escape sequences to display colors -#output_colors: -# DEFAULT: '\033[0m' -# HEADER: '\033[95m' -# LOW: '\033[94m' -# MEDIUM: '\033[93m' -# HIGH: '\033[91m' - -# optional: log format string -#log_format: "[%(module)s]\t%(levelname)s\t%(message)s" - -# globs of files which should be analyzed -include: - - '*.py' - - '*.pyw' - -# a list of strings, which if found in the path will cause files to be excluded -# for example /tests/ - to remove all all files in tests directory -exclude_dirs: - - '/tests/' - -profiles: - gate: - include: - - - any_other_function_with_shell_equals_true - - assert_used - - blacklist_calls - - blacklist_import_func - - # One of the blacklisted imports is the subprocess module. Keystone - # has to import the subprocess module in a single module for - # eventlet support so in most cases bandit won't be able to detect - # that subprocess is even being imported. Also, Bandit's - # recommendation is just to check that the use is safe without any - # documentation on what safe or unsafe usage is. So this test is - # skipped. 
- # - blacklist_imports - - - exec_used - - - execute_with_run_as_root_equals_true - - # - hardcoded_bind_all_interfaces # TODO: enable this test - - # Not working because wordlist/default-passwords file not bundled, - # see https://bugs.launchpad.net/bandit/+bug/1451575 : - # - hardcoded_password - - # Not used because it's prone to false positives: - # - hardcoded_sql_expressions - - # - hardcoded_tmp_directory # TODO: enable this test - - - jinja2_autoescape_false - - - linux_commands_wildcard_injection - - - paramiko_calls - - - password_config_option_not_marked_secret - - request_with_no_cert_validation - - set_bad_file_permissions - - subprocess_popen_with_shell_equals_true - # - subprocess_without_shell_equals_true # TODO: enable this test - - start_process_with_a_shell - # - start_process_with_no_shell # TODO: enable this test - - start_process_with_partial_path - - ssl_with_bad_defaults - - ssl_with_bad_version - - ssl_with_no_version - # - try_except_pass # TODO: enable this test - - - use_of_mako_templates - -blacklist_calls: - bad_name_sets: - # - pickle: - # qualnames: [pickle.loads, pickle.load, pickle.Unpickler, - # cPickle.loads, cPickle.load, cPickle.Unpickler] - # message: "Pickle library appears to be in use, possible security issue." - # TODO: enable this test - - marshal: - qualnames: [marshal.load, marshal.loads] - message: "Deserialization with the marshal module is possibly dangerous." - # - md5: - # qualnames: [hashlib.md5, Crypto.Hash.MD2.new, Crypto.Hash.MD4.new, Crypto.Hash.MD5.new, cryptography.hazmat.primitives.hashes.MD5] - # message: "Use of insecure MD2, MD4, or MD5 hash function." - # TODO: enable this test - - mktemp_q: - qualnames: [tempfile.mktemp] - message: "Use of insecure and deprecated function (mktemp)." - - eval: - qualnames: [eval] - message: "Use of possibly insecure function - consider using safer ast.literal_eval." 
- - mark_safe: - names: [mark_safe] - message: "Use of mark_safe() may expose cross-site scripting vulnerabilities and should be reviewed." - - httpsconnection: - qualnames: [httplib.HTTPSConnection] - message: "Use of HTTPSConnection does not provide security, see https://wiki.openstack.org/wiki/OSSN/OSSN-0033" - - yaml_load: - qualnames: [yaml.load] - message: "Use of unsafe yaml load. Allows instantiation of arbitrary objects. Consider yaml.safe_load()." - - urllib_urlopen: - qualnames: [urllib.urlopen, urllib.urlretrieve, urllib.URLopener, urllib.FancyURLopener, urllib2.urlopen, urllib2.Request] - message: "Audit url open for permitted schemes. Allowing use of file:/ or custom schemes is often unexpected." - - random: - qualnames: [random.random, random.randrange, random.randint, random.choice, random.uniform, random.triangular] - message: "Standard pseudo-random generators are not suitable for security/cryptographic purposes." - level: "LOW" - - # Most of this is based off of Christian Heimes' work on defusedxml: - # https://pypi.python.org/pypi/defusedxml/#defusedxml-sax - - # TODO(jaegerandi): Enable once defusedxml is in global requirements. - #- xml_bad_cElementTree: - # qualnames: [xml.etree.cElementTree.parse, - # xml.etree.cElementTree.iterparse, - # xml.etree.cElementTree.fromstring, - # xml.etree.cElementTree.XMLParser] - # message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." - #- xml_bad_ElementTree: - # qualnames: [xml.etree.ElementTree.parse, - # xml.etree.ElementTree.iterparse, - # xml.etree.ElementTree.fromstring, - # xml.etree.ElementTree.XMLParser] - # message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." 
- - xml_bad_expatreader: - qualnames: [xml.sax.expatreader.create_parser] - message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." - - xml_bad_expatbuilder: - qualnames: [xml.dom.expatbuilder.parse, - xml.dom.expatbuilder.parseString] - message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." - - xml_bad_sax: - qualnames: [xml.sax.parse, - xml.sax.parseString, - xml.sax.make_parser] - message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." - - xml_bad_minidom: - qualnames: [xml.dom.minidom.parse, - xml.dom.minidom.parseString] - message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." - - xml_bad_pulldom: - qualnames: [xml.dom.pulldom.parse, - xml.dom.pulldom.parseString] - message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." - - xml_bad_etree: - qualnames: [lxml.etree.parse, - lxml.etree.fromstring, - lxml.etree.RestrictedElement, - lxml.etree.GlobalParserTLS, - lxml.etree.getDefaultParser, - lxml.etree.check_docinfo] - message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." - - -shell_injection: - # Start a process using the subprocess module, or one of its wrappers. - subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call, - subprocess.check_output, utils.execute, utils.execute_with_timeout] - # Start a process with a function vulnerable to shell injection. 
- shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4, - popen2.popen2, popen2.popen3, popen2.popen4, popen2.Popen3, - popen2.Popen4, commands.getoutput, commands.getstatusoutput] - # Start a process with a function that is not vulnerable to shell injection. - no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv,os.execve, - os.execvp, os.execvpe, os.spawnl, os.spawnle, os.spawnlp, - os.spawnlpe, os.spawnv, os.spawnve, os.spawnvp, os.spawnvpe, - os.startfile] - -blacklist_imports: - bad_import_sets: - - telnet: - imports: [telnetlib] - level: HIGH - message: "Telnet is considered insecure. Use SSH or some other encrypted protocol." - - info_libs: - imports: [pickle, cPickle, subprocess, Crypto] - level: LOW - message: "Consider possible security implications associated with {module} module." - - # Most of this is based off of Christian Heimes' work on defusedxml: - # https://pypi.python.org/pypi/defusedxml/#defusedxml-sax - - - xml_libs: - imports: [xml.etree.cElementTree, - xml.etree.ElementTree, - xml.sax.expatreader, - xml.sax, - xml.dom.expatbuilder, - xml.dom.minidom, - xml.dom.pulldom, - lxml.etree, - lxml] - message: "Using {module} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {module} with the equivilent defusedxml package." - level: LOW - - xml_libs_high: - imports: [xmlrpclib] - message: "Using {module} to parse untrusted XML data is known to be vulnerable to XML attacks. Use defused.xmlrpc.monkey_patch() function to monkey-patch xmlrpclib and mitigate XML vulnerabilities." 
- level: HIGH - -hardcoded_tmp_directory: - tmp_dirs: ['/tmp', '/var/tmp', '/dev/shm'] - -hardcoded_password: - # Support for full path, relative path and special "%(site_data_dir)s" - # substitution (/usr/{local}/share) - word_list: "%(site_data_dir)s/wordlist/default-passwords" - -ssl_with_bad_version: - bad_protocol_versions: - - 'PROTOCOL_SSLv2' - - 'SSLv2_METHOD' - - 'SSLv23_METHOD' - - 'PROTOCOL_SSLv3' # strict option - - 'PROTOCOL_TLSv1' # strict option - - 'SSLv3_METHOD' # strict option - - 'TLSv1_METHOD' # strict option - -password_config_option_not_marked_secret: - function_names: - - oslo.config.cfg.StrOpt - - oslo_config.cfg.StrOpt - -execute_with_run_as_root_equals_true: - function_names: - - ceilometer.utils.execute - - cinder.utils.execute - - neutron.agent.linux.utils.execute - - nova.utils.execute - - nova.utils.trycmd - -try_except_pass: - check_typed_exception: True diff --git a/doc/source/_static/.placeholder b/doc/source/_static/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/doc/source/admin/apache-httpd.rst b/doc/source/admin/apache-httpd.rst deleted file mode 100644 index ec8ef3a6..00000000 --- a/doc/source/admin/apache-httpd.rst +++ /dev/null @@ -1,93 +0,0 @@ -======================= -Running Glance in HTTPD -======================= - -Since the Pike release Glance has packaged a wsgi script entrypoint that -enables you to run it with a real web server like Apache HTTPD or nginx. To -deploy this there are several patterns. This doc shows two common ways of -deploying Glance with Apache HTTPD. - -uwsgi ------ - -This is the current recommended way to deploy Glance with a real web server. -In this deployment method we use uwsgi as a web server bound to a random local -port. Then we configure apache using mod_proxy to forward all incoming requests -on the specified endpoint to that local webserver. 
This has the advantage of -letting apache manage all inbound http connections, but letting uwsgi manage -running the python code. It also means when we make changes to Glance code -or configuration we don't need to restart all of apache (which may be running -other services too) and just need to restart the local uwsgi daemon. - -The httpd/ directory contains sample files for configuring HTTPD to run Glance -under uwsgi in this configuration. To use the sample configs simply copy -`httpd/uwsgi-glance-api.conf` to the appropriate location for your Apache -server. On Debian/Ubuntu systems it is:: - - /etc/apache2/sites-available/uwsgi-glance-api.conf - -On Red Hat based systems it is:: - - /etc/httpd/conf.d/uwsgi-glance-api.conf - -Enable mod_proxy by running ``sudo a2enmod proxy`` - -Then on Ubuntu/Debian systems enable the site by creating a symlink from the -file in ``sites-available`` to ``sites-enabled``. (This is not required on Red -Hat based systems):: - - ln -s /etc/apache2/sites-available/uwsgi-glance-api.conf /etc/apache2/sites-enabled - -Start or restart HTTPD to pick up the new configuration. - -Now we need to configure and start the uwsgi service. Copy the -`httpd/glance-api-uwsgi.ini` file to `/etc/glance`. Update the file to match -your system configuration (for example, you'll want to set the number of -processes and threads). - -Install uwsgi and start the glance-api server using uwsgi:: - - sudo pip install uwsgi - uwsgi --ini /etc/glance/glance-api-uwsgi.ini - -.. NOTE:: - - In the sample configs port 60999 is used, but this doesn't matter and is - just a randomly selected number. This is not a contract on the port used - for the local uwsgi daemon. - - -mod_proxy_uwsgi -''''''''''''''' - -.. WARNING:: - - Running Glance under HTTPD in this configuration will only work on Python 2 - if you use ``Transfer-Encoding: chunked``. Also if running with Python 2 - apache will be buffering the chunked encoding before passing the request - on to uwsgi. 
See bug: https://github.com/unbit/uwsgi/issues/1540 - -Instead of running uwsgi as a webserver listening on a local port and then -having Apache HTTP proxy all the incoming requests with mod_proxy. The -normally recommended way of deploying uwsgi with Apache HTTPD is to use -mod_proxy_uwsgi and set up a local socket file for uwsgi to listen on. Apache -will send the requests using the uwsgi protocol over this local socket -file. However, there are issues with doing this and using chunked-encoding. - -You can work around these issues by configuring your apache proxy to buffer the -chunked data and send the full content length to uwsgi. You do this by adding:: - - SetEnv proxy-sendcl 1 - -to the apache config file using mod_proxy_uwsgi. For more details on using -mod_proxy_uwsgi see the official docs: -http://uwsgi-docs.readthedocs.io/en/latest/Apache.html?highlight=mod_uwsgi_proxy#mod-proxy-uwsgi - -mod_wsgi --------- - -This deployment method is not recommended for using Glance. The mod_wsgi -protocol does not support ``Transfer-Encoding: chunked`` and therefore makes it -unsuitable for use with Glance. However, you could theoretically deploy Glance -using mod_wsgi but it will fail on any requests that use a chunked transfer -encoding. diff --git a/doc/source/admin/authentication.rst b/doc/source/admin/authentication.rst deleted file mode 100644 index 35fd0277..00000000 --- a/doc/source/admin/authentication.rst +++ /dev/null @@ -1,118 +0,0 @@ -.. - Copyright 2010 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - License for the specific language governing permissions and limitations - under the License. - -.. _authentication: - -Authentication With Keystone -============================ - -Glance may optionally be integrated with Keystone. Setting this up is -relatively straightforward, as the Keystone distribution includes the -necessary middleware. Once you have installed Keystone -and edited your configuration files, newly created images will have -their `owner` attribute set to the tenant of the authenticated users, -and the `is_public` attribute will cause access to those images for -which it is `false` to be restricted to only the owner, users with -admin context, or tenants/users with whom the image has been shared. - - -Configuring the Glance servers to use Keystone ----------------------------------------------- - -Keystone is integrated with Glance through the use of middleware. The -default configuration files for both the Glance API and the Glance -Registry use a single piece of middleware called ``unauthenticated-context``, -which generates a request context containing blank authentication -information. In order to configure Glance to use Keystone, the -``authtoken`` and ``context`` middlewares must be deployed in place of the -``unauthenticated-context`` middleware. The ``authtoken`` middleware performs -the authentication token validation and retrieves actual user authentication -information. It can be found in the Keystone distribution. - - -Configuring Glance API to use Keystone --------------------------------------- - -Configuring Glance API to use Keystone is relatively straight -forward. The first step is to ensure that declarations for the two -pieces of middleware exist in the ``glance-api-paste.ini``. 
Here is -an example for ``authtoken``:: - - [filter:authtoken] - paste.filter_factory = keystonemiddleware.auth_token:filter_factory - auth_url = http://localhost:35357 - project_domain_id = default - project_name = service_admins - user_domain_id = default - username = glance_admin - password = password1234 - -The actual values for these variables will need to be set depending on -your situation. For more information, please refer to the Keystone -`documentation`_ on the ``auth_token`` middleware. - -.. _`documentation`: http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html#configuration - -In short: - -* The ``auth_url`` variable points to the Keystone service. - This information is used by the middleware to actually query Keystone about - the validity of the authentication tokens. -* The auth credentials (``project_name``, ``project_domain_id``, - ``user_domain_id``, ``username``, and ``password``) will be used to - retrieve a service token. That token will be used to authorize user - tokens behind the scenes. - -Finally, to actually enable using Keystone authentication, the -application pipeline must be modified. By default, it looks like:: - - [pipeline:glance-api] - pipeline = versionnegotiation unauthenticated-context apiv1app - -Your particular pipeline may vary depending on other options, such as -the image cache. This must be changed by replacing ``unauthenticated-context`` -with ``authtoken`` and ``context``:: - - [pipeline:glance-api] - pipeline = versionnegotiation authtoken context apiv1app - - -Configuring Glance Registry to use Keystone -------------------------------------------- - -Configuring Glance Registry to use Keystone is also relatively -straight forward. The same middleware needs to be added -to ``glance-registry-paste.ini`` as was needed by Glance API; -see above for an example of the ``authtoken`` configuration. 
- -Again, to enable using Keystone authentication, the appropriate -application pipeline must be selected. By default, it looks like:: - - [pipeline:glance-registry-keystone] - pipeline = authtoken context registryapp - -To enable the above application pipeline, in your main ``glance-registry.conf`` -configuration file, select the appropriate deployment flavor by adding a -``flavor`` attribute in the ``paste_deploy`` group:: - - [paste_deploy] - flavor = keystone - -.. note:: - If your authentication service uses a role other than ``admin`` to identify - which users should be granted admin-level privileges, you must define it - in the ``admin_role`` config attribute in both ``glance-registry.conf`` and - ``glance-api.conf``. diff --git a/doc/source/admin/cache.rst b/doc/source/admin/cache.rst deleted file mode 100644 index f1de58b6..00000000 --- a/doc/source/admin/cache.rst +++ /dev/null @@ -1,172 +0,0 @@ -.. - Copyright 2011 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _image-cache: - -The Glance Image Cache -====================== - -The Glance API server may be configured to have an optional local image cache. -A local image cache stores a copy of image files, essentially enabling multiple -API servers to serve the same image file, resulting in an increase in -scalability due to an increased number of endpoints serving an image file. 
- -This local image cache is transparent to the end user -- in other words, the -end user doesn't know that the Glance API is streaming an image file from -its local cache or from the actual backend storage system. - -Managing the Glance Image Cache -------------------------------- - -While image files are automatically placed in the image cache on successful -requests to ``GET /images/``, the image cache is not automatically -managed. Here, we describe the basics of how to manage the local image cache -on Glance API servers and how to automate this cache management. - -Configuration options for the Image Cache ------------------------------------------ - -The Glance cache uses two files: one for configuring the server and -another for the utilities. The ``glance-api.conf`` is for the server -and the ``glance-cache.conf`` is for the utilities. - -The following options are in both configuration files. These need the -same values otherwise the cache will potentially run into problems. - -- ``image_cache_dir`` This is the base directory where Glance stores - the cache data (Required to be set, as does not have a default). -- ``image_cache_sqlite_db`` Path to the sqlite file database that will - be used for cache management. This is a relative path from the - ``image_cache_dir`` directory (Default:``cache.db``). -- ``image_cache_driver`` The driver used for cache management. - (Default:``sqlite``) -- ``image_cache_max_size`` The size when the glance-cache-pruner will - remove the oldest images, to reduce the bytes until under this value. - (Default:``10 GB``) -- ``image_cache_stall_time`` The amount of time an incomplete image will - stay in the cache, after this the incomplete image will be deleted. - (Default:``1 day``) - -The following values are the ones that are specific to the -``glance-cache.conf`` and are only required for the prefetcher to run -correctly. 
- -- ``admin_user`` The username for an admin account, this is so it can - get the image data into the cache. -- ``admin_password`` The password to the admin account. -- ``admin_tenant_name`` The tenant of the admin account. -- ``auth_url`` The URL used to authenticate to keystone. This will - be taken from the environment variables if it exists. -- ``filesystem_store_datadir`` This is used if using the filesystem - store, points to where the data is kept. -- ``filesystem_store_datadirs`` This is used to point to multiple - filesystem stores. -- ``registry_host`` The URL to the Glance registry. - -Controlling the Growth of the Image Cache -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The image cache has a configurable maximum size (the ``image_cache_max_size`` -configuration file option). The ``image_cache_max_size`` is an upper limit -beyond which pruner, if running, starts cleaning the images cache. -However, when images are successfully returned from a call to -``GET /images/``, the image cache automatically writes the image -file to its cache, regardless of whether the resulting write would make the -image cache's size exceed the value of ``image_cache_max_size``. -In order to keep the image cache at or below this maximum cache size, -you need to run the ``glance-cache-pruner`` executable. - -The recommended practice is to use ``cron`` to fire ``glance-cache-pruner`` -at a regular interval. - -Cleaning the Image Cache -~~~~~~~~~~~~~~~~~~~~~~~~ - -Over time, the image cache can accumulate image files that are either in -a stalled or invalid state. Stalled image files are the result of an image -cache write failing to complete. Invalid image files are the result of an -image file not being written properly to disk. - -To remove these types of files, you run the ``glance-cache-cleaner`` -executable. - -The recommended practice is to use ``cron`` to fire ``glance-cache-cleaner`` -at a semi-regular interval. 
- -Prefetching Images into the Image Cache -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Some installations have base (sometimes called "golden") images that are -very commonly used to boot virtual machines. When spinning up a new API -server, administrators may wish to prefetch these image files into the -local image cache to ensure that reads of those popular image files come -from a local cache. - -To queue an image for prefetching, you can use one of the following methods: - - * If the ``cache_manage`` middleware is enabled in the application pipeline, - you may call ``PUT /queued-images/`` to queue the image with - identifier ```` - - Alternately, you can use the ``glance-cache-manage`` program to queue the - image. This program may be run from a different host than the host - containing the image cache. Example usage:: - - $> glance-cache-manage --host= queue-image - - This will queue the image with identifier ```` for prefetching - -Once you have queued the images you wish to prefetch, call the -``glance-cache-prefetcher`` executable, which will prefetch all queued images -concurrently, logging the results of the fetch for each image. - -Finding Which Images are in the Image Cache -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can find out which images are in the image cache using one of the -following methods: - - * If the ``cachemanage`` middleware is enabled in the application pipeline, - you may call ``GET /cached-images`` to see a JSON-serialized list of - mappings that show cached images, the number of cache hits on each image, - the size of the image, and the times they were last accessed. - - Alternately, you can use the ``glance-cache-manage`` program. This program - may be run from a different host than the host containing the image cache. 
- Example usage:: - - $> glance-cache-manage --host= list-cached - - * You can issue the following call on \*nix systems (on the host that contains - the image cache):: - - $> ls -lhR $IMAGE_CACHE_DIR - - where ``$IMAGE_CACHE_DIR`` is the value of the ``image_cache_dir`` - configuration variable. - - Note that the image's cache hit is not shown using this method. - -Manually Removing Images from the Image Cache -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If the ``cachemanage`` middleware is enabled, you may call -``DELETE /cached-images/`` to remove the image file for image -with identifier ```` from the cache. - -Alternately, you can use the ``glance-cache-manage`` program. Example usage:: - - $> glance-cache-manage --host= delete-cached-image diff --git a/doc/source/admin/controllingservers.rst b/doc/source/admin/controllingservers.rst deleted file mode 100644 index 504993b8..00000000 --- a/doc/source/admin/controllingservers.rst +++ /dev/null @@ -1,238 +0,0 @@ -.. - Copyright 2011 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _controlling-servers: - -Controlling Glance Servers -========================== - -This section describes the ways to start, stop, and reload Glance's server -programs. 
- -Starting a server ------------------ - -There are two ways to start a Glance server (either the API server or the -registry server): - -* Manually calling the server program - -* Using the ``glance-control`` server daemon wrapper program - -We recommend using the second method. - -Manually starting the server -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The first is by directly calling the server program, passing in command-line -options and a single argument for a ``paste.deploy`` configuration file to -use when configuring the server application. - -.. note:: - - Glance ships with an ``etc/`` directory that contains sample ``paste.deploy`` - configuration files that you can copy to a standard configuration directory and - adapt for your own uses. Specifically, bind_host must be set properly. - -If you do `not` specify a configuration file on the command line, Glance will -do its best to locate a configuration file in one of the -following directories, stopping at the first config file it finds: - -* ``$CWD`` -* ``~/.glance`` -* ``~/`` -* ``/etc/glance`` -* ``/etc`` - -The filename that is searched for depends on the server application name. So, -if you are starting up the API server, ``glance-api.conf`` is searched for, -otherwise ``glance-registry.conf``. - -If no configuration file is found, you will see an error, like:: - - $> glance-api - ERROR: Unable to locate any configuration file. 
Cannot load application glance-api - -Here is an example showing how you can manually start the ``glance-api`` server and ``glance-registry`` in a shell.:: - - $ sudo glance-api --config-file glance-api.conf --debug & - jsuh@mc-ats1:~$ 2011-04-13 14:50:12 DEBUG [glance-api] ******************************************************************************** - 2011-04-13 14:50:12 DEBUG [glance-api] Configuration options gathered from config file: - 2011-04-13 14:50:12 DEBUG [glance-api] /home/jsuh/glance-api.conf - 2011-04-13 14:50:12 DEBUG [glance-api] ================================================ - 2011-04-13 14:50:12 DEBUG [glance-api] bind_host 65.114.169.29 - 2011-04-13 14:50:12 DEBUG [glance-api] bind_port 9292 - 2011-04-13 14:50:12 DEBUG [glance-api] debug True - 2011-04-13 14:50:12 DEBUG [glance-api] default_store file - 2011-04-13 14:50:12 DEBUG [glance-api] filesystem_store_datadir /home/jsuh/images/ - 2011-04-13 14:50:12 DEBUG [glance-api] registry_host 65.114.169.29 - 2011-04-13 14:50:12 DEBUG [glance-api] registry_port 9191 - 2011-04-13 14:50:12 DEBUG [glance-api] ******************************************************************************** - 2011-04-13 14:50:12 DEBUG [routes.middleware] Initialized with method overriding = True, and path info altering = True - 2011-04-13 14:50:12 DEBUG [eventlet.wsgi.server] (21354) wsgi starting up on http://65.114.169.29:9292/ - - $ sudo glance-registry --config-file glance-registry.conf & - jsuh@mc-ats1:~$ 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] PRAGMA table_info("images") - 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] () - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Col ('cid', 'name', 'type', 'notnull', 'dflt_value', 'pk') - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (0, u'created_at', u'DATETIME', 1, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (1, u'updated_at', 
u'DATETIME', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (2, u'deleted_at', u'DATETIME', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (3, u'deleted', u'BOOLEAN', 1, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (4, u'id', u'INTEGER', 1, None, 1) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (5, u'name', u'VARCHAR(255)', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (6, u'disk_format', u'VARCHAR(20)', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (7, u'container_format', u'VARCHAR(20)', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (8, u'size', u'INTEGER', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (9, u'status', u'VARCHAR(30)', 1, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (10, u'is_public', u'BOOLEAN', 1, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (11, u'location', u'TEXT', 0, None, 0) - 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] PRAGMA table_info("image_properties") - 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] () - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Col ('cid', 'name', 'type', 'notnull', 'dflt_value', 'pk') - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (0, u'created_at', u'DATETIME', 1, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (1, u'updated_at', u'DATETIME', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (2, u'deleted_at', u'DATETIME', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (3, u'deleted', u'BOOLEAN', 1, None, 0) - 2011-04-13 14:51:16 DEBUG 
[sqlalchemy.engine.base.Engine.0x...feac] Row (4, u'id', u'INTEGER', 1, None, 1) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (5, u'image_id', u'INTEGER', 1, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (6, u'key', u'VARCHAR(255)', 1, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (7, u'value', u'TEXT', 0, None, 0) - - $ ps aux | grep glance - root 20009 0.7 0.1 12744 9148 pts/1 S 12:47 0:00 /usr/bin/python /usr/bin/glance-api glance-api.conf --debug - root 20012 2.0 0.1 25188 13356 pts/1 S 12:47 0:00 /usr/bin/python /usr/bin/glance-registry glance-registry.conf - jsuh 20017 0.0 0.0 3368 744 pts/1 S+ 12:47 0:00 grep glance - -Simply supply the configuration file as the parameter to the ``--config-file`` option -(the ``etc/glance-api.conf`` and ``etc/glance-registry.conf`` sample configuration -files were used in the above example) and then any other options -you want to use. (``--debug`` was used above to show some of the debugging -output that the server shows when starting up. Call the server program -with ``--help`` to see all available options you can specify on the -command line.) - -For more information on configuring the server via the ``paste.deploy`` -configuration files, see the section entitled -:ref:`Configuring Glance servers ` - -Note that the server `daemonizes` itself by using the standard -shell backgrounding indicator, ``&``, in the previous example. For most use cases, we recommend -using the ``glance-control`` server daemon wrapper for daemonizing. See below -for more details on daemonization with ``glance-control``. - -Using the ``glance-control`` program to start the server -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The second way to start up a Glance server is to use the ``glance-control`` -program. 
``glance-control`` is a wrapper script that allows the user to -start, stop, restart, and reload the other Glance server programs in -a fashion that is more conducive to automation and scripting. - -Servers started via the ``glance-control`` program are always `daemonized`, -meaning that the server program process runs in the background. - -To start a Glance server with ``glance-control``, simply call -``glance-control`` with a server and the word "start", followed by -any command-line options you wish to provide. Start the server with ``glance-control`` -in the following way:: - - $> sudo glance-control [OPTIONS] start [CONFPATH] - -.. note:: - - You must use the ``sudo`` program to run ``glance-control`` currently, as the - pid files for the server programs are written to /var/run/glance/ - -Here is an example that shows how to start the ``glance-registry`` server -with the ``glance-control`` wrapper script. :: - - - $ sudo glance-control api start glance-api.conf - Starting glance-api with /home/jsuh/glance.conf - - $ sudo glance-control registry start glance-registry.conf - Starting glance-registry with /home/jsuh/glance.conf - - $ ps aux | grep glance - root 20038 4.0 0.1 12728 9116 ? Ss 12:51 0:00 /usr/bin/python /usr/bin/glance-api /home/jsuh/glance-api.conf - root 20039 6.0 0.1 25188 13356 ? Ss 12:51 0:00 /usr/bin/python /usr/bin/glance-registry /home/jsuh/glance-registry.conf - jsuh 20042 0.0 0.0 3368 744 pts/1 S+ 12:51 0:00 grep glance - - -The same configuration files are used by ``glance-control`` to start the -Glance server programs, and you can specify (as the example above shows) -a configuration file when starting the server. - - -In order for your launched glance service to be monitored for unexpected death -and respawned if necessary, use the following option: - - - $ sudo glance-control [service] start --respawn ... - - -Note that this will cause ``glance-control`` itself to remain running. 
Also note -that deliberately stopped services are not respawned, neither are rapidly bouncing -services (where process death occurred within one second of the last launch). - - -By default, output from glance services is discarded when launched with ``glance-control``. -In order to capture such output via syslog, use the following option: - - - $ sudo glance-control --capture-output ... - - -Stopping a server ------------------ - -If you started a Glance server manually and did not use the ``&`` backgrounding -function, simply send a terminate signal to the server process by typing -``Ctrl-C`` - -If you started the Glance server using the ``glance-control`` program, you can -use the ``glance-control`` program to stop it. Simply do the following:: - - $> sudo glance-control stop - -as this example shows:: - - $> sudo glance-control registry stop - Stopping glance-registry pid: 17602 signal: 15 - -Restarting a server -------------------- - -You can restart a server with the ``glance-control`` program, as demonstrated -here:: - - $> sudo glance-control registry restart etc/glance-registry.conf - Stopping glance-registry pid: 17611 signal: 15 - Starting glance-registry with /home/jpipes/repos/glance/trunk/etc/glance-registry.conf - -Reloading a server -------------------- - -You can reload a server with the ``glance-control`` program, as demonstrated -here:: - - $> sudo glance-control api reload - Reloading glance-api (pid 18506) with signal(1) - -A reload sends a SIGHUP signal to the master process and causes new configuration -settings to be picked up without any interruption to the running service (provided -neither bind_host or bind_port has changed). diff --git a/doc/source/admin/db-sqlalchemy-migrate.rst b/doc/source/admin/db-sqlalchemy-migrate.rst deleted file mode 100644 index 6f5dea66..00000000 --- a/doc/source/admin/db-sqlalchemy-migrate.rst +++ /dev/null @@ -1,67 +0,0 @@ -.. - Copyright 2012 OpenStack Foundation - All Rights Reserved. 
- - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _legacy-database-management: - -Legacy Database Management -========================== - -.. note:: - This page applies only to Glance releases prior to Ocata. From Ocata - onward, please see :ref:`database-management`. - -The default metadata driver for Glance uses sqlalchemy, which implies there -exists a backend database which must be managed. The ``glance-manage`` binary -provides a set of commands for making this easier. - -The commands should be executed as a subcommand of 'db': - - glance-manage db - - -Sync the Database ------------------ - - glance-manage db sync - -Place a database under migration control and upgrade, creating it first if necessary. - - -Determining the Database Version --------------------------------- - - glance-manage db version - -This will print the current migration level of a Glance database. - - -Upgrading an Existing Database ------------------------------- - - glance-manage db upgrade - -This will take an existing database and upgrade it to the specified VERSION. - - -Downgrading an Existing Database --------------------------------- - -Upgrades involve complex operations and can fail. Before attempting any -upgrade, you should make a full database backup of your production data. As of -Kilo, database downgrades are not supported, and the only method available to -get back to a prior database version is to restore from backup[1]. 
- -[1]: http://docs.openstack.org/ops-guide/ops-upgrades.html#perform-a-backup diff --git a/doc/source/admin/db.rst b/doc/source/admin/db.rst deleted file mode 100644 index 4dcebab0..00000000 --- a/doc/source/admin/db.rst +++ /dev/null @@ -1,249 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _database-management: - -Database Management -=================== - -The default metadata driver for Glance uses `SQLAlchemy`_, which implies there -exists a backend database which must be managed. The ``glance-manage`` binary -provides a set of commands for making this easier. - -The commands should be executed as a subcommand of 'db': - - glance-manage db - -.. note:: - In the Ocata release (14.0.0), the database migration engine was changed - from *SQLAlchemy Migrate* to *Alembic*. This necessitated some changes in - the ``glance-manage`` tool. While the user interface has been kept as - similar as possible, the ``glance-manage`` tool included with the Ocata and - more recent releases is incompatible with the "legacy" tool. If you are - consulting these documents for information about the ``glance-manage`` tool - in the Newton or earlier releases, please see the - :ref:`legacy-database-management` page. - -.. 
_`SQLAlchemy`: http://www.sqlalchemy.org/ - - -Migration Scripts ------------------ - -The migration scripts are stored in the directory: -``glance/db/sqlalchemy/alembic_migrations/versions`` - -As mentioned above, these scripts utilize the Alembic migration engine, which -was first introduced in the Ocata release. All database migrations up through -the Liberty release are consolidated into one Alembic migration script named -``liberty_initial``. Mitaka migrations are retained, but have been rewritten -for Alembic and named using the new naming convention. - -A fresh Glance installation will apply the following -migrations: - -* ``liberty-initial`` -* ``mitaka01`` -* ``mitaka02`` -* ``ocata01`` - -.. note:: - - The "old-style" migration scripts have been retained in their `current - directory`_ in the Ocata release so that interested operators can correlate - them with the new migrations. This directory will be removed in future - releases. - - In particular, the "old-style" script for the Ocata migration, - `045_add_visibility.py`_ is retained for operators who are conversant in - SQLAlchemy Migrate and are interested in comparing it with a "new-style" - Alembic migration script. The Alembic script, which is the one actually - used to do the upgrade to Ocata, is - `ocata01_add_visibility_remove_is_public.py`_. - -.. _`current directory`: http://git.openstack.org/cgit/openstack/glance/tree/glance/db/sqlalchemy/migrate_repo/versions?h=stable/ocata -.. _`045_add_visibility.py`: http://git.openstack.org/cgit/openstack/glance/tree/glance/db/sqlalchemy/migrate_repo/versions/045_add_visibility.py?h=stable/ocata -.. 
_`ocata01_add_visibility_remove_is_public.py`: http://git.openstack.org/cgit/openstack/glance/tree/glance/db/sqlalchemy/alembic_migrations/versions/ocata01_add_visibility_remove_is_public.py?h=stable/ocata - -Sync the Database ------------------ - - glance-manage db sync [VERSION] - -Place an existing database under migration control and upgrade it to the -specified VERSION or to the latest migration level if VERSION is not specified. - -.. note:: - - Prior to Ocata release the database version was a numeric value. For - example: for the Newton release, the latest migration level was ``44``. - Starting with Ocata, database version is a revision name corresponding to - the latest migration included in the release. For the Ocata release, there - is only one database migration and it is identified by revision - ``ocata01``. So, the database version for Ocata release is ``ocata01``. - - This naming convention will change slightly with the introduction of - zero-downtime upgrades, which is EXPERIMENTAL in Ocata, but is projected to - be the official upgrade method beginning with the Pike release. See - :ref:`zero-downtime` below for more information. - - -Determining the Database Version --------------------------------- - - glance-manage db version - -This will print the current migration level of a Glance database. - - -Upgrading an Existing Database ------------------------------- - - glance-manage db upgrade [VERSION] - -This will take an existing database and upgrade it to the specified VERSION. - -.. _downgrades: - -Downgrading an Existing Database --------------------------------- - -Upgrades involve complex operations and can fail. Before attempting any -upgrade, you should make a full database backup of your production data. As of -Kilo, database downgrades are not supported, and the only method available to -get back to a prior database version is to restore from backup [1]. - -[1]: http://docs.openstack.org/ops-guide/ops-upgrades.html#perform-a-backup - - -.. 
_zero-downtime: - -Zero-Downtime Database Upgrades -=============================== - -.. warning:: - This feature is EXPERIMENTAL in the Ocata release. We encourage operators - to try it out, but its use in production environments is currently NOT - SUPPORTED. - -A zero-downtime database upgrade enables true rolling upgrades of the Glance -nodes in your cloud's control plane. At the appropriate point in the upgrade, -you can have a mixed deployment of release *n* (for example, Ocata) and release -*n-1* (for example, Newton) Glance nodes, take the *n-1* release nodes out of -rotation, allow them to drain, and then take them out of service permanently, -leaving all Glance nodes in your cloud at release *n*. - -That's a rough sketch of how a rolling upgrade would work. For full details, -see :ref:`rolling-upgrades`. - -.. note:: - Downgrading a database is not supported. See :ref:`downgrades` for more - information. - -The Expand-Migrate-Contract Cycle ---------------------------------- - -For Glance, a zero-downtime database upgrade has three phases: - -1. **Expand**: in this phase, new columns, tables, indexes, or triggers are - added to the database. - -2. **Migrate**: in this phase, data is migrated to the new columns or tables. - -3. **Contract**: in this phase, the "old" tables or columns (and any database - triggers used during the migration), which are no longer in use, are removed - from the database. - -The above phases are abbreviated as an **E-M-C** database upgrade. - -New Database Version Identifiers --------------------------------- - -In order to perform zero-downtime upgrades, the version identifier of a -database becomes more complicated since it must reflect knowledge of what point -in the E-M-C cycle the upgrade has reached. To make this evident, the -identifier explicitly contains 'expand' or 'contract' as part of its name. 
- -Thus the ``ocata01`` migration (that is, the migration that's currently used in -the fully supported upgrade path) has two identifiers associated with it for -zero-downtime upgrades: ``ocata_expand01`` and ``ocata_contract01``. - -During the upgrade process, the database is initially marked with -``ocata_expand01``. Eventually, after completing the full upgrade process, the -database will be marked with ``ocata_contract01``. So, instead of one database -version, an operator will see a composite database version that will have both -expand and contract versions. A database will be considered at Ocata version -only when both expand and contract revisions are at the latest revisions. For -a successful Ocata zero-downtime upgrade, for example, the database will be -marked with both ``ocata_expand01``, ``ocata_contract01``. - -In the case in which there are multiple changes in a cycle, the database -version record would go through the following progression: - -+-------+--------------------------------------+-------------------------+ -| stage | database identifier | comment | -+=======+======================================+=========================+ -| E | ``bexar_expand01`` | upgrade begins | -+-------+--------------------------------------+-------------------------+ -| E | ``bexar_expand02`` | | -+-------+--------------------------------------+-------------------------+ -| E | ``bexar_expand03`` | | -+-------+--------------------------------------+-------------------------+ -| M | ``bexar_expand03`` | bexar_migrate01 occurs | -+-------+--------------------------------------+-------------------------+ -| M | ``bexar_expand03`` | bexar_migrate02 occurs | -+-------+--------------------------------------+-------------------------+ -| M | ``bexar_expand03`` | bexar_migrate03 occurs | -+-------+--------------------------------------+-------------------------+ -| C | ``bexar_expand03, bexar_contract01`` | | 
-+-------+--------------------------------------+-------------------------+ -| C | ``bexar_expand03, bexar_contract02`` | | -+-------+--------------------------------------+-------------------------+ -| C | ``bexar_expand03, bexar_contract03`` | upgrade completed | -+-------+--------------------------------------+-------------------------+ - -Database Upgrade ----------------- - -In order to enable the E-M-C database upgrade cycle, and to enable Glance -rolling upgrades, the ``glance-manage`` tool has been augmented to include the -following operations. - -Expanding the Database ----------------------- - - glance-manage db expand - -This will run the expansion phase of a rolling upgrade process. Database -expansion should be run as the first step in the rolling upgrade process before -any new services are started. - - -Migrating the Data ------------------- - - glance-manage db migrate - -This will run the data migrate phase of a rolling upgrade process. Database -migration should be run after database expansion but before any new services -are started. - - -Contracting the Database ------------------------- - - glance-manage db contract - -This will run the contraction phase of a rolling upgrade process. -Database contraction should be run as the last step of the rolling upgrade -process after all old services are upgraded to new ones. diff --git a/doc/source/admin/flows.rst b/doc/source/admin/flows.rst deleted file mode 100644 index 85f75c23..00000000 --- a/doc/source/admin/flows.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. - Copyright 2015 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Glance Flow Plugins -=================== - -Flows ------ - -.. list-plugins:: glance.flows - :detailed: - -Import Flows ------------- - -.. list-plugins:: glance.flows.import - :detailed: diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst deleted file mode 100644 index 93834a8e..00000000 --- a/doc/source/admin/index.rst +++ /dev/null @@ -1,23 +0,0 @@ -====================== - Administration guide -====================== - -.. toctree:: - :maxdepth: 2 - - authentication - cache - policies - property-protections - requirements - apache-httpd - notifications - tasks - controllingservers - flows - db - db-sqlalchemy-migrate - rollingupgrades - troubleshooting - manage-images - requirements diff --git a/doc/source/admin/manage-images.rst b/doc/source/admin/manage-images.rst deleted file mode 100644 index 6e44a9ff..00000000 --- a/doc/source/admin/manage-images.rst +++ /dev/null @@ -1,282 +0,0 @@ -============= -Manage images -============= - -The cloud operator assigns roles to users. Roles determine who can -upload and manage images. The operator might restrict image upload and -management to only cloud administrators or operators. - -You can upload images through the :command:`openstack image create` -command or the Image service API. You can use the ``openstack`` client -for the image management. It provides mechanisms to list and -delete images, set and delete image metadata, and create images of a -running instance or snapshot and backup types. - -After you upload an image, you cannot change it. 
- -For details about image creation, see the `Virtual Machine Image -Guide `__. - -List or get details for images (glance) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To get a list of images and to get further details about a single -image, use :command:`openstack image list` and :command:`openstack image show` -commands. - -.. code-block:: console - - $ openstack image list - +--------------------------------------+---------------------------------+--------+ - | ID | Name | Status | - +--------------------------------------+---------------------------------+--------+ - | dfc1dfb0-d7bf-4fff-8994-319dd6f703d7 | cirros-0.3.5-x86_64-uec | active | - | a3867e29-c7a1-44b0-9e7f-10db587cad20 | cirros-0.3.5-x86_64-uec-kernel | active | - | 4b916fba-6775-4092-92df-f41df7246a6b | cirros-0.3.5-x86_64-uec-ramdisk | active | - | d07831df-edc3-4817-9881-89141f9134c3 | myCirrosImage | active | - +--------------------------------------+---------------------------------+--------+ -.. code-block:: console - - $ openstack image show myCirrosImage - +------------------+------------------------------------------------------+ - | Field | Value | - +------------------+------------------------------------------------------+ - | checksum | ee1eca47dc88f4879d8a229cc70a07c6 | - | container_format | ami | - | created_at | 2016-08-11T15:07:26Z | - | disk_format | ami | - | file | /v2/images/d07831df-edc3-4817-9881-89141f9134c3/file | - | id | d07831df-edc3-4817-9881-89141f9134c3 | - | min_disk | 0 | - | min_ram | 0 | - | name | myCirrosImage | - | owner | d88310717a8e4ebcae84ed075f82c51e | - | protected | False | - | schema | /v2/schemas/image | - | size | 13287936 | - | status | active | - | tags | | - | updated_at | 2016-08-11T15:20:02Z | - | virtual_size | None | - | visibility | private | - +------------------+------------------------------------------------------+ - -When viewing a list of images, you can also use ``grep`` to filter the -list, as follows: - -.. 
code-block:: console - - $ openstack image list | grep 'cirros' - | dfc1dfb0-d7bf-4fff-8994-319dd6f703d7 | cirros-0.3.5-x86_64-uec | active | - | a3867e29-c7a1-44b0-9e7f-10db587cad20 | cirros-0.3.5-x86_64-uec-kernel | active | - | 4b916fba-6775-4092-92df-f41df7246a6b | cirros-0.3.5-x86_64-uec-ramdisk | active | - -.. note:: - - To store location metadata for images, which enables direct file access for a client, - update the ``/etc/glance/glance-api.conf`` file with the following statements: - - * ``show_multiple_locations = True`` - - * ``filesystem_store_metadata_file = filePath`` - - where filePath points to a JSON file that defines the mount point for OpenStack - images on your system and a unique ID. For example: - - .. code-block:: json - - [{ - "id": "2d9bb53f-70ea-4066-a68b-67960eaae673", - "mountpoint": "/var/lib/glance/images/" - }] - - After you restart the Image service, you can use the following syntax to view - the image's location information: - - .. code-block:: console - - $ openstack --os-image-api-version 2 image show imageID - - For example, using the image ID shown above, you would issue the command as follows: - - .. code-block:: console - - $ openstack --os-image-api-version 2 image show 2d9bb53f-70ea-4066-a68b-67960eaae673 - -Create or update an image (glance) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To create an image, use :command:`openstack image create`: - -.. code-block:: console - - $ openstack image create imageName - -To update an image by name or ID, use :command:`openstack image set`: - -.. code-block:: console - - $ openstack image set imageName - -The following list explains the optional arguments that you can use with -the ``create`` and ``set`` commands to modify image properties. For -more information, refer to the `OpenStack Image command reference -`_. - -The following example shows the command that you would use to upload a -CentOS 6.3 image in qcow2 format and configure it for public access: - -.. 
code-block:: console - - $ openstack image create --disk-format qcow2 --container-format bare \ - --public --file ./centos63.qcow2 centos63-image - -The following example shows how to update an existing image with a -properties that describe the disk bus, the CD-ROM bus, and the VIF -model: - -.. note:: - - When you use OpenStack with VMware vCenter Server, you need to specify - the ``vmware_disktype`` and ``vmware_adaptertype`` properties with - :command:`openstack image create`. - Also, we recommend that you set the ``hypervisor_type="vmware"`` property. - For more information, see `Images with VMware vSphere - `_ - in the OpenStack Configuration Reference. - -.. code-block:: console - - $ openstack image set \ - --property hw_disk_bus=scsi \ - --property hw_cdrom_bus=ide \ - --property hw_vif_model=e1000 \ - f16-x86_64-openstack-sda - -Currently the libvirt virtualization tool determines the disk, CD-ROM, -and VIF device models based on the configured hypervisor type -(``libvirt_type`` in ``/etc/nova/nova.conf`` file). For the sake of optimal -performance, libvirt defaults to using virtio for both disk and VIF -(NIC) models. The disadvantage of this approach is that it is not -possible to run operating systems that lack virtio drivers, for example, -BSD, Solaris, and older versions of Linux and Windows. - -If you specify a disk or CD-ROM bus model that is not supported, see -the Disk_and_CD-ROM_bus_model_values_table_. -If you specify a VIF model that is not supported, the instance fails to -launch. See the VIF_model_values_table_. - -The valid model values depend on the ``libvirt_type`` setting, as shown -in the following tables. - -.. 
_Disk_and_CD-ROM_bus_model_values_table: - -**Disk and CD-ROM bus model values** - -+-------------------------+--------------------------+ -| libvirt\_type setting | Supported model values | -+=========================+==========================+ -| qemu or kvm | * ide | -| | | -| | * scsi | -| | | -| | * virtio | -+-------------------------+--------------------------+ -| xen | * ide | -| | | -| | * xen | -+-------------------------+--------------------------+ - - -.. _VIF_model_values_table: - -**VIF model values** - -+-------------------------+--------------------------+ -| libvirt\_type setting | Supported model values | -+=========================+==========================+ -| qemu or kvm | * e1000 | -| | | -| | * ne2k\_pci | -| | | -| | * pcnet | -| | | -| | * rtl8139 | -| | | -| | * virtio | -+-------------------------+--------------------------+ -| xen | * e1000 | -| | | -| | * netfront | -| | | -| | * ne2k\_pci | -| | | -| | * pcnet | -| | | -| | * rtl8139 | -+-------------------------+--------------------------+ -| vmware | * VirtualE1000 | -| | | -| | * VirtualPCNet32 | -| | | -| | * VirtualVmxnet | -+-------------------------+--------------------------+ - -.. note:: - - By default, hardware properties are retrieved from the image - properties. However, if this information is not available, the - ``libosinfo`` database provides an alternative source for these - values. - - If the guest operating system is not in the database, or if the use - of ``libosinfo`` is disabled, the default system values are used. - - Users can set the operating system ID or a ``short-id`` in image - properties. For example: - - .. code-block:: console - - $ openstack image set --property short-id=fedora23 \ - name-of-my-fedora-image - - Alternatively, users can set ``id`` to a URL: - - .. 
code-block:: console - - $ openstack image set \ - --property id=http://fedoraproject.org/fedora/23 \ - ID-of-my-fedora-image - -Create an image from ISO image ------------------------------- - -You can upload ISO images to the Image service (glance). -You can subsequently boot an ISO image using Compute. - -In the Image service, run the following command: - -.. code-block:: console - - $ openstack image create ISO_IMAGE --file IMAGE.iso \ - --disk-format iso --container-format bare - -Optionally, to confirm the upload in Image service, run: - -.. code-block:: console - - $ openstack image list - -Troubleshoot image creation -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you encounter problems in creating an image in the Image service or -Compute, the following information may help you troubleshoot the -creation process. - -* Ensure that the version of qemu you are using is version 0.14 or - later. Earlier versions of qemu result in an ``unknown option -s`` - error message in the ``/var/log/nova/nova-compute.log`` file. - -* Examine the ``/var/log/nova/nova-api.log`` and - ``/var/log/nova/nova-compute.log`` log files for error messages. diff --git a/doc/source/admin/notifications.rst b/doc/source/admin/notifications.rst deleted file mode 100644 index cd226949..00000000 --- a/doc/source/admin/notifications.rst +++ /dev/null @@ -1,216 +0,0 @@ -.. - Copyright 2011-2013 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. 
_notifications: - -Notifications -============= - -Notifications can be generated for several events in the image lifecycle. -These can be used for auditing, troubleshooting, etc. - -Notification Drivers --------------------- - -* log - - This driver uses the standard Python logging infrastructure with - the notifications ending up in file specified by the log_file - configuration directive. - -* messaging - - This strategy sends notifications to a message queue configured - using oslo.messaging configuration options. - -* noop - - This strategy produces no notifications. It is the default strategy. - -Notification Types ------------------- - -* ``image.create`` - - Emitted when an image record is created in Glance. Image record creation is - independent of image data upload. - -* ``image.prepare`` - - Emitted when Glance begins uploading image data to its store. - -* ``image.upload`` - - Emitted when Glance has completed the upload of image data to its store. - -* ``image.activate`` - - Emitted when an image goes to `active` status. This occurs when Glance - knows where the image data is located. - -* ``image.send`` - - Emitted upon completion of an image being sent to a consumer. - -* ``image.update`` - - Emitted when an image record is updated in Glance. - -* ``image.delete`` - - Emitted when an image deleted from Glance. - -* ``task.run`` - - Emitted when a task is picked up by the executor to be run. - -* ``task.processing`` - - Emitted when a task is sent over to the executor to begin processing. - -* ``task.success`` - - Emitted when a task is successfully completed. - -* ``task.failure`` - - Emitted when a task fails. - -Content -------- - -Every message contains a handful of attributes. - -* message_id - - UUID identifying the message. - -* publisher_id - - The hostname of the glance instance that generated the message. - -* event_type - - Event that generated the message. - -* priority - - One of WARN, INFO or ERROR. 
- -* timestamp - - UTC timestamp of when event was generated. - -* payload - - Data specific to the event type. - -Payload -------- - -* image.send - - The payload for INFO, WARN, and ERROR events contain the following: - - image_id - ID of the image (UUID) - owner_id - Tenant or User ID that owns this image (string) - receiver_tenant_id - Tenant ID of the account receiving the image (string) - receiver_user_id - User ID of the account receiving the image (string) - destination_ip - The receiver's IP address to which the image was sent (string) - bytes_sent - The number of bytes actually sent - -* image.create - - For INFO events, it is the image metadata. - WARN and ERROR events contain a text message in the payload. - -* image.prepare - - For INFO events, it is the image metadata. - WARN and ERROR events contain a text message in the payload. - -* image.upload - - For INFO events, it is the image metadata. - WARN and ERROR events contain a text message in the payload. - -* image.activate - - For INFO events, it is the image metadata. - WARN and ERROR events contain a text message in the payload. - -* image.update - - For INFO events, it is the image metadata. - WARN and ERROR events contain a text message in the payload. - -* image.delete - - For INFO events, it is the image id. - WARN and ERROR events contain a text message in the payload. - -* task.run - - The payload for INFO, WARN, and ERROR events contain the following: - - task_id - ID of the task (UUID) - owner - Tenant or User ID that created this task (string) - task_type - Type of the task. Example, task_type is "import". (string) - status, - status of the task. Status can be "pending", "processing", - "success" or "failure". (string) - task_input - Input provided by the user when attempting to create a task. (dict) - result - Resulting output from a successful task. (dict) - message - Message shown in the task if it fails. None if task succeeds. 
(string) - expires_at - UTC time at which the task would not be visible to the user. (string) - created_at - UTC time at which the task was created. (string) - updated_at - UTC time at which the task was latest updated. (string) - - The exceptions are:- - For INFO events, it is the task dict with result and message as None. - WARN and ERROR events contain a text message in the payload. - -* task.processing - - For INFO events, it is the task dict with result and message as None. - WARN and ERROR events contain a text message in the payload. - -* task.success - - For INFO events, it is the task dict with message as None and result is a - dict. - WARN and ERROR events contain a text message in the payload. - -* task.failure - - For INFO events, it is the task dict with result as None and message is - text. - WARN and ERROR events contain a text message in the payload. diff --git a/doc/source/admin/policies.rst b/doc/source/admin/policies.rst deleted file mode 100644 index ccbb6d3c..00000000 --- a/doc/source/admin/policies.rst +++ /dev/null @@ -1,198 +0,0 @@ -.. - Copyright 2012 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Policies -======== - -Glance's public API calls may be restricted to certain sets of users using a -policy configuration file. This document explains exactly how policies are -configured and what they apply to. 
- -A policy is composed of a set of rules that are used by the policy "Brain" in -determining if a particular action may be performed by the authorized tenant. - -Constructing a Policy Configuration File ----------------------------------------- - -A policy configuration file is a simply JSON object that contain sets of -rules. Each top-level key is the name of a rule. Each rule -is a string that describes an action that may be performed in the Glance API. - -The actions that may have a rule enforced on them are: - -* ``get_images`` - List available image entities - - * ``GET /v1/images`` - * ``GET /v1/images/detail`` - * ``GET /v2/images`` - -* ``get_image`` - Retrieve a specific image entity - - * ``HEAD /v1/images/`` - * ``GET /v1/images/`` - * ``GET /v2/images/`` - -* ``download_image`` - Download binary image data - - * ``GET /v1/images/`` - * ``GET /v2/images//file`` - -* ``upload_image`` - Upload binary image data - - * ``POST /v1/images`` - * ``PUT /v1/images/`` - * ``PUT /v2/images//file`` - -* ``copy_from`` - Copy binary image data from URL - - * ``POST /v1/images`` - * ``PUT /v1/images/`` - -* ``add_image`` - Create an image entity - - * ``POST /v1/images`` - * ``POST /v2/images`` - -* ``modify_image`` - Update an image entity - - * ``PUT /v1/images/`` - * ``PUT /v2/images/`` - -* ``publicize_image`` - Create or update public images - - * ``POST /v1/images`` with attribute ``is_public`` = ``true`` - * ``PUT /v1/images/`` with attribute ``is_public`` = ``true`` - * ``POST /v2/images`` with attribute ``visibility`` = ``public`` - * ``PUT /v2/images/`` with attribute ``visibility`` = ``public`` - -* ``communitize_image`` - Create or update community images - - * ``POST /v2/images`` with attribute ``visibility`` = ``community`` - * ``PUT /v2/images/`` with attribute ``visibility`` = ``community`` - -* ``delete_image`` - Delete an image entity and associated binary data - - * ``DELETE /v1/images/`` - * ``DELETE /v2/images/`` - -* ``add_member`` - Add a 
membership to the member repo of an image - - * ``POST /v2/images//members`` - -* ``get_members`` - List the members of an image - - * ``GET /v1/images//members`` - * ``GET /v2/images//members`` - -* ``delete_member`` - Delete a membership of an image - - * ``DELETE /v1/images//members/`` - * ``DELETE /v2/images//members/`` - -* ``modify_member`` - Create or update the membership of an image - - * ``PUT /v1/images//members/`` - * ``PUT /v1/images//members`` - * ``POST /v2/images//members`` - * ``PUT /v2/images//members/`` - -* ``manage_image_cache`` - Allowed to use the image cache management API - - -To limit an action to a particular role or roles, you list the roles like so :: - - { - "delete_image": ["role:admin", "role:superuser"] - } - -The above would add a rule that only allowed users that had roles of either -"admin" or "superuser" to delete an image. - -Writing Rules -------------- - -Role checks are going to continue to work exactly as they already do. If the -role defined in the check is one that the user holds, then that will pass, -e.g., ``role:admin``. - -To write a generic rule, you need to know that there are three values provided -by Glance that can be used in a rule on the left side of the colon (``:``). -Those values are the current user's credentials in the form of: - -- role -- tenant -- owner - -The left side of the colon can also contain any value that Python can -understand, e.g.,: - -- ``True`` -- ``False`` -- ``"a string"`` -- &c. - -Using ``tenant`` and ``owner`` will only work with images. Consider the -following rule:: - - tenant:%(owner)s - -This will use the ``tenant`` value of the currently authenticated user. It -will also use ``owner`` from the image it is acting upon. If those two -values are equivalent the check will pass. All attributes on an image (as well -as extra image properties) are available for use on the right side of the -colon. 
The most useful are the following: - -- ``owner`` -- ``protected`` -- ``is_public`` - -Therefore, you could construct a set of rules like the following:: - - { - "not_protected": "False:%(protected)s", - "is_owner": "tenant:%(owner)s", - "is_owner_or_admin": "rule:is_owner or role:admin", - "not_protected_and_is_owner": "rule:not_protected and rule:is_owner", - - "get_image": "rule:is_owner_or_admin", - "delete_image": "rule:not_protected_and_is_owner", - "add_member": "rule:not_protected_and_is_owner" - } - -Examples --------- - -Example 1. (The default policy configuration) - - :: - - { - "default": "" - } - -Note that an empty JSON list means that all methods of the -Glance API are callable by anyone. - -Example 2. Disallow modification calls to non-admins - - :: - - { - "default": "", - "add_image": "role:admin", - "modify_image": "role:admin", - "delete_image": "role:admin" - } diff --git a/doc/source/admin/property-protections.rst b/doc/source/admin/property-protections.rst deleted file mode 100644 index 923142b2..00000000 --- a/doc/source/admin/property-protections.rst +++ /dev/null @@ -1,151 +0,0 @@ -.. - Copyright 2013 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _property-protections: - -Property Protections -==================== - -There are two types of image properties in Glance: - -* Core Properties, as specified by the image schema. 
- -* Meta Properties, which are arbitrary key/value pairs that can be added to an - image. - -Access to meta properties through Glance's public API calls may be -restricted to certain sets of users, using a property protections configuration -file. - -This document explains exactly how property protections are configured and what -they apply to. - - -Constructing a Property Protections Configuration File ------------------------------------------------------- - -A property protections configuration file follows the format of the Glance API -configuration file, which consists of sections, led by a ``[section]`` header -and followed by ``name = value`` entries. Each section header is a regular -expression matching a set of properties to be protected. - -.. note:: - - Section headers must compile to a valid regular expression, otherwise - glance api service will not start. Regular expressions - will be handled by python's re module which is PERL like. - -Each section describes four key-value pairs, where the key is one of -``create/read/update/delete``, and the value is a comma separated list of user -roles that are permitted to perform that operation in the Glance API. **If any of -the keys are not specified, then the glance api service will not start -successfully.** - -In the list of user roles, ``@`` means all roles and ``!`` means no role. -**If both @ and ! are specified for the same rule then the glance api service -will not start** - -.. note:: - - Only one policy rule is allowed per property operation. **If multiple are - specified, then the glance api service will not start.** - -The path to the file should be specified in the ``[DEFAULT]`` section of -``glance-api.conf`` as follows. - - :: - - property_protection_file=/path/to/file - -If this config value is not specified, property protections are not enforced. 
-**If the path is invalid, glance api service will not start successfully.** - -The file may use either roles or policies to describe the property protections. -The config value should be specified in the ``[DEFAULT]`` section of -``glance-api.conf`` as follows. - - :: - - property_protection_rule_format= - -The default value for ``property_protection_rule_format`` is ``roles``. - -Property protections are applied in the order specified in the configuration -file. This means that if for example you specify a section with ``[.*]`` at -the top of the file, all proceeding sections will be ignored. - -If a property does not match any of the given rules, all operations will be -disabled for all roles. - -If an operation is misspelled or omitted, that operation will be disabled for -all roles. - -Disallowing ``read`` operations will also disallow ``update/delete`` operations. - -A successful HTTP request will return status ``200 OK``. If the user is not -permitted to perform the requested action, ``403 Forbidden`` will be returned. - -V1 API X-glance-registry-Purge-props ------------------------------------- - -Property protections will still be honoured if -``X-glance-registry-Purge-props`` is set to ``True``. That is, if you request -to modify properties with this header set to ``True``, you will not be able to -delete or update properties for which you do not have the relevant permissions. -Properties which are not included in the request and for which you do have -delete permissions will still be removed. - -Examples --------- - -**Example 1**. Limit all property interactions to admin only. - - :: - - [.*] - create = admin - read = admin - update = admin - delete = admin - -**Example 2**. Allow both admins and users with the billing role to read -and modify properties prefixed with ``x_billing_code_``. Allow admins to -read and modify any properties. 
- - :: - - [^x_billing_code_.*] - create = admin,billing - read = admin, billing - update = admin,billing - delete = admin,billing - - [.*] - create = admin - read = admin - update = admin - delete = admin - -**Example 3**. Limit all property interactions to admin only using policy -rule context_is_admin defined in policy.json. - - :: - - [.*] - create = context_is_admin - read = context_is_admin - update = context_is_admin - delete = context_is_admin diff --git a/doc/source/admin/requirements.rst b/doc/source/admin/requirements.rst deleted file mode 100644 index 4ac2d350..00000000 --- a/doc/source/admin/requirements.rst +++ /dev/null @@ -1,81 +0,0 @@ -.. - Copyright 2016-present OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Requirements -============ - - -External Requirements Affecting Glance -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Like other OpenStack projects, Glance uses some external libraries for a subset -of its features. Some examples include the ``qemu-img`` utility used by the -tasks feature, ``sendfile`` to utilize the "zero-copy" way of copying data -faster, ``pydev`` to debug using popular IDEs, ``python-xattr`` for Image Cache -using "xattr" driver. - -On the other hand, if ``dnspython`` is installed in the environment, Glance -provides a workaround to make it work with IPV6. 
- -Additionally, some libraries like ``xattr`` are not compatible when -using Glance on Windows (see :ref:`the documentation on config options -affecting the Image Cache `). - - -Guideline to include your requirement in the requirements.txt file -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -As described above, we don't include all the possible requirements needed by -Glance features in the source tree requirements file. So, when an operator -decides to use an **advanced feature** in Glance, we ask them to check the -documentation/guidelines for those features to set up the feature in a workable -way. In order to reduce the operator pain, the development team likes to work -with different operators to figure out when a popular feature should have its -dependencies included in the requirements file. However, there's a tradeoff in -including more of requirements in source tree as it becomes more painful for -packagers. So, it is a bit of a haggle among different stakeholders and a -judicious decision is taken by the project PTL or release liaison to determine -the outcome. - -To simplify the identification of an **advanced feature** in Glance we can -think of it as something not being used and deployed by most of the -upstream/known community members. - -To name a few features that have been identified as advanced: - -* glance tasks -* image signing -* image prefetcher -* glance db purge utility -* image locations - - -Steps to include your requirement in the requirements.txt file -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -1. First step is to propose a change against the ``openstack/requirements`` -project to include the requirement(s) as a part of ``global-requirements`` and -``upper-constraints`` files. - -2. If your requirement is not a part of the project, you will have to propose a -change adding that requirement to the requirements.txt file in Glance. 
Please -include a ``Depends-On: `` flag in the commit message, where the -``ChangeID`` is the gerrit ID of corresponding change against -``openstack/requirements`` project. - -3. A sync bot then syncs the global requirements into project requirements on a -regular basis, so any updates to the requirements are synchronized on a timely -basis. diff --git a/doc/source/admin/rollingupgrades.rst b/doc/source/admin/rollingupgrades.rst deleted file mode 100644 index 92008d73..00000000 --- a/doc/source/admin/rollingupgrades.rst +++ /dev/null @@ -1,112 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _rolling-upgrades: - -Rolling Upgrades -================ - -.. note:: The Rolling Upgrades feature is EXPERIMENTAL and its use in - production systems is currently **not supported**. - -Scope of this document ----------------------- - -This page describes one way to perform a rolling upgrade from Newton to Ocata -for a particular configuration of Glance services. There may be other ways to -perform a rolling upgrade from Newton to Ocata for other configurations of -Glance services, but those are beyond the scope of this document. For the -experimental rollout of rolling upgrades, we describe only the following -simple case. 
- -Prerequisites -------------- - -* MySQL/MariaDB 5.5 or later - -* Glance running Images API v2 only - -* Glance not using the Glance Registry - -* Multiple Glance nodes - -* A load balancer or some other type of redirection device is being used - in front of the Glance nodes in such a way that a node can be dropped - out of rotation, that is, that Glance node continues running the Glance - service but is no longer having requests routed to it - -Procedure ---------- - -Following is the process to upgrade Glance with zero downtime: - -1. Backup the Glance database. - -2. Choose an arbitrary Glance node or provision a new node to install the new - release. If an existing Glance node is chosen, gracefully stop the Glance - services. In what follows, this node will be referred to as the NEW NODE. - -.. _Stop the Glance processes gracefully: - -.. note:: - **Gracefully stopping services** - - Before stopping the Glance processes on a node, one may choose to wait until - all the existing connections drain out. This could be achieved by taking the - node out of rotation, that is, by ensuring that requests are no longer - routed to that node. This way all the requests that are currently being - processed will get a chance to finish processing. However, some Glance - requests like uploading and downloading images may last a long time. This - increases the wait time to drain out all connections and consequently the - time to upgrade Glance completely. On the other hand, stopping the Glance - services before the connections drain out will present the user with errors. - While arguably this is not downtime given that Images API requests are - continually being serviced by other nodes, this is nonetheless an unpleasant - user experience for the user whose in-flight request has terminated in an - error. Hence, an operator must be judicious when stopping the services. - -3. Upgrade the NEW NODE with new release and update the configuration - accordingly. 
**DO NOT** start the Glance services on the NEW NODE at - this time. - -4. Using the NEW NODE, expand the database using the command - - ``glance-manage db expand``. - -5. Then, also on the NEW NODE, perform the data migrations using the command - - ``glance-manage db migrate``. - - *The data migrations must be completed before you proceed to the next step.* - -6. Start the Glance processes on the NEW NODE. It is now ready to receive - traffic from the load balancer. - -7. Taking one node at a time from the remaining nodes, for each node: - - a. `Stop the Glance processes gracefully`_ as described in Step 2, above. - *Do not proceed until the "old" Glance services on the node have been - completely shut down.* - - b. Upgrade the node to the new release (and corresponding configuration). - - c. Start the updated Glance processes on the upgraded node. - -8. After **ALL** of the nodes have been upgraded to run the new Glance - services, and there are **NO** nodes running any old Glance services, - contract the database by running the command - - ``glance manage db contract`` - - from any one of the upgraded nodes. - diff --git a/doc/source/admin/tasks.rst b/doc/source/admin/tasks.rst deleted file mode 100644 index f08ef727..00000000 --- a/doc/source/admin/tasks.rst +++ /dev/null @@ -1,137 +0,0 @@ -.. - Copyright 2015 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. 
_tasks: - -Tasks -===== - -Conceptual Overview -------------------- - -Image files can be quite large, and processing images (converting an image from -one format to another, for example) can be extremely resource intensive. -Additionally, a one-size-fits-all approach to processing images is not -desirable. A public cloud will have quite different security concerns than, -for example, a small private cloud run by an academic department in which all -users know and trust each other. Thus a public cloud deployer may wish to run -various validation checks on an image that a user wants to bring in to the -cloud, whereas the departmental cloud deployer may view such processing as a -waste of resources. - -To address this situation, Glance contains *tasks*. Tasks are intended to -offer end users a front end to long running asynchronous operations -- the type -of operation you kick off and don't expect to finish until you've gone to the -coffee shop, had a pleasant chat with your barista, had a coffee, had a -pleasant walk home, etc. The asynchronous nature of tasks is emphasized up -front in order to set end user expectations with respect to how long the task -may take (hint: longer than other Glance operations). Having a set of -operations performed by tasks allows a deployer flexibility with respect to how -many operations will be processed simultaneously, which in turn allows -flexibility with respect to what kind of resources need to be set aside for -task processing. Thus, although large cloud deployers are certainly interested -in tasks for the alternative custom image processing workflow they enable, -smaller deployers find them useful as a means of controlling resource -utilization. - -An additional reason tasks have been introduced into Glance is to support -Glance's role in the OpenStack ecosystem. Glance provides cataloging, storage, -and delivery of virtual machine images. As such, it needs to be responsive to -other OpenStack components. 
Nova, for instance, requests images from Glance in -order to boot instances; it uploads images to Glance as part of its workflow -for the Nova image-create action; and it uses Glance to provide the data for -the image-related API calls that are defined in the Compute API that Nova -instantiates. It is necessary to the proper functioning of an OpenStack cloud -that these synchronous operations not be compromised by excess load caused by -non-essential functionality such as image import. - -By separating the tasks resource from the images resource in the Images API, -it's easier for deployers to allocate resources and route requests for tasks -separately from the resources required to support Glance's service role. At -the same time this separation avoids confusion for users of an OpenStack cloud. -Responses to requests to ``/v2/images`` should return fairly quickly, while -requests to ``/v2/tasks`` may take a while. - -In short, tasks provide a common API across OpenStack installations for users -of an OpenStack cloud to request image-related operations, yet at the same time -tasks are customizable for individual cloud providers. - -Conceptual Details ------------------- - -A Glance task is a request to perform an asynchronous image-related -operation. The request results in the creation of a *task resource* that -can be polled for information about the status of the operation. - -A specific type of resource distinct from the traditional Glance image resource -is appropriate here for several reasons: - -* A dedicated task resource can be developed independently of the traditional - Glance image resource, both with respect to structure and workflow. - -* There may be multiple tasks (for example, image export or image conversion) - operating on an image simultaneously. - -* A dedicated task resource allows for the delivery to the end user of clear, - detailed error messages specific to the particular operation. 
- -* A dedicated task resource respects the principle of least surprise. For - example, an import task does not create an image in Glance until it's clear - that the bits submitted pass the deployer's tests for an allowable image. - -Upon reaching a final state (``success`` or ``error``) a task resource is -assigned an expiration datetime that's displayed in the ``expires_at`` field. -(The time between final state and expiration is configurable.) After that -datetime, the task resource is subject to being deleted. The result of the -task (for example, an imported image) will still exist. - -For details about the defined task statuses, please see :ref:`task-statuses`. - -Tasks expire eventually because there's no reason to keep them around, -as the user will have the result of the task, which was the point of creating -the task in the first place. The reason tasks aren't instantly deleted is that -there may be information contained in the task resource that's not easily -available elsewhere. (For example, a successful import task will eventually -result in the creation of an image in Glance, and it would be useful to know -the UUID of this image. Similarly, if the import task fails, we want to give -the end user time to read the task resource to analyze the error message.) - -Task Entities -------------- - -A task entity is represented by a JSON-encoded data structure defined by the -JSON schema available at ``/v2/schemas/task``. - -A task entity has an identifier (``id``) that is guaranteed to be unique within -the endpoint to which it belongs. The id is used as a token in request URIs to -interact with that specific task. - -In addition to the usual properties you'd expect (for example, ``created_at``, -``self``, ``type``, ``status``, ``updated_at``, etc.), tasks have these properties of -interest: - -* ``input``: this is defined to be a JSON blob, the exact content of which will - depend upon the requirements set by the specific cloud deployer. 
The intent - is that each deployer will document these requirements for end users. - -* ``result``: this is also defined to be a JSON blob, the content of which will - be documented by each cloud deployer. The ``result`` element will be null - until the task has reached a final state, and if the final status is - ``failure``, the result element remains null. - -* ``message``: this string field is expected to be null unless the task has - entered ``failure`` status. At that point, it contains an informative - human-readable message concerning the reason(s) for the task failure. diff --git a/doc/source/admin/troubleshooting.rst b/doc/source/admin/troubleshooting.rst deleted file mode 100644 index 36ccdf63..00000000 --- a/doc/source/admin/troubleshooting.rst +++ /dev/null @@ -1,460 +0,0 @@ -==================== -Images and instances -==================== - -Virtual machine images contain a virtual disk that holds a -bootable operating system on it. Disk images provide templates for -virtual machine file systems. The Image service controls image storage -and management. - -Instances are the individual virtual machines that run on physical -compute nodes inside the cloud. Users can launch any number of instances -from the same image. Each launched instance runs from a copy of the -base image. Any changes made to the instance do not affect -the base image. Snapshots capture the state of an instances -running disk. Users can create a snapshot, and build a new image based -on these snapshots. The Compute service controls instance, image, and -snapshot storage and management. - -When you launch an instance, you must choose a ``flavor``, which -represents a set of virtual resources. Flavors define virtual -CPU number, RAM amount available, and ephemeral disks size. Users -must select from the set of available flavors -defined on their cloud. OpenStack provides a number of predefined -flavors that you can edit or add to. - -.. 
note:: - - - For more information about creating and troubleshooting images, - see the `OpenStack Virtual Machine Image - Guide `__. - - - For more information about image configuration options, see the - `Image services - `__ - section of the OpenStack Configuration Reference. - - - -You can add and remove additional resources from running instances, such -as persistent volume storage, or public IP addresses. The example used -in this chapter is of a typical virtual system within an OpenStack -cloud. It uses the ``cinder-volume`` service, which provides persistent -block storage, instead of the ephemeral storage provided by the selected -instance flavor. - -This diagram shows the system state prior to launching an instance. The -image store has a number of predefined images, supported by the Image -service. Inside the cloud, a compute node contains the -available vCPU, memory, and local disk resources. Additionally, the -``cinder-volume`` service stores predefined volumes. - -| - -.. _Figure Base Image: - -**The base image state with no running instances** - -.. figure:: ../images/instance-life-1.png - -| - -Instance Launch -~~~~~~~~~~~~~~~ - -To launch an instance, select an image, flavor, and any optional -attributes. The selected flavor provides a root volume, labeled ``vda`` -in this diagram, and additional ephemeral storage, labeled ``vdb``. In -this example, the ``cinder-volume`` store is mapped to the third virtual -disk on this instance, ``vdc``. - -| - -.. _Figure Instance creation: - -**Instance creation from an image** - -.. figure:: ../images/instance-life-2.png - -| - -The Image service copies the base image from the image store to the -local disk. The local disk is the first disk that the instance -accesses, which is the root volume labeled ``vda``. Smaller -instances start faster. Less data needs to be copied across -the network. - -The new empty ephemeral disk is also created, labeled ``vdb``. -This disk is deleted when you delete the instance. 
- -The compute node connects to the attached ``cinder-volume`` using iSCSI. The -``cinder-volume`` is mapped to the third disk, labeled ``vdc`` in this -diagram. After the compute node provisions the vCPU and memory -resources, the instance boots up from root volume ``vda``. The instance -runs and changes data on the disks (highlighted in red on the diagram). -If the volume store is located on a separate network, the -``my_block_storage_ip`` option specified in the storage node -configuration file directs image traffic to the compute node. - -.. note:: - - Some details in this example scenario might be different in your - environment. For example, you might use a different type of back-end - storage, or different network protocols. One common variant is that - the ephemeral storage used for volumes ``vda`` and ``vdb`` could be - backed by network storage rather than a local disk. - -When you delete an instance, the state is reclaimed with the exception -of the persistent volume. The ephemeral storage, whether encrypted or not, -is purged. Memory and vCPU resources are released. The image remains -unchanged throughout this process. - -| - -.. _End of state: - -**The end state of an image and volume after the instance exits** - -.. figure:: ../images/instance-life-3.png - -| - - -Image properties and property protection -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -An image property is a key and value pair that the administrator -or the image owner attaches to an OpenStack Image service image, as -follows: - -- The administrator defines core properties, such as the image - name. - -- The administrator and the image owner can define additional - properties, such as licensing and billing information. - -The administrator can configure any property as protected, which -limits which policies or user roles can perform CRUD operations on that -property. Protected properties are generally additional properties to -which only administrators have access. 
- -For unprotected image properties, the administrator can manage -core properties and the image owner can manage additional properties. - -**To configure property protection** - -To configure property protection, edit the ``policy.json`` file. This file -can also be used to set policies for Image service actions. - -#. Define roles or policies in the ``policy.json`` file: - - .. code-block:: json - - { - "context_is_admin": "role:admin", - "default": "", - - "add_image": "", - "delete_image": "", - "get_image": "", - "get_images": "", - "modify_image": "", - "publicize_image": "role:admin", - "copy_from": "", - - "download_image": "", - "upload_image": "", - - "delete_image_location": "", - "get_image_location": "", - "set_image_location": "", - - "add_member": "", - "delete_member": "", - "get_member": "", - "get_members": "", - "modify_member": "", - - "manage_image_cache": "role:admin", - - "get_task": "", - "get_tasks": "", - "add_task": "", - "modify_task": "", - - "deactivate": "", - "reactivate": "", - - "get_metadef_namespace": "", - "get_metadef_namespaces":"", - "modify_metadef_namespace":"", - "add_metadef_namespace":"", - "delete_metadef_namespace":"", - - "get_metadef_object":"", - "get_metadef_objects":"", - "modify_metadef_object":"", - "add_metadef_object":"", - - "list_metadef_resource_types":"", - "get_metadef_resource_type":"", - "add_metadef_resource_type_association":"", - - "get_metadef_property":"", - "get_metadef_properties":"", - "modify_metadef_property":"", - "add_metadef_property":"", - - "get_metadef_tag":"", - "get_metadef_tags":"", - "modify_metadef_tag":"", - "add_metadef_tag":"", - "add_metadef_tags":"" - } - - For each parameter, use ``"rule:restricted"`` to restrict access to all - users or ``"role:admin"`` to limit access to administrator roles. - For example: - - .. code-block:: json - - { - "download_image": - "upload_image": - } - -#. 
Define which roles or policies can manage which properties in a property - protections configuration file. For example: - - .. code-block:: ini - - [x_none_read] - create = context_is_admin - read = ! - update = ! - delete = ! - - [x_none_update] - create = context_is_admin - read = context_is_admin - update = ! - delete = context_is_admin - - [x_none_delete] - create = context_is_admin - read = context_is_admin - update = context_is_admin - delete = ! - - - A value of ``@`` allows the corresponding operation for a property. - - - A value of ``!`` disallows the corresponding operation for a - property. - -#. In the ``glance-api.conf`` file, define the location of a property - protections configuration file. - - .. code-block:: ini - - property_protection_file = {file_name} - - This file contains the rules for property protections and the roles and - policies associated with it. - - By default, property protections are not enforced. - - If you specify a file name value and the file is not found, the - ``glance-api`` service does not start. - - To view a sample configuration file, see - `glance-api.conf - `__. - -#. Optionally, in the ``glance-api.conf`` file, specify whether roles or - policies are used in the property protections configuration file - - .. code-block:: ini - - property_protection_rule_format = roles - - The default is ``roles``. - - To view a sample configuration file, see - `glance-api.conf - `__. - -Image download: how it works -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Prior to starting a virtual machine, transfer the virtual machine image -to the compute node from the Image service. How this -works can change depending on the settings chosen for the compute node -and the Image service. - -Typically, the Compute service will use the image identifier passed to -it by the scheduler service and request the image from the Image API. 
-Though images are not stored in glance—rather in a back end, which could -be Object Storage, a filesystem or any other supported method—the -connection is made from the compute node to the Image service and the -image is transferred over this connection. The Image service streams the -image from the back end to the compute node. - -It is possible to set up the Object Storage node on a separate network, -and still allow image traffic to flow between the compute and object -storage nodes. Configure the ``my_block_storage_ip`` option in the -storage node configuration file to allow block storage traffic to reach -the compute node. - -Certain back ends support a more direct method, where on request the -Image service will return a URL that links directly to the back-end store. -You can download the image using this approach. Currently, the only store -to support the direct download approach is the filesystem store. -Configured the approach using the ``filesystems`` option in -the ``image_file_url`` section of the ``nova.conf`` file on -compute nodes. - -Compute nodes also implement caching of images, meaning that if an image -has been used before it won't necessarily be downloaded every time. -Information on the configuration options for caching on compute nodes -can be found in the `Configuration -Reference `__. - -Instance building blocks -~~~~~~~~~~~~~~~~~~~~~~~~ - -In OpenStack, the base operating system is usually copied from an image -stored in the OpenStack Image service. This results in an ephemeral -instance that starts from a known template state and loses all -accumulated states on shutdown. - -You can also put an operating system on a persistent volume in Compute -or the Block Storage volume system. This gives a more traditional, -persistent system that accumulates states that are preserved across -restarts. To get a list of available images on your system, run: - -.. 
code-block:: console - - $ openstack image list - +--------------------------------------+-----------------------------+--------+ - | ID | Name | Status | - +--------------------------------------+-----------------------------+--------+ - | aee1d242-730f-431f-88c1-87630c0f07ba | Ubuntu 14.04 cloudimg amd64 | active | - +--------------------------------------+-----------------------------+--------+ - | 0b27baa1-0ca6-49a7-b3f4-48388e440245 | Ubuntu 14.10 cloudimg amd64 | active | - +--------------------------------------+-----------------------------+--------+ - | df8d56fc-9cea-4dfd-a8d3-28764de3cb08 | jenkins | active | - +--------------------------------------+-----------------------------+--------+ - -The displayed image attributes are: - -``ID`` - Automatically generated UUID of the image. - -``Name`` - Free form, human-readable name for the image. - -``Status`` - The status of the image. Images marked ``ACTIVE`` are available for - use. - -``Server`` - For images that are created as snapshots of running instances, this - is the UUID of the instance the snapshot derives from. For uploaded - images, this field is blank. - -Virtual hardware templates are called ``flavors``, and are defined by -administrators. Prior to the Newton release, a default installation also -includes five predefined flavors. - -For a list of flavors that are available on your system, run: - -.. 
code-block:: console - - $ openstack flavor list - +-----+-----------+-------+------+-----------+-------+-----------+ - | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is_Public | - +-----+-----------+-------+------+-----------+-------+-----------+ - | 1 | m1.tiny | 512 | 1 | 0 | 1 | True | - | 2 | m1.small | 2048 | 20 | 0 | 1 | True | - | 3 | m1.medium | 4096 | 40 | 0 | 2 | True | - | 4 | m1.large | 8192 | 80 | 0 | 4 | True | - | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True | - +-----+-----------+-------+------+-----------+-------+-----------+ - -By default, administrative users can configure the flavors. You can -change this behavior by redefining the access controls for -``compute_extension:flavormanage`` in ``/etc/nova/policy.json`` on the -``compute-api`` server. - - -Instance management tools -~~~~~~~~~~~~~~~~~~~~~~~~~ - -OpenStack provides command-line, web interface, and API-based instance -management tools. Third-party management tools are also available, using -either the native API or the provided EC2-compatible API. - -The OpenStack python-openstackclient package provides a basic command-line -utility, which uses the :command:`openstack` command. -This is available as a native package for most Linux distributions, -or you can install the latest version using the pip python package installer: - -.. code-block:: console - - # pip install python-openstackclient - -For more information about python-openstackclient and other command-line -tools, see the `OpenStack End User -Guide `__. - - -Control where instances run -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The `Scheduling section -`__ -of OpenStack Configuration Reference -provides detailed information on controlling where your instances run, -including ensuring a set of instances run on different compute nodes for -service resiliency or on the same node for high performance -inter-instance communications. - -Administrative users can specify which compute node their instances -run on. 
To do this, specify the ``--availability-zone -AVAILABILITY_ZONE:COMPUTE_HOST`` parameter. - - -Launch instances with UEFI -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Unified Extensible Firmware Interface (UEFI) is a standard firmware -designed to replace legacy BIOS. There is a slow but steady trend -for operating systems to move to the UEFI format and, in some cases, -make it their only format. - -**To configure UEFI environment** - -To successfully launch an instance from an UEFI image in QEMU/KVM -environment, the administrator has to install the following -packages on compute node: - -- OVMF, a port of Intel's tianocore firmware to QEMU virtual machine. - -- libvirt, which has been supporting UEFI boot since version 1.2.9. - -Because default UEFI loader path is ``/usr/share/OVMF/OVMF_CODE.fd``, the -administrator must create one link to this location after UEFI package -is installed. - -**To upload UEFI images** - -To launch instances from a UEFI image, the administrator first has to -upload one UEFI image. To do so, ``hw_firmware_type`` property must -be set to ``uefi`` when the image is created. For example: - -.. code-block:: console - - $ openstack image create --container-format bare --disk-format qcow2 \ - --property hw_firmware_type=uefi --file /tmp/cloud-uefi.qcow --name uefi - -After that, you can launch instances from this UEFI image. diff --git a/doc/source/cli/footer.txt b/doc/source/cli/footer.txt deleted file mode 100644 index 3f236d09..00000000 --- a/doc/source/cli/footer.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. -*- mode: rst -*- - -SEE ALSO -======== - -* `OpenStack Glance `__ - -BUGS -==== - -* Glance bugs are tracked in Launchpad so you can view current bugs at `OpenStack Glance `__ diff --git a/doc/source/cli/general_options.txt b/doc/source/cli/general_options.txt deleted file mode 100644 index 0a0f55f2..00000000 --- a/doc/source/cli/general_options.txt +++ /dev/null @@ -1,68 +0,0 @@ -.. 
-*- mode: rst -*- - - **-h, --help** - Show the help message and exit - - **--version** - Print the version number and exit - - **-v, --verbose** - Print more verbose output - - **--noverbose** - Disable verbose output - - **-d, --debug** - Print debugging output (set logging level to DEBUG instead of - default WARNING level) - - **--nodebug** - Disable debugging output - - **--use-syslog** - Use syslog for logging - - **--nouse-syslog** - Disable the use of syslog for logging - - **--syslog-log-facility SYSLOG_LOG_FACILITY** - syslog facility to receive log lines - - **--config-dir DIR** - Path to a config directory to pull \*.conf files from. This - file set is sorted, to provide a predictable parse order - if individual options are over-ridden. The set is parsed after - the file(s) specified via previous --config-file, arguments hence - over-ridden options in the directory take precedence. This means - that configuration from files in a specified config-dir will - always take precedence over configuration from files specified - by --config-file, regardless to argument order. - - **--config-file PATH** - Path to a config file to use. Multiple config files can be - specified by using this flag multiple times, for example, - --config-file --config-file . Values in latter - files take precedence. - - **--log-config-append PATH** **--log-config PATH** - The name of logging configuration file. It does not - disable existing loggers, but just appends specified - logging configuration to any other existing logging - options. Please see the Python logging module documentation - for details on logging configuration files. The log-config - name for this option is deprecated. - - **--log-format FORMAT** - A logging.Formatter log message format string which may use any - of the available logging.LogRecord attributes. Default: None - - **--log-date-format DATE_FORMAT** - Format string for %(asctime)s in log records. 
Default: None - - **--log-file PATH, --logfile PATH** - (Optional) Name of log file to output to. If not set, logging - will go to stdout. - - **--log-dir LOG_DIR, --logdir LOG_DIR** - (Optional) The directory to keep log files in (will be prepended - to --log-file) diff --git a/doc/source/cli/glanceapi.rst b/doc/source/cli/glanceapi.rst deleted file mode 100644 index 81a16680..00000000 --- a/doc/source/cli/glanceapi.rst +++ /dev/null @@ -1,34 +0,0 @@ -========== -glance-api -========== - ---------------------------------------- -Server for the Glance Image Service API ---------------------------------------- - -.. include:: header.txt - -SYNOPSIS -======== - -glance-api [options] - -DESCRIPTION -=========== - -glance-api is a server daemon that serves the Glance API - -OPTIONS -======= - - **General options** - - .. include:: general_options.txt - -FILES -===== - - **/etc/glance/glance-api.conf** - Default configuration file for Glance API - -.. include:: footer.txt diff --git a/doc/source/cli/glancecachecleaner.rst b/doc/source/cli/glancecachecleaner.rst deleted file mode 100644 index e86ced8f..00000000 --- a/doc/source/cli/glancecachecleaner.rst +++ /dev/null @@ -1,42 +0,0 @@ -==================== -glance-cache-cleaner -==================== - ----------------------------------------------------------------- -Glance Image Cache Invalid Cache Entry and Stalled Image cleaner ----------------------------------------------------------------- - -.. include:: header.txt - -SYNOPSIS -======== - -glance-cache-cleaner [options] - -DESCRIPTION -=========== - -This is meant to be run as a periodic task from cron. - -If something goes wrong while we're caching an image (for example the fetch -times out, or an exception is raised), we create an 'invalid' entry. These -entries are left around for debugging purposes. However, after some period of -time, we want to clean these up. 
- -Also, if an incomplete image hangs around past the image_cache_stall_time -period, we automatically sweep it up. - -OPTIONS -======= - - **General options** - - .. include:: general_options.txt - -FILES -===== - - **/etc/glance/glance-cache.conf** - Default configuration file for the Glance Cache - -.. include:: footer.txt diff --git a/doc/source/cli/glancecachemanage.rst b/doc/source/cli/glancecachemanage.rst deleted file mode 100644 index b8235ab5..00000000 --- a/doc/source/cli/glancecachemanage.rst +++ /dev/null @@ -1,83 +0,0 @@ -=================== -glance-cache-manage -=================== - ------------------------- -Cache management utility ------------------------- - -.. include:: header.txt - -SYNOPSIS -======== - - glance-cache-manage [options] [args] - -COMMANDS -======== - - **help ** - Output help for one of the commands below - - **list-cached** - List all images currently cached - - **list-queued** - List all images currently queued for caching - - **queue-image** - Queue an image for caching - - **delete-cached-image** - Purges an image from the cache - - **delete-all-cached-images** - Removes all images from the cache - - **delete-queued-image** - Deletes an image from the cache queue - - **delete-all-queued-images** - Deletes all images from the cache queue - -OPTIONS -======= - - **--version** - show program's version number and exit - - **-h, --help** - show this help message and exit - - **-v, --verbose** - Print more verbose output - - **-d, --debug** - Print more verbose output - - **-H ADDRESS, --host=ADDRESS** - Address of Glance API host. - Default: 0.0.0.0 - - **-p PORT, --port=PORT** - Port the Glance API host listens on. - Default: 9292 - - **-k, --insecure** - Explicitly allow glance to perform "insecure" SSL - (https) requests. The server's certificate will not be - verified against any certificate authorities. This - option should be used with caution. 
- - **-A TOKEN, --auth_token=TOKEN** - Authentication token to use to identify the client to the glance server - - **-f, --force** - Prevent select actions from requesting user confirmation - - **-S STRATEGY, --os-auth-strategy=STRATEGY** - Authentication strategy (keystone or noauth) - - .. include:: openstack_options.txt - -.. include:: footer.txt diff --git a/doc/source/cli/glancecacheprefetcher.rst b/doc/source/cli/glancecacheprefetcher.rst deleted file mode 100644 index 6750779e..00000000 --- a/doc/source/cli/glancecacheprefetcher.rst +++ /dev/null @@ -1,35 +0,0 @@ -======================= -glance-cache-prefetcher -======================= - ------------------------------- -Glance Image Cache Pre-fetcher ------------------------------- - -.. include:: header.txt - -SYNOPSIS -======== - - glance-cache-prefetcher [options] - -DESCRIPTION -=========== - -This is meant to be run from the command line after queueing -images to be pretched. - -OPTIONS -======= - - **General options** - - .. include:: general_options.txt - -FILES -===== - - **/etc/glance/glance-cache.conf** - Default configuration file for the Glance Cache - -.. include:: footer.txt diff --git a/doc/source/cli/glancecachepruner.rst b/doc/source/cli/glancecachepruner.rst deleted file mode 100644 index 1bca3316..00000000 --- a/doc/source/cli/glancecachepruner.rst +++ /dev/null @@ -1,36 +0,0 @@ -=================== -glance-cache-pruner -=================== - -------------------- -Glance cache pruner -------------------- - -.. include:: header.txt - -SYNOPSIS -======== - - glance-cache-pruner [options] - -DESCRIPTION -=========== - -Prunes images from the Glance cache when the space exceeds the value -set in the image_cache_max_size configuration option. This is meant -to be run as a periodic task, perhaps every half-hour. - -OPTIONS -======= - - **General options** - - .. 
include:: general_options.txt - -FILES -===== - - **/etc/glance/glance-cache.conf** - Default configuration file for the Glance Cache - -.. include:: footer.txt diff --git a/doc/source/cli/glancecontrol.rst b/doc/source/cli/glancecontrol.rst deleted file mode 100644 index be7848cd..00000000 --- a/doc/source/cli/glancecontrol.rst +++ /dev/null @@ -1,53 +0,0 @@ -============== -glance-control -============== - --------------------------------------- -Glance daemon start/stop/reload helper --------------------------------------- - -.. include:: header.txt - -SYNOPSIS -======== - - glance-control [options] [CONFPATH] - -Where is one of: - - all, api, glance-api, registry, glance-registry, scrubber, glance-scrubber - -And command is one of: - - start, status, stop, shutdown, restart, reload, force-reload - -And CONFPATH is the optional configuration file to use. - -OPTIONS -======= - - **General Options** - - .. include:: general_options.txt - - **--pid-file=PATH** - File to use as pid file. Default: - /var/run/glance/$server.pid - - **--await-child DELAY** - Period to wait for service death in order to report - exit code (default is to not wait at all) - - **--capture-output** - Capture stdout/err in syslog instead of discarding - - **--nocapture-output** - The inverse of --capture-output - - **--norespawn** - The inverse of --respawn - - **--respawn** - Restart service on unexpected death - -.. include:: footer.txt diff --git a/doc/source/cli/glancemanage.rst b/doc/source/cli/glancemanage.rst deleted file mode 100644 index c6cf43ae..00000000 --- a/doc/source/cli/glancemanage.rst +++ /dev/null @@ -1,104 +0,0 @@ -============= -glance-manage -============= - -------------------------- -Glance Management Utility -------------------------- - -.. include:: header.txt - -SYNOPSIS -======== - - glance-manage [options] - -DESCRIPTION -=========== - -glance-manage is a utility for managing and configuring a Glance installation. 
-One important use of glance-manage is to setup the database. To do this run:: - - glance-manage db_sync - -Note: glance-manage commands can be run either like this:: - - glance-manage db sync - -or with the db commands concatenated, like this:: - - glance-manage db_sync - - - -COMMANDS -======== - - **db** - This is the prefix for the commands below when used with a space - rather than a _. For example "db version". - - **db_version** - This will print the current migration level of a glance database. - - **db_upgrade [VERSION]** - This will take an existing database and upgrade it to the - specified VERSION. - - **db_version_control** - Place the database under migration control. - - **db_sync [VERSION]** - Place an existing database under migration control and upgrade it to - the specified VERSION. - - **db_expand** - Run this command to expand the database as the first step of a rolling - upgrade process. - - **db_migrate** - Run this command to migrate the database as the second step of a - rolling upgrade process. - - **db_contract** - Run this command to contract the database as the last step of a rolling - upgrade process. - - **db_export_metadefs [PATH | PREFIX]** - Export the metadata definitions into json format. By default the - definitions are exported to /etc/glance/metadefs directory. - **Note: this command will overwrite existing files in the supplied or - default path.** - - **db_load_metadefs [PATH]** - Load the metadata definitions into glance database. By default the - definitions are imported from /etc/glance/metadefs directory. - - **db_unload_metadefs** - Unload the metadata definitions. Clears the contents of all the glance - db tables including metadef_namespace_resource_types, metadef_tags, - metadef_objects, metadef_resource_types, metadef_namespaces and - metadef_properties. - -OPTIONS -======= - - **General Options** - - .. include:: general_options.txt - -.. 
include:: footer.txt - -CONFIGURATION -============= - -The following paths are searched for a ``glance-manage.conf`` file in the -following order: - -* ``~/.glance`` -* ``~/`` -* ``/etc/glance`` -* ``/etc`` - -All options set in ``glance-manage.conf`` override those set in -``glance-registry.conf`` and ``glance-api.conf``. diff --git a/doc/source/cli/glanceregistry.rst b/doc/source/cli/glanceregistry.rst deleted file mode 100644 index 9da18dc5..00000000 --- a/doc/source/cli/glanceregistry.rst +++ /dev/null @@ -1,35 +0,0 @@ -=============== -glance-registry -=============== - --------------------------------------- -Server for the Glance Registry Service --------------------------------------- - -.. include:: header.txt - -SYNOPSIS -======== - -glance-registry [options] - -DESCRIPTION -=========== - -glance-registry is a server daemon that serves image metadata through a -REST-like API. - -OPTIONS -======= - - **General options** - - .. include:: general_options.txt - -FILES -===== - - **/etc/glance/glance-registry.conf** - Default configuration file for Glance Registry - -.. include:: footer.txt diff --git a/doc/source/cli/glancereplicator.rst b/doc/source/cli/glancereplicator.rst deleted file mode 100644 index f8a80be4..00000000 --- a/doc/source/cli/glancereplicator.rst +++ /dev/null @@ -1,85 +0,0 @@ -================= -glance-replicator -================= - ---------------------------------------------- -Replicate images across multiple data centers ---------------------------------------------- - -.. include:: header.txt - -SYNOPSIS -======== - -glance-replicator [options] [args] - -DESCRIPTION -=========== - -glance-replicator is a utility can be used to populate a new glance -server using the images stored in an existing glance server. The images -in the replicated glance server preserve the uuids, metadata, and image -data from the original. 
- -COMMANDS -======== - - **help ** - Output help for one of the commands below - - **compare** - What is missing from the slave glance? - - **dump** - Dump the contents of a glance instance to local disk. - - **livecopy** - Load the contents of one glance instance into another. - - **load** - Load the contents of a local directory into glance. - - **size** - Determine the size of a glance instance if dumped to disk. - -OPTIONS -======= - - **-h, --help** - Show this help message and exit - - **-c CHUNKSIZE, --chunksize=CHUNKSIZE** - Amount of data to transfer per HTTP write - - **-d, --debug** - Print debugging information - - **-D DONTREPLICATE, --dontreplicate=DONTREPLICATE** - List of fields to not replicate - - **-m, --metaonly** - Only replicate metadata, not images - - **-l LOGFILE, --logfile=LOGFILE** - Path of file to log to - - **-s, --syslog** - Log to syslog instead of a file - - **-t TOKEN, --token=TOKEN** - Pass in your authentication token if you have one. If - you use this option the same token is used for both - the master and the slave. - - **-M MASTERTOKEN, --mastertoken=MASTERTOKEN** - Pass in your authentication token if you have one. - This is the token used for the master. - - **-S SLAVETOKEN, --slavetoken=SLAVETOKEN** - Pass in your authentication token if you have one. - This is the token used for the slave. - - **-v, --verbose** - Print more verbose output - -.. include:: footer.txt diff --git a/doc/source/cli/glancescrubber.rst b/doc/source/cli/glancescrubber.rst deleted file mode 100644 index eb1c51df..00000000 --- a/doc/source/cli/glancescrubber.rst +++ /dev/null @@ -1,58 +0,0 @@ -=============== -glance-scrubber -=============== - --------------------- -Glance scrub service --------------------- - -.. include:: header.txt - -SYNOPSIS -======== - -glance-scrubber [options] - -DESCRIPTION -=========== - -glance-scrubber is a utility that cleans up images that have been deleted. 
The -mechanics of this differ depending on the backend store and pending_deletion -options chosen. - -Multiple glance-scrubbers can be run in a single deployment, but only one of -them may be designated as the 'cleanup_scrubber' in the glance-scrubber.conf -file. The 'cleanup_scrubber' coordinates other glance-scrubbers by maintaining -the master queue of images that need to be removed. - -The glance-scubber.conf file also specifies important configuration items such -as the time between runs ('wakeup_time' in seconds), length of time images -can be pending before their deletion ('cleanup_scrubber_time' in seconds) as -well as registry connectivity options. - -glance-scrubber can run as a periodic job or long-running daemon. - -OPTIONS -======= - - **General options** - - .. include:: general_options.txt - - **-D, --daemon** - Run as a long-running process. When not specified (the - default) run the scrub operation once and then exits. - When specified do not exit and run scrub on - wakeup_time interval as specified in the config. - - **--nodaemon** - The inverse of --daemon. Runs the scrub operation once and - then exits. This is the default. - -FILES -===== - - **/etc/glance/glance-scrubber.conf** - Default configuration file for the Glance Scrubber - -.. include:: footer.txt diff --git a/doc/source/cli/header.txt b/doc/source/cli/header.txt deleted file mode 100644 index 7c53f12a..00000000 --- a/doc/source/cli/header.txt +++ /dev/null @@ -1,8 +0,0 @@ -.. -*- mode: rst -*- - -:Author: glance@lists.launchpad.net -:Date: 2017-09-15 -:Copyright: OpenStack Foundation -:Version: 15.0.0 -:Manual section: 1 -:Manual group: cloud computing diff --git a/doc/source/cli/index.rst b/doc/source/cli/index.rst deleted file mode 100644 index 66065a6e..00000000 --- a/doc/source/cli/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -======================== - Command Line Interface -======================== - -.. 
toctree:: - :glob: - :maxdepth: 1 - - * diff --git a/doc/source/cli/openstack_options.txt b/doc/source/cli/openstack_options.txt deleted file mode 100644 index ad2a858f..00000000 --- a/doc/source/cli/openstack_options.txt +++ /dev/null @@ -1,24 +0,0 @@ -.. -*- mode: rst -*- - - **-os-auth-token=OS_AUTH_TOKEN** - Defaults to env[OS_AUTH_TOKEN] - - **--os-username=OS_USERNAME** - Defaults to env[OS_USERNAME] - - **--os-password=OS_PASSWORD** - Defaults to env[OS_PASSWORD] - - **--os-region-name=OS_REGION_NAME** - Defaults to env[OS_REGION_NAME] - - **--os-tenant-id=OS_TENANT_ID** - Defaults to env[OS_TENANT_ID] - - **--os-tenant-name=OS_TENANT_NAME** - Defaults to env[OS_TENANT_NAME] - - **--os-auth-url=OS_AUTH_URL** - Defaults to env[OS_AUTH_URL] - - diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index f3a766fc..00000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,299 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2010 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Glance documentation build configuration file, created by -# sphinx-quickstart on Tue May 18 13:50:15 2010. -# -# This file is execfile()'d with the current directory set to its containing -# dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. 
- -import os -import subprocess -import sys -import warnings - -import openstackdocstheme - - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path = [ - os.path.abspath('../..'), - os.path.abspath('../../bin') - ] + sys.path - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.coverage', - 'sphinx.ext.ifconfig', - 'sphinx.ext.graphviz', - 'stevedore.sphinxext', - 'oslo_config.sphinxext', - 'sphinx.ext.autodoc', - 'sphinx.ext.viewcode', - 'oslo_config.sphinxconfiggen', - 'openstackdocstheme', - ] - -# openstackdocstheme options -repository_name = 'openstack/glance' -bug_project = 'glance' -bug_tag = '' -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -config_generator_config_file = [ - ('../../etc/oslo-config-generator/glance-api.conf', - '_static/glance-api'), - ('../../etc/oslo-config-generator/glance-cache.conf', - '_static/glance-cache'), - ('../../etc/oslo-config-generator/glance-manage.conf', - '_static/glance-manage'), - ('../../etc/oslo-config-generator/glance-registry.conf', - '_static/glance-registry'), - ('../../etc/oslo-config-generator/glance-scrubber.conf', - '_static/glance-scrubber'), -] - - -# Add any paths that contain templates here, relative to this directory. -# templates_path = [] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Glance' -copyright = u'2010-present, OpenStack Foundation.' 
- -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -from glance.version import version_info as glance_version -# The full version, including alpha/beta/rc tags. -release = glance_version.version_string_with_vcs() -# The short X.Y version. -version = glance_version.canonical_version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of documents that shouldn't be included in the build. -#unused_docs = [] - -# List of directories, relative to source directory, that shouldn't be searched -# for source files. -#exclude_trees = ['api'] -exclude_patterns = [ - # The man directory includes some snippet files that are included - # in other documents during the build but that should not be - # included in the toctree themselves, so tell Sphinx to ignore - # them when scanning for input files. - 'man/footer.rst', - 'man/general_options.rst', - 'man/openstack_options.rst', -] - -# The reST default role (for this markup: `text`) to use for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. 
-modindex_common_prefix = ['glance.'] - -# -- Options for man page output -------------------------------------------- - -# Grouping the document tree for man pages. -# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' - -man_pages = [ - ('man/glanceapi', 'glance-api', u'Glance API Server', - [u'OpenStack'], 1), - ('man/glancecachecleaner', 'glance-cache-cleaner', u'Glance Cache Cleaner', - [u'OpenStack'], 1), - ('man/glancecachemanage', 'glance-cache-manage', u'Glance Cache Manager', - [u'OpenStack'], 1), - ('man/glancecacheprefetcher', 'glance-cache-prefetcher', - u'Glance Cache Pre-fetcher', [u'OpenStack'], 1), - ('man/glancecachepruner', 'glance-cache-pruner', u'Glance Cache Pruner', - [u'OpenStack'], 1), - ('man/glancecontrol', 'glance-control', u'Glance Daemon Control Helper ', - [u'OpenStack'], 1), - ('man/glancemanage', 'glance-manage', u'Glance Management Utility', - [u'OpenStack'], 1), - ('man/glanceregistry', 'glance-registry', u'Glance Registry Server', - [u'OpenStack'], 1), - ('man/glancereplicator', 'glance-replicator', u'Glance Replicator', - [u'OpenStack'], 1), - ('man/glancescrubber', 'glance-scrubber', u'Glance Scrubber Service', - [u'OpenStack'], 1) -] - - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = ['_theme'] -html_theme_path = [openstackdocstheme.get_html_theme_path()] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". 
-#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' -git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", - "-n1"] -try: - html_last_updated_fmt = subprocess.check_output(git_cmd).decode('utf-8') -except Exception: - warnings.warn('Cannot get last updated time from git repository. ' - 'Not setting "html_last_updated_fmt".') - - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -html_use_modindex = True - -# If false, no index is generated. -html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. 
The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'glancedoc' - - -# -- Options for LaTeX output ------------------------------------------------ - -# The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, -# documentclass [howto/manual]). -latex_documents = [ - ('index', 'Glance.tex', u'Glance Documentation', - u'Glance Team', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -#latex_preamble = '' - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_use_modindex = True diff --git a/doc/source/configuration/configuring.rst b/doc/source/configuration/configuring.rst deleted file mode 100644 index d8af2ba3..00000000 --- a/doc/source/configuration/configuring.rst +++ /dev/null @@ -1,1603 +0,0 @@ -.. - Copyright 2011 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - License for the specific language governing permissions and limitations - under the License. - -.. _basic-configuration: - -Basic Configuration -=================== - -Glance has a number of options that you can use to configure the Glance API -server, the Glance Registry server, and the various storage backends that -Glance can use to store images. - -Most configuration is done via configuration files, with the Glance API -server and Glance Registry server using separate configuration files. - -When starting up a Glance server, you can specify the configuration file to -use (see :ref:`the documentation on controller Glance servers `). -If you do **not** specify a configuration file, Glance will look in the following -directories for a configuration file, in order: - -* ``~/.glance`` -* ``~/`` -* ``/etc/glance`` -* ``/etc`` - -The Glance API server configuration file should be named ``glance-api.conf``. -Similarly, the Glance Registry server configuration file should be named -``glance-registry.conf``. There are many other configuration files also -since Glance maintains a configuration file for each of its services. If you -installed Glance via your operating system's package management system, it -is likely that you will have sample configuration files installed in -``/etc/glance``. - -In addition, sample configuration files for each server application with -detailed comments are available in the :ref:`Glance Sample Configuration -` section. - -The PasteDeploy configuration (controlling the deployment of the WSGI -application for each component) may be found by default in --paste.ini alongside the main configuration file, .conf. -For example, ``glance-api-paste.ini`` corresponds to ``glance-api.conf``. 
-This pathname for the paste config is configurable, as follows:: - - [paste_deploy] - config_file = /path/to/paste/config - - -Common Configuration Options in Glance --------------------------------------- - -Glance has a few command-line options that are common to all Glance programs: - -``--verbose`` - Optional. Default: ``False`` - - Can be specified on the command line and in configuration files. - - Turns on the INFO level in logging and prints more verbose command-line - interface printouts. - -``--debug`` - Optional. Default: ``False`` - - Can be specified on the command line and in configuration files. - - Turns on the DEBUG level in logging. - -``--config-file=PATH`` - Optional. Default: See below for default search order. - - Specified on the command line only. - - Takes a path to a configuration file to use when running the program. If this - CLI option is not specified, then we check to see if the first argument is a - file. If it is, then we try to use that as the configuration file. If there is - no file or there were no arguments, we search for a configuration file in the - following order: - - * ``~/.glance`` - * ``~/`` - * ``/etc/glance`` - * ``/etc`` - - The filename that is searched for depends on the server application name. So, - if you are starting up the API server, ``glance-api.conf`` is searched for, - otherwise ``glance-registry.conf``. - -``--config-dir=DIR`` - Optional. Default: ``None`` - - Specified on the command line only. - - Takes a path to a configuration directory from which all \*.conf fragments - are loaded. This provides an alternative to multiple --config-file options - when it is inconvenient to explicitly enumerate all the configuration files, - for example when an unknown number of config fragments are being generated - by a deployment framework. - - If --config-dir is set, then --config-file is ignored. 
- - An example usage would be: - - $ glance-api --config-dir=/etc/glance/glance-api.d - - $ ls /etc/glance/glance-api.d - 00-core.conf - 01-swift.conf - 02-ssl.conf - ... etc. - - The numeric prefixes in the example above are only necessary if a specific - parse ordering is required (i.e. if an individual config option set in an - earlier fragment is overridden in a later fragment). - - Note that ``glance-manage`` currently loads configuration from three files: - - * ``glance-registry.conf`` - * ``glance-api.conf`` - * ``glance-manage.conf`` - - By default ``glance-manage.conf`` only specifies a custom logging file but - other configuration options for ``glance-manage`` should be migrated in there. - **Warning**: Options set in ``glance-manage.conf`` will override options of - the same section and name set in the other two. Similarly, options in - ``glance-api.conf`` will override options set in ``glance-registry.conf``. - This tool is planning to stop loading ``glance-registry.conf`` and - ``glance-api.conf`` in a future cycle. - -Configuring Server Startup Options ----------------------------------- - -You can put the following options in the ``glance-api.conf`` and -``glance-registry.conf`` files, under the ``[DEFAULT]`` section. They enable -startup and binding behaviour for the API and registry servers, respectively. - -``bind_host=ADDRESS`` - The address of the host to bind to. - - Optional. Default: ``0.0.0.0`` - -``bind_port=PORT`` - The port the server should bind to. - - Optional. Default: ``9191`` for the registry server, ``9292`` for the API server - -``backlog=REQUESTS`` - Number of backlog requests to configure the socket with. - - Optional. Default: ``4096`` - -``tcp_keepidle=SECONDS`` - Sets the value of TCP_KEEPIDLE in seconds for each server socket. - Not supported on OS X. - - Optional. Default: ``600`` - -``client_socket_timeout=SECONDS`` - Timeout for client connections' socket operations. 
If an incoming - connection is idle for this period it will be closed. A value of `0` - means wait forever. - - Optional. Default: ``900`` - -``workers=PROCESSES`` - Number of Glance API or Registry worker processes to start. Each worker - process will listen on the same port. Increasing this value may increase - performance (especially if using SSL with compression enabled). Typically - it is recommended to have one worker process per CPU. The value `0` - will prevent any new worker processes from being created. When ``data_api`` - is set to ``glance.db.simple.api``, ``workers`` MUST be set to either ``0`` or - ``1``. - - Optional. Default: The number of CPUs available will be used by default. - -``max_request_id_length=LENGTH`` - Limits the maximum size of the x-openstack-request-id header which is - logged. Affects only if context middleware is configured in pipeline. - - Optional. Default: ``64`` (Limited by max_header_line default: 16384) - -Configuring SSL Support -~~~~~~~~~~~~~~~~~~~~~~~ - -``cert_file=PATH`` - Path to the certificate file the server should use when binding to an - SSL-wrapped socket. - - Optional. Default: not enabled. - -``key_file=PATH`` - Path to the private key file the server should use when binding to an - SSL-wrapped socket. - - Optional. Default: not enabled. - -``ca_file=PATH`` - Path to the CA certificate file the server should use to validate client - certificates provided during an SSL handshake. This is ignored if - ``cert_file`` and ''key_file`` are not set. - - Optional. Default: not enabled. - -Configuring Registry Access -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are a number of configuration options in Glance that control how -the API server accesses the registry server. - -``registry_client_protocol=PROTOCOL`` - If you run a secure Registry server, you need to set this value to ``https`` - and also set ``registry_client_key_file`` and optionally - ``registry_client_cert_file``. - - Optional. 
Default: http - -``registry_client_key_file=PATH`` - The path to the key file to use in SSL connections to the - registry server, if any. Alternately, you may set the - ``GLANCE_CLIENT_KEY_FILE`` environ variable to a filepath of the key file - - Optional. Default: Not set. - -``registry_client_cert_file=PATH`` - Optional. Default: Not set. - - The path to the cert file to use in SSL connections to the - registry server, if any. Alternately, you may set the - ``GLANCE_CLIENT_CERT_FILE`` environ variable to a filepath of the cert file - -``registry_client_ca_file=PATH`` - Optional. Default: Not set. - - The path to a Certifying Authority's cert file to use in SSL connections to the - registry server, if any. Alternately, you may set the - ``GLANCE_CLIENT_CA_FILE`` environ variable to a filepath of the CA cert file - -``registry_client_insecure=False`` - Optional. Default: False. - - When using SSL in connections to the registry server, do not require - validation via a certifying authority. This is the registry's equivalent of - specifying --insecure on the command line using glanceclient for the API - -``registry_client_timeout=SECONDS`` - Optional. Default: ``600``. - - The period of time, in seconds, that the API server will wait for a registry - request to complete. A value of '0' implies no timeout. - -.. note:: - ``use_user_token``, ``admin_user``, ``admin_password``, - ``admin_tenant_name``, ``auth_url``, ``auth_strategy`` and ``auth_region`` - options were considered harmful and have been deprecated in M release. - They will be removed in O release. For more information read - `OSSN-0060 `_. - Related functionality with uploading big images has been implemented with - Keystone trusts support. - -``use_user_token=True`` - Optional. Default: True - - DEPRECATED. This option will be removed in O release. - - Pass the user token through for API requests to the registry. 
- - If 'use_user_token' is not in effect then admin credentials can be - specified (see below). If admin credentials are specified then they are - used to generate a token; this token rather than the original user's - token is used for requests to the registry. - -``admin_user=USER`` - DEPRECATED. This option will be removed in O release. - - If 'use_user_token' is not in effect then admin credentials can be - specified. Use this parameter to specify the username. - - Optional. Default: None - -``admin_password=PASSWORD`` - DEPRECATED. This option will be removed in O release. - - If 'use_user_token' is not in effect then admin credentials can be - specified. Use this parameter to specify the password. - - Optional. Default: None - -``admin_tenant_name=TENANTNAME`` - DEPRECATED. This option will be removed in O release. - - If 'use_user_token' is not in effect then admin credentials can be - specified. Use this parameter to specify the tenant name. - - Optional. Default: None - -``auth_url=URL`` - DEPRECATED. This option will be removed in O release. - - If 'use_user_token' is not in effect then admin credentials can be - specified. Use this parameter to specify the Keystone endpoint. - - Optional. Default: None - -``auth_strategy=STRATEGY`` - DEPRECATED. This option will be removed in O release. - - If 'use_user_token' is not in effect then admin credentials can be - specified. Use this parameter to specify the auth strategy. - - Optional. Default: noauth - -``auth_region=REGION`` - DEPRECATED. This option will be removed in O release. - - If 'use_user_token' is not in effect then admin credentials can be - specified. Use this parameter to specify the region. - - Optional. Default: None - - -Configuring Logging in Glance ------------------------------ - -There are a number of configuration options in Glance that control how Glance -servers log messages. - -``--log-config=PATH`` - Optional. Default: ``None`` - - Specified on the command line only. 
- - Takes a path to a configuration file to use for configuring logging. - -Logging Options Available Only in Configuration Files -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You will want to place the different logging options in the **[DEFAULT]** section -in your application configuration file. As an example, you might do the following -for the API server, in a configuration file called ``etc/glance-api.conf``:: - - [DEFAULT] - log_file = /var/log/glance/api.log - -``log_file`` - The filepath of the file to use for logging messages from Glance's servers. If - missing, the default is to output messages to ``stdout``, so if you are running - Glance servers in a daemon mode (using ``glance-control``) you should make - sure that the ``log_file`` option is set appropriately. - -``log_dir`` - The filepath of the directory to use for log files. If not specified (the default) - the ``log_file`` is used as an absolute filepath. - -``log_date_format`` - The format string for timestamps in the log output. - - Defaults to ``%Y-%m-%d %H:%M:%S``. See the - `logging module `_ documentation for - more information on setting this format string. - -``log_use_syslog`` - Use syslog logging functionality. - - Defaults to False. - -Configuring Glance Storage Backends ------------------------------------ - -There are a number of configuration options in Glance that control how Glance -stores disk images. These configuration options are specified in the -``glance-api.conf`` configuration file in the section ``[glance_store]``. - -``default_store=STORE`` - Optional. Default: ``file`` - - Can only be specified in configuration files. - - Sets the storage backend to use by default when storing images in Glance. - Available options for this option are (``file``, ``swift``, ``rbd``, - ``sheepdog``, ``cinder`` or ``vsphere``). In order to select a default store - it must also be listed in the ``stores`` list described below. - -``stores=STORES`` - Optional. 
Default: ``file, http`` - - A comma separated list of enabled glance stores. Some available options for - this option are (``filesystem``, ``http``, ``rbd``, ``swift``, - ``sheepdog``, ``cinder``, ``vmware``) - -Configuring the Filesystem Storage Backend -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``filesystem_store_datadir=PATH`` - Optional. Default: ``/var/lib/glance/images/`` - - Can only be specified in configuration files. - - `This option is specific to the filesystem storage backend.` - - Sets the path where the filesystem storage backend write disk images. Note that - the filesystem storage backend will attempt to create this directory if it does - not exist. Ensure that the user that ``glance-api`` runs under has write - permissions to this directory. - -``filesystem_store_file_perm=PERM_MODE`` - Optional. Default: ``0`` - - Can only be specified in configuration files. - - `This option is specific to the filesystem storage backend.` - - The required permission value, in octal representation, for the created image file. - You can use this value to specify the user of the consuming service (such as Nova) as - the only member of the group that owns the created files. To keep the default value, - assign a permission value that is less than or equal to 0. Note that the file owner - must maintain read permission; if this value removes that permission an error message - will be logged and the BadStoreConfiguration exception will be raised. If the Glance - service has insufficient privileges to change file access permissions, a file will still - be saved, but a warning message will appear in the Glance log. - -Configuring the Filesystem Storage Backend with multiple stores -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``filesystem_store_datadirs=PATH:PRIORITY`` - Optional. 
Default: ``/var/lib/glance/images/:1`` - - Example:: - - filesystem_store_datadirs = /var/glance/store - filesystem_store_datadirs = /var/glance/store1:100 - filesystem_store_datadirs = /var/glance/store2:200 - - This option can only be specified in configuration file and is specific - to the filesystem storage backend only. - - filesystem_store_datadirs option allows administrators to configure - multiple store directories to save glance image in filesystem storage backend. - Each directory can be coupled with its priority. - - **NOTE**: - - * This option can be specified multiple times to specify multiple stores. - * Either filesystem_store_datadir or filesystem_store_datadirs option must be - specified in glance-api.conf - * Store with priority 200 has precedence over store with priority 100. - * If no priority is specified, default priority '0' is associated with it. - * If two filesystem stores have same priority store with maximum free space - will be chosen to store the image. - * If same store is specified multiple times then BadStoreConfiguration - exception will be raised. - -Configuring the Swift Storage Backend -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``swift_store_auth_address=URL`` - Required when using the Swift storage backend. - - Can only be specified in configuration files. - - Deprecated. Use ``auth_address`` in the Swift back-end configuration file instead. - - `This option is specific to the Swift storage backend.` - - Sets the authentication URL supplied to Swift when making calls to its storage - system. For more information about the Swift authentication system, please - see the `Swift auth `_ - documentation. - - **IMPORTANT NOTE**: Swift authentication addresses use HTTPS by default. This - means that if you are running Swift with authentication over HTTP, you need - to set your ``swift_store_auth_address`` to the full URL, including the ``http://``. - -``swift_store_user=USER`` - Required when using the Swift storage backend. 
- - Can only be specified in configuration files. - - Deprecated. Use ``user`` in the Swift back-end configuration file instead. - - `This option is specific to the Swift storage backend.` - - Sets the user to authenticate against the ``swift_store_auth_address`` with. - -``swift_store_key=KEY`` - Required when using the Swift storage backend. - - Can only be specified in configuration files. - - Deprecated. Use ``key`` in the Swift back-end configuration file instead. - - `This option is specific to the Swift storage backend.` - - Sets the authentication key to authenticate against the - ``swift_store_auth_address`` with for the user ``swift_store_user``. - -``swift_store_container=CONTAINER`` - Optional. Default: ``glance`` - - Can only be specified in configuration files. - - `This option is specific to the Swift storage backend.` - - Sets the name of the container to use for Glance images in Swift. - -``swift_store_create_container_on_put`` - Optional. Default: ``False`` - - Can only be specified in configuration files. - - `This option is specific to the Swift storage backend.` - - If true, Glance will attempt to create the container ``swift_store_container`` - if it does not exist. - -``swift_store_large_object_size=SIZE_IN_MB`` - Optional. Default: ``5120`` - - Can only be specified in configuration files. - - `This option is specific to the Swift storage backend.` - - What size, in MB, should Glance start chunking image files - and do a large object manifest in Swift? By default, this is - the maximum object size in Swift, which is 5GB - -``swift_store_large_object_chunk_size=SIZE_IN_MB`` - Optional. Default: ``200`` - - Can only be specified in configuration files. - - `This option is specific to the Swift storage backend.` - - When doing a large object manifest, what size, in MB, should - Glance write chunks to Swift? The default is 200MB. - -``swift_store_multi_tenant=False`` - Optional. 
Default: ``False`` - - Can only be specified in configuration files. - - `This option is specific to the Swift storage backend.` - - If set to True enables multi-tenant storage mode which causes Glance images - to be stored in tenant specific Swift accounts. When set to False Glance - stores all images in a single Swift account. - -``swift_store_multiple_containers_seed`` - Optional. Default: ``0`` - - Can only be specified in configuration files. - - `This option is specific to the Swift storage backend.` - - When set to 0, a single-tenant store will only use one container to store all - images. When set to an integer value between 1 and 32, a single-tenant store - will use multiple containers to store images, and this value will determine - how many characters from an image UUID are checked when determining what - container to place the image in. The maximum number of containers that will be - created is approximately equal to 16^N. This setting is used only when - swift_store_multi_tenant is disabled. - - Example: if this config option is set to 3 and - swift_store_container = 'glance', then an image with UUID - 'fdae39a1-bac5-4238-aba4-69bcc726e848' would be placed in the container - 'glance_fda'. All dashes in the UUID are included when creating the container - name but do not count toward the character limit, so in this example with N=10 - the container name would be 'glance_fdae39a1-ba'. - - When choosing the value for swift_store_multiple_containers_seed, deployers - should discuss a suitable value with their swift operations team. The authors - of this option recommend that large scale deployments use a value of '2', - which will create a maximum of ~256 containers. Choosing a higher number than - this, even in extremely large scale deployments, may not have any positive - impact on performance and could lead to a large number of empty, unused - containers. 
The largest of deployments could notice an increase in performance - if swift rate limits are throttling on single container. Note: If dynamic - container creation is turned off, any value for this configuration option - higher than '1' may be unreasonable as the deployer would have to manually - create each container. - -``swift_store_admin_tenants`` - Can only be specified in configuration files. - - `This option is specific to the Swift storage backend.` - - Optional. Default: Not set. - - A list of swift ACL strings that will be applied as both read and - write ACLs to the containers created by Glance in multi-tenant - mode. This grants the specified tenants/users read and write access - to all newly created image objects. The standard swift ACL string - formats are allowed, including: - - : - : - \*: - - Multiple ACLs can be combined using a comma separated list, for - example: swift_store_admin_tenants = service:glance,*:admin - -``swift_store_auth_version`` - Can only be specified in configuration files. - - Deprecated. Use ``auth_version`` in the Swift back-end configuration - file instead. - - `This option is specific to the Swift storage backend.` - - Optional. Default: ``2`` - - A string indicating which version of Swift OpenStack authentication - to use. See the project - `python-swiftclient `_ - for more details. - -``swift_store_service_type`` - Can only be specified in configuration files. - - `This option is specific to the Swift storage backend.` - - Optional. Default: ``object-store`` - - A string giving the service type of the swift service to use. This - setting is only used if swift_store_auth_version is ``2``. - -``swift_store_region`` - Can only be specified in configuration files. - - `This option is specific to the Swift storage backend.` - - Optional. Default: Not set. - - A string giving the region of the swift service endpoint to use. This - setting is only used if swift_store_auth_version is ``2``. 
This - setting is especially useful for disambiguation if multiple swift - services might appear in a service catalog during authentication. - -``swift_store_endpoint_type`` - Can only be specified in configuration files. - - `This option is specific to the Swift storage backend.` - - Optional. Default: ``publicURL`` - - A string giving the endpoint type of the swift service endpoint to - use. This setting is only used if swift_store_auth_version is ``2``. - -``swift_store_ssl_compression`` - Can only be specified in configuration files. - - `This option is specific to the Swift storage backend.` - - Optional. Default: True. - - If set to False, disables SSL layer compression of https swift - requests. Setting to 'False' may improve performance for images which - are already in a compressed format, e.g. qcow2. If set to True then - compression will be enabled (provided it is supported by the swift - proxy). - -``swift_store_cacert`` - Can only be specified in configuration files. - - Optional. Default: ``None`` - - A string giving the path to a CA certificate bundle that will allow Glance's - services to perform SSL verification when communicating with Swift. - -``swift_store_retry_get_count`` - The number of times a Swift download will be retried before the request - fails. - Optional. Default: ``0`` - -Configuring Multiple Swift Accounts/Stores -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -In order to not store Swift account credentials in the database, and to -have support for multiple accounts (or multiple Swift backing stores), a -reference is stored in the database and the corresponding configuration -(credentials/ parameters) details are stored in the configuration file. -Optional. Default: not enabled. - -The location for this file is specified using the ``swift_store_config_file`` -configuration file in the section ``[DEFAULT]``. 
**If an incorrect value is -specified, Glance API Swift store service will not be configured.** - -``swift_store_config_file=PATH`` - `This option is specific to the Swift storage backend.` - -``default_swift_reference=DEFAULT_REFERENCE`` - Required when multiple Swift accounts/backing stores are configured. - - Can only be specified in configuration files. - - `This option is specific to the Swift storage backend.` - - It is the default swift reference that is used to add any new images. - -``swift_store_auth_insecure`` - If True, bypass SSL certificate verification for Swift. - - Can only be specified in configuration files. - - `This option is specific to the Swift storage backend.` - - Optional. Default: ``False`` - -Configuring Swift configuration file -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If ``swift_store_config_file`` is set, Glance will use information -from the file specified under this parameter. - -.. note:: - The ``swift_store_config_file`` is currently used only for single-tenant - Swift store configurations. If you configure a multi-tenant Swift store - back end (``swift_store_multi_tenant=True``), ensure that both - ``swift_store_config_file`` and ``default_swift_reference`` are *not* set. - -The file contains a set of references like: - -.. code-block:: ini - - [ref1] - user = tenant:user1 - key = key1 - auth_version = 2 - auth_address = http://localhost:5000/v2.0 - - [ref2] - user = project_name:user_name2 - key = key2 - user_domain_id = default - project_domain_id = default - auth_version = 3 - auth_address = http://localhost:5000/v3 - -A default reference must be configured. Its parameters will be used when -creating new images. For example, to specify ``ref2`` as the default -reference, add the following value to the [glance_store] section of -:file:`glance-api.conf` file: - -.. 
code-block:: ini - - default_swift_reference = ref2 - -In the reference, a user can specify the following parameters: - -``user`` - A *project_name user_name* pair in the ``project_name:user_name`` format - to authenticate against the Swift authentication service. - -``key`` - An authentication key for a user authenticating against the Swift - authentication service. - -``auth_address`` - An address where the Swift authentication service is located. - -``auth_version`` - A version of the authentication service to use. - Valid versions are ``2`` and ``3`` for Keystone and ``1`` - (deprecated) for Swauth and Rackspace. - - Optional. Default: ``2`` - -``project_domain_id`` - A domain ID of the project which is the requested project-level - authorization scope. - - Optional. Default: ``None`` - - `This option can be specified if ``auth_version`` is ``3`` .` - -``project_domain_name`` - A domain name of the project which is the requested project-level - authorization scope. - - Optional. Default: ``None`` - - `This option can be specified if ``auth_version`` is ``3`` .` - -``user_domain_id`` - A domain ID of the user which is the requested domain-level - authorization scope. - - Optional. Default: ``None`` - - `This option can be specified if ``auth_version`` is ``3`` .` - -``user_domain_name`` - A domain name of the user which is the requested domain-level - authorization scope. - - Optional. Default: ``None`` - - `This option can be specified if ``auth_version`` is ``3``. ` - -Configuring the RBD Storage Backend -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**Note**: the RBD storage backend requires the python bindings for -librados and librbd. These are in the python-ceph package on -Debian-based distributions. - -``rbd_store_pool=POOL`` - Optional. Default: ``rbd`` - - Can only be specified in configuration files. - - `This option is specific to the RBD storage backend.` - - Sets the RADOS pool in which images are stored. 
- -``rbd_store_chunk_size=CHUNK_SIZE_MB`` - Optional. Default: ``4`` - - Can only be specified in configuration files. - - `This option is specific to the RBD storage backend.` - - Images will be chunked into objects of this size (in megabytes). - For best performance, this should be a power of two. - -``rados_connect_timeout`` - Optional. Default: ``0`` - - Can only be specified in configuration files. - - `This option is specific to the RBD storage backend.` - - Prevents glance-api hangups during the connection to RBD. Sets the time - to wait (in seconds) for glance-api before closing the connection. - Setting ``rados_connect_timeout<=0`` means no timeout. - -``rbd_store_ceph_conf=PATH`` - Optional. Default: ``/etc/ceph/ceph.conf``, ``~/.ceph/config``, and - ``./ceph.conf`` - - Can only be specified in configuration files. - - `This option is specific to the RBD storage backend.` - - Sets the Ceph configuration file to use. - -``rbd_store_user=NAME`` - Optional. Default: ``admin`` - - Can only be specified in configuration files. - - `This option is specific to the RBD storage backend.` - - Sets the RADOS user to authenticate as. This is only needed - when `RADOS authentication `_ - is `enabled. `_ - -A keyring must be set for this user in the Ceph -configuration file, e.g. with a user ``glance``:: - - [client.glance] - keyring=/etc/glance/rbd.keyring - -To set up a user named ``glance`` with minimal permissions, using a pool called -``images``, run:: - - rados mkpool images - ceph-authtool --create-keyring /etc/glance/rbd.keyring - ceph-authtool --gen-key --name client.glance --cap mon 'allow r' --cap osd 'allow rwx pool=images' /etc/glance/rbd.keyring - ceph auth add client.glance -i /etc/glance/rbd.keyring - -Configuring the Sheepdog Storage Backend -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``sheepdog_store_address=ADDR`` - Optional. Default: ``localhost`` - - Can only be specified in configuration files. 
- - `This option is specific to the Sheepdog storage backend.` - - Sets the IP address of the sheep daemon - -``sheepdog_store_port=PORT`` - Optional. Default: ``7000`` - - Can only be specified in configuration files. - - `This option is specific to the Sheepdog storage backend.` - - Sets the IP port of the sheep daemon - -``sheepdog_store_chunk_size=SIZE_IN_MB`` - Optional. Default: ``64`` - - Can only be specified in configuration files. - - `This option is specific to the Sheepdog storage backend.` - - Images will be chunked into objects of this size (in megabytes). - For best performance, this should be a power of two. - -Configuring the Cinder Storage Backend -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**Note**: Currently Cinder store is experimental. Current deployers should be -aware that the use of it in production right now may be risky. It is expected -to work well with most iSCSI Cinder backends such as LVM iSCSI, but will not -work with some backends especially if they don't support host-attach. - -**Note**: To create a Cinder volume from an image in this store quickly, additional -settings are required. Please see the -`Volume-backed image `_ -documentation for more information. - -``cinder_catalog_info=::`` - Optional. Default: ``volumev2::publicURL`` - - Can only be specified in configuration files. - - `This option is specific to the Cinder storage backend.` - - Sets the info to match when looking for cinder in the service catalog. - Format is : separated values of the form: :: - -``cinder_endpoint_template=http://ADDR:PORT/VERSION/%(tenant)s`` - Optional. Default: ``None`` - - Can only be specified in configuration files. - - `This option is specific to the Cinder storage backend.` - - Override service catalog lookup with template for cinder endpoint. - ``%(...)s`` parts are replaced by the value in the request context. - e.g. http://localhost:8776/v2/%(tenant)s - -``os_region_name=REGION_NAME`` - Optional. 
Default: ``None`` - - Can only be specified in configuration files. - - `This option is specific to the Cinder storage backend.` - - Region name of this node. - - Deprecated. Use ``cinder_os_region_name`` instead. - -``cinder_os_region_name=REGION_NAME`` - Optional. Default: ``None`` - - Can only be specified in configuration files. - - `This option is specific to the Cinder storage backend.` - - Region name of this node. If specified, it is used to locate cinder from - the service catalog. - -``cinder_ca_certificates_file=CA_FILE_PATH`` - Optional. Default: ``None`` - - Can only be specified in configuration files. - - `This option is specific to the Cinder storage backend.` - - Location of ca certificates file to use for cinder client requests. - -``cinder_http_retries=TIMES`` - Optional. Default: ``3`` - - Can only be specified in configuration files. - - `This option is specific to the Cinder storage backend.` - - Number of cinderclient retries on failed http calls. - -``cinder_state_transition_timeout`` - Optional. Default: ``300`` - - Can only be specified in configuration files. - - `This option is specific to the Cinder storage backend.` - - Time period, in seconds, to wait for a cinder volume transition to complete. - -``cinder_api_insecure=ON_OFF`` - Optional. Default: ``False`` - - Can only be specified in configuration files. - - `This option is specific to the Cinder storage backend.` - - Allow to perform insecure SSL requests to cinder. - -``cinder_store_user_name=NAME`` - Optional. Default: ``None`` - - Can only be specified in configuration files. - - `This option is specific to the Cinder storage backend.` - - User name to authenticate against Cinder. If , the user of current - context is used. - - **NOTE**: This option is applied only if all of ``cinder_store_user_name``, - ``cinder_store_password``, ``cinder_store_project_name`` and - ``cinder_store_auth_address`` are set. 
- These options are useful to put image volumes into the internal service - project in order to hide the volume from users, and to make the image - sharable among projects. - -``cinder_store_password=PASSWORD`` - Optional. Default: ``None`` - - Can only be specified in configuration files. - - `This option is specific to the Cinder storage backend.` - - Password for the user authenticating against Cinder. If , the current - context auth token is used. - -``cinder_store_project_name=NAME`` - Optional. Default: ``None`` - - Can only be specified in configuration files. - - `This option is specific to the Cinder storage backend.` - - Project name where the image is stored in Cinder. If , the project - in current context is used. - -``cinder_store_auth_address=URL`` - Optional. Default: ``None`` - - Can only be specified in configuration files. - - `This option is specific to the Cinder storage backend.` - - The address where the Cinder authentication service is listening. If , - the cinder endpoint in the service catalog is used. - -``rootwrap_config=NAME`` - Optional. Default: ``/etc/glance/rootwrap.conf`` - - Can only be specified in configuration files. - - `This option is specific to the Cinder storage backend.` - - Path to the rootwrap configuration file to use for running commands as root. - -Configuring the VMware Storage Backend -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``vmware_server_host=ADDRESS`` - Required when using the VMware storage backend. - - Can only be specified in configuration files. - - Sets the address of the ESX/ESXi or vCenter Server target system. - The address can contain an IP (``127.0.0.1``), an IP and port - (``127.0.0.1:443``), a DNS name (``www.my-domain.com``) or DNS and port. - - `This option is specific to the VMware storage backend.` - -``vmware_server_username=USERNAME`` - Required when using the VMware storage backend. - - Can only be specified in configuration files. 
- - Username for authenticating with VMware ESX/ESXi or vCenter Server. - -``vmware_server_password=PASSWORD`` - Required when using the VMware storage backend. - - Can only be specified in configuration files. - - Password for authenticating with VMware ESX/ESXi or vCenter Server. - -``vmware_datacenter_path=DC_PATH`` - Optional. Default: ``ha-datacenter`` - - Can only be specified in configuration files. - - Inventory path to a datacenter. If the ``vmware_server_host`` specified - is an ESX/ESXi, the ``vmware_datacenter_path`` is optional. If specified, - it should be ``ha-datacenter``. - -``vmware_datastore_name=DS_NAME`` - Required when using the VMware storage backend. - - Can only be specified in configuration files. - - Datastore name associated with the ``vmware_datacenter_path`` - -``vmware_datastores`` - Optional. Default: Not set. - - This option can only be specified in configuration file and is specific - to the VMware storage backend. - - vmware_datastores allows administrators to configure multiple datastores to - save glance image in the VMware store backend. The required format for the - option is: ::. - - where datacenter_path is the inventory path to the datacenter where the - datastore is located. An optional weight can be given to specify the priority. - - Example:: - - vmware_datastores = datacenter1:datastore1 - vmware_datastores = dc_folder/datacenter2:datastore2:100 - vmware_datastores = datacenter1:datastore3:200 - - **NOTE**: - - - This option can be specified multiple times to specify multiple datastores. - - Either vmware_datastore_name or vmware_datastores option must be specified - in glance-api.conf - - Datastore with weight 200 has precedence over datastore with weight 100. - - If no weight is specified, default weight '0' is associated with it. - - If two datastores have same weight, the datastore with maximum free space - will be chosen to store the image. 
- - If the datacenter path or datastore name contains a colon (:) symbol, it - must be escaped with a backslash. - -``vmware_api_retry_count=TIMES`` - Optional. Default: ``10`` - - Can only be specified in configuration files. - - The number of times VMware ESX/VC server API must be - retried upon connection related issues. - -``vmware_task_poll_interval=SECONDS`` - Optional. Default: ``5`` - - Can only be specified in configuration files. - - The interval used for polling remote tasks invoked on VMware ESX/VC server. - -``vmware_store_image_dir`` - Optional. Default: ``/openstack_glance`` - - Can only be specified in configuration files. - - The path to access the folder where the images will be stored in the datastore. - -``vmware_api_insecure=ON_OFF`` - Optional. Default: ``False`` - - Can only be specified in configuration files. - - Allow to perform insecure SSL requests to ESX/VC server. - -Configuring the Storage Endpoint -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``swift_store_endpoint=URL`` - Optional. Default: ``None`` - - Can only be specified in configuration files. - - Overrides the storage URL returned by auth. The URL should include the - path up to and excluding the container. The location of an object is - obtained by appending the container and object to the configured URL. - e.g. ``https://www.my-domain.com/v1/path_up_to_container`` - -Configuring Glance Image Size Limit ------------------------------------ - -The following configuration option is specified in the -``glance-api.conf`` configuration file in the section ``[DEFAULT]``. - -``image_size_cap=SIZE`` - Optional. Default: ``1099511627776`` (1 TB) - - Maximum image size, in bytes, which can be uploaded through the Glance API server. - - **IMPORTANT NOTE**: this value should only be increased after careful consideration - and must be set to a value under 8 EB (9223372036854775808). 
-
-Configuring Glance User Storage Quota
-------------------------------------
-
-The following configuration option is specified in the
-``glance-api.conf`` configuration file in the section ``[DEFAULT]``.
-
-``user_storage_quota``
- Optional. Default: 0 (Unlimited).
-
- This value specifies the maximum amount of storage that each user can use
- across all storage systems. Optionally unit can be specified for the value.
- Values are accepted in B, KB, MB, GB or TB which are for Bytes, KiloBytes,
- MegaBytes, GigaBytes and TeraBytes respectively. Default unit is Bytes.
-
- Example values would be,
- user_storage_quota=20GB
-
-Configuring the Image Cache
---------------------------
-
-Glance API servers can be configured to have a local image cache. Caching of
-image files is transparent and happens using a piece of middleware that can
-optionally be placed in the server application pipeline.
-
-This pipeline is configured in the PasteDeploy configuration file,
-<component>-paste.ini. You should not generally have to edit this file
-directly, as it ships with ready-made pipelines for all common deployment
-flavors.
-
-Enabling the Image Cache Middleware
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To enable the image cache middleware, the cache middleware must occur in
-the application pipeline **after** the appropriate context middleware.
-
-The cache middleware should be in your ``glance-api-paste.ini`` in a section
-titled ``[filter:cache]``.
It should look like this:: - - [filter:cache] - paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory - -A ready-made application pipeline including this filter is defined in -the ``glance-api-paste.ini`` file, looking like so:: - - [pipeline:glance-api-caching] - pipeline = versionnegotiation context cache apiv1app - -To enable the above application pipeline, in your main ``glance-api.conf`` -configuration file, select the appropriate deployment flavor like so:: - - [paste_deploy] - flavor = caching - -Enabling the Image Cache Management Middleware -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There is an optional ``cachemanage`` middleware that allows you to -directly interact with cache images. Use this flavor in place of the -``cache`` flavor in your API configuration file. There are three types you -can chose: ``cachemanagement``, ``keystone+cachemanagement`` and -``trusted-auth+cachemanagement``.:: - - [paste_deploy] - flavor = keystone+cachemanagement - -Configuration Options Affecting the Image Cache -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. note:: - - These configuration options must be set in both the glance-cache - and glance-api configuration files. - - -One main configuration file option affects the image cache. - -``image_cache_dir=PATH`` - Required when image cache middleware is enabled. - - Default: ``/var/lib/glance/image-cache`` - - This is the base directory the image cache can write files to. - Make sure the directory is writable by the user running the - ``glance-api`` server - -``image_cache_driver=DRIVER`` - Optional. Choice of ``sqlite`` or ``xattr`` - - Default: ``sqlite`` - - The default ``sqlite`` cache driver has no special dependencies, other - than the ``python-sqlite3`` library, which is installed on virtually - all operating systems with modern versions of Python. It stores - information about the cached files in a SQLite database. 
- - The ``xattr`` cache driver required the ``python-xattr>=0.6.0`` library - and requires that the filesystem containing ``image_cache_dir`` have - access times tracked for all files (in other words, the noatime option - CANNOT be set for that filesystem). In addition, ``user_xattr`` must be - set on the filesystem's description line in fstab. Because of these - requirements, the ``xattr`` cache driver is not available on Windows. - -``image_cache_sqlite_db=DB_FILE`` - Optional. - - Default: ``cache.db`` - - When using the ``sqlite`` cache driver, you can set the name of the database - that will be used to store the cached images information. The database - is always contained in the ``image_cache_dir``. - -``image_cache_max_size=SIZE`` - Optional. - - Default: ``10737418240`` (10 GB) - - Size, in bytes, that the image cache should be constrained to. Images files - are cached automatically in the local image cache, even if the writing of that - image file would put the total cache size over this size. The - ``glance-cache-pruner`` executable is what prunes the image cache to be equal - to or less than this value. The ``glance-cache-pruner`` executable is designed - to be run via cron on a regular basis. See more about this executable in - :ref:`Controlling the Growth of the Image Cache ` - -.. _configuring-the-glance-registry: - -Configuring the Glance Registry -------------------------------- - -There are a number of configuration options in Glance that control how -this registry server operates. These configuration options are specified in the -``glance-registry.conf`` configuration file in the section ``[DEFAULT]``. - -**IMPORTANT NOTE**: The glance-registry service is only used in conjunction -with the glance-api service when clients are using the v1 REST API. See -`Configuring Glance APIs`_ for more info. - -``sql_connection=CONNECTION_STRING`` (``--sql-connection`` when specified -on command line) - - Optional. 
Default: ``None``
-
- Can be specified in configuration files. Can also be specified on the
- command-line for the ``glance-manage`` program.
-
- Sets the SQLAlchemy connection string to use when connecting to the registry
- database. Please see the documentation for
- `SQLAlchemy connection strings <http://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls>`_
- online. You must urlencode any special characters in CONNECTION_STRING.
-
-``sql_timeout=SECONDS``
-
- Optional. Default: ``3600``
-
- Can only be specified in configuration files.
-
- Sets the number of seconds after which SQLAlchemy should reconnect to the
- datastore if no activity has been made on the connection.
-
-``enable_v1_registry=<True|False>``
-
- Optional. Default: ``True``
-
-``enable_v2_registry=<True|False>``
-
- Optional. Default: ``True``
-
- Defines which version(s) of the Registry API will be enabled.
- If the Glance API server parameter ``enable_v1_api`` has been set to ``True`` the
- ``enable_v1_registry`` has to be ``True`` as well.
- If the Glance API server parameter ``enable_v2_api`` has been
- set to ``True`` and the parameter ``data_api`` has been set to
- ``glance.db.registry.api`` the ``enable_v2_registry`` has to be set to ``True``.
-
-
-Configuring Notifications
-------------------------
-
-Glance can optionally generate notifications to be logged or sent to a message
-queue. The configuration options are specified in the ``glance-api.conf``
-configuration file.
-
-``[oslo_messaging_notifications]/driver``
-
- Optional. Default: ``noop``
-
- Sets the notification driver used by oslo.messaging. Options include
- ``messaging``, ``messagingv2``, ``log`` and ``routing``.
-
- **NOTE**
- In M release, the ``[DEFAULT]/notification_driver`` option has been deprecated in favor
- of ``[oslo_messaging_notifications]/driver``.
-
- For more information see :ref:`Glance notifications <notifications>` and
- `oslo.messaging <https://docs.openstack.org/oslo.messaging/latest/>`_.
-
-``[DEFAULT]/disabled_notifications``
-
- Optional. Default: ``[]``
-
- List of disabled notifications.
A notification can be given either as a
- notification type to disable a single event, or as a notification group prefix
- to disable all events within a group.
-
- Example: if this config option is set to ["image.create", "metadef_namespace"],
- then "image.create" notification will not be sent after image is created and
- none of the notifications for metadefinition namespaces will be sent.
-
-Configuring Glance Property Protections
---------------------------------------
-
-Access to image meta properties may be configured using a
-:ref:`Property Protections Configuration file <property-protections>`. The
-location for this file can be specified in the ``glance-api.conf``
-configuration file in the section ``[DEFAULT]``. **If an incorrect value is
-specified, glance API service will not start.**
-
-``property_protection_file=PATH``
-
- Optional. Default: not enabled.
-
- If property_protection_file is set, the file may use either roles or policies
- to specify property protections.
-
-``property_protection_rule_format=<roles|policies>``
-
- Optional. Default: ``roles``.
-
-Configuring Glance APIs
-----------------------
-
-The glance-api service implements versions 1 and 2 of
-the OpenStack Images API. Disable any version of
-the Images API using the following options:
-
-``enable_v1_api=<True|False>``
-
- Optional. Default: ``True``
-
-``enable_v2_api=<True|False>``
-
- Optional. Default: ``True``
-
- **IMPORTANT NOTE**: To use v2 registry in v2 API, you must set
- ``data_api`` to glance.db.registry.api in glance-api.conf.
-
-Configuring Glance Tasks
------------------------
-
-Glance Tasks are implemented only for version 2 of the OpenStack Images API.
-
-The config value ``task_time_to_live`` is used to determine how long a task
-would be visible to the user after transitioning to either the ``success`` or
-the ``failure`` state.
-
-``task_time_to_live=<Time_in_hours>``
-
- Optional. Default: ``48``
-
- The config value ``task_executor`` is used to determine which executor
- should be used by the Glance service to process the task.
The currently - available implementation is: ``taskflow``. - -``task_executor=`` - - Optional. Default: ``taskflow`` - - The ``taskflow`` engine has its own set of configuration options, - under the ``taskflow_executor`` section, that can be tuned to improve - the task execution process. Among the available options, you may find - ``engine_mode`` and ``max_workers``. The former allows for selecting - an execution model and the available options are ``serial``, - ``parallel`` and ``worker-based``. The ``max_workers`` option, - instead, allows for controlling the number of workers that will be - instantiated per executor instance. - - The default value for the ``engine_mode`` is ``parallel``, whereas - the default number of ``max_workers`` is ``10``. - -Configuring Glance performance profiling ----------------------------------------- - -Glance supports using osprofiler to trace the performance of each key internal -handling, including RESTful API calling, DB operation and etc. - -``Please be aware that Glance performance profiling is currently a work in -progress feature.`` Although, some trace points is available, e.g. API -execution profiling at wsgi main entry and SQL execution profiling at DB -module, the more fine-grained trace point is being worked on. - -The config value ``enabled`` is used to determine whether fully enable -profiling feature for glance-api and glance-registry service. - -``enabled=`` - - Optional. Default: ``False`` - - There is one more configuration option that needs to be defined to enable - Glance services profiling. The config value ``hmac_keys`` is used for - encrypting context data for performance profiling. - -``hmac_keys=`` - - Optional. Default: ``SECRET_KEY`` - - **IMPORTANT NOTE**: in order to make profiling work as designed operator needs - to make those values of HMAC key be consistent for all services in their - deployment. Without HMAC key the profiling will not be triggered even profiling - feature is enabled. 
- - **IMPORTANT NOTE**: previously HMAC keys (as well as enabled parameter) were - placed at `/etc/glance/api-paste.ini` and `/etc/glance/registry-paste.ini` files - for Glance API and Glance Registry services respectively. Starting with - osprofiler 0.3.1 release there is no need to set these arguments in the - `*-paste.ini` files. This functionality is still supported, although the - config values are having larger priority. - - The config value ``trace_sqlalchemy`` is used to determine whether fully enable - sqlalchemy engine based SQL execution profiling feature for glance-api and - glance-registry services. - -``trace_sqlalchemy=`` - - Optional. Default: ``False`` - -Configuring Glance public endpoint ----------------------------------- - -This setting allows an operator to configure the endpoint URL that will -appear in the Glance "versions" response (that is, the response to -``GET /``\ ). This can be necessary when the Glance API service is run -behind a proxy because the default endpoint displayed in the versions -response is that of the host actually running the API service. If -Glance is being run behind a load balancer, for example, direct access -to individual hosts running the Glance API may not be allowed, hence the -load balancer URL would be used for this value. - -``public_endpoint=`` - - Optional. Default: ``None`` - -Configuring Glance digest algorithm ------------------------------------ - -Digest algorithm that will be used for digital signature. The default -is sha256. Use the command:: - - openssl list-message-digest-algorithms - -to get the available algorithms supported by the version of OpenSSL on the -platform. Examples are "sha1", "sha256", "sha512", etc. If an invalid -digest algorithm is configured, all digital signature operations will fail and -return a ValueError exception with "No such digest method" error. - -``digest_algorithm=`` - - Optional. 
Default: ``sha256`` - -Configuring http_keepalive option ---------------------------------- - -``http_keepalive=`` - - If False, server will return the header "Connection: close", If True, server - will return "Connection: Keep-Alive" in its responses. In order to close the - client socket connection explicitly after the response is sent and read - successfully by the client, you simply have to set this option to False when - you create a wsgi server. - -Configuring the Health Check ----------------------------- - -This setting allows an operator to configure the endpoint URL that will -provide information to load balancer if given API endpoint at the node should -be available or not. Both Glance API and Glance Registry servers can be -configured to expose a health check URL. - -To enable the health check middleware, it must occur in the beginning of the -application pipeline. - -The health check middleware should be placed in your -``glance-api-paste.ini`` / ``glance-registry-paste.ini`` in a section -titled ``[filter:healthcheck]``. It should look like this:: - - [filter:healthcheck] - paste.filter_factory = oslo_middleware:Healthcheck.factory - backends = disable_by_file - disable_by_file_path = /etc/glance/healthcheck_disable - -A ready-made application pipeline including this filter is defined e.g. in -the ``glance-api-paste.ini`` file, looking like so:: - - [pipeline:glance-api] - pipeline = healthcheck versionnegotiation osprofiler unauthenticated-context rootapp - -For more information see -`oslo.middleware `_. - -Configuring supported disk formats ----------------------------------- - -Each image in Glance has an associated disk format property. -When creating an image the user specifies a disk format. They must -select a format from the set that the Glance service supports. This -supported set can be seen by querying the ``/v2/schemas/images`` resource. -An operator can add or remove disk formats to the supported set. 
This is -done by setting the ``disk_formats`` parameter which is found in the -``[image_formats]`` section of ``glance-api.conf``. - -``disk_formats=`` - - Optional. Default: ``ami,ari,aki,vhd,vhdx,vmdk,raw,qcow2,vdi,iso,ploop`` diff --git a/doc/source/configuration/glance_api.rst b/doc/source/configuration/glance_api.rst deleted file mode 100644 index c8081873..00000000 --- a/doc/source/configuration/glance_api.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. _glance-api.conf: - ---------------- -glance-api.conf ---------------- - -.. show-options:: - :config-file: etc/oslo-config-generator/glance-api.conf diff --git a/doc/source/configuration/glance_cache.rst b/doc/source/configuration/glance_cache.rst deleted file mode 100644 index 6127bae3..00000000 --- a/doc/source/configuration/glance_cache.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. _glance-cache.conf: - ------------------ -glance-cache.conf ------------------ - -.. show-options:: - :config-file: etc/oslo-config-generator/glance-cache.conf diff --git a/doc/source/configuration/glance_manage.rst b/doc/source/configuration/glance_manage.rst deleted file mode 100644 index 48f7aa37..00000000 --- a/doc/source/configuration/glance_manage.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. _glance-manage.conf: - ------------------- -glance-manage.conf ------------------- - -.. show-options:: - :config-file: etc/oslo-config-generator/glance-manage.conf diff --git a/doc/source/configuration/glance_registry.rst b/doc/source/configuration/glance_registry.rst deleted file mode 100644 index 2e71205b..00000000 --- a/doc/source/configuration/glance_registry.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. _glance-registry.conf: - --------------------- -glance-registry.conf --------------------- - -This configuration file controls how the register server operates. More -information can be found in :ref:`configuring-the-glance-registry`. - -.. 
show-options:: - :config-file: etc/oslo-config-generator/glance-registry.conf diff --git a/doc/source/configuration/glance_scrubber.rst b/doc/source/configuration/glance_scrubber.rst deleted file mode 100644 index 9e31808e..00000000 --- a/doc/source/configuration/glance_scrubber.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. _glance-scrubber.conf: - --------------------- -glance-scrubber.conf --------------------- - -.. show-options:: - :config-file: etc/oslo-config-generator/glance-scrubber.conf diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst deleted file mode 100644 index f92afbc3..00000000 --- a/doc/source/configuration/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. _configuring: - -============================ -Glance Configuration Options -============================ - -This section provides a list of all possible options for each -configuration file. Refer to :ref:`basic-configuration` for a -detailed guide in getting started with various option settings. - -Glance uses the following configuration files for its various services. - -.. toctree:: - :glob: - :maxdepth: 1 - - * diff --git a/doc/source/configuration/sample-configuration.rst b/doc/source/configuration/sample-configuration.rst deleted file mode 100644 index f324909b..00000000 --- a/doc/source/configuration/sample-configuration.rst +++ /dev/null @@ -1,54 +0,0 @@ -.. _sample-configuration: - -=========================== -Glance Sample Configuration -=========================== - -The following are sample configuration files for all Glance services and -utilities. These are generated from code and reflect the current state of code -in the Glance repository. - - -Sample configuration for Glance API ------------------------------------ - -This sample configuration can also be viewed in `glance-api.conf.sample -<../_static/glance-api.conf.sample>`_. - -.. 
literalinclude:: ../_static/glance-api.conf.sample - - -Sample configuration for Glance Registry ----------------------------------------- - -This sample configuration can also be viewed in `glance-registry.conf.sample -<../_static/glance-registry.conf.sample>`_. - -.. literalinclude:: ../_static/glance-registry.conf.sample - - -Sample configuration for Glance Scrubber ----------------------------------------- - -This sample configuration can also be viewed in `glance-scrubber.conf.sample -<../_static/glance-scrubber.conf.sample>`_. - -.. literalinclude:: ../_static/glance-scrubber.conf.sample - - -Sample configuration for Glance Manage --------------------------------------- - -This sample configuration can also be viewed in `glance-manage.conf.sample -<../_static/glance-manage.conf.sample>`_. - -.. literalinclude:: ../_static/glance-manage.conf.sample - - -Sample configuration for Glance Cache -------------------------------------- - -This sample configuration can also be viewed in `glance-cache.conf.sample -<../_static/glance-cache.conf.sample>`_. - -.. literalinclude:: ../_static/glance-cache.conf.sample diff --git a/doc/source/contributor/architecture.rst b/doc/source/contributor/architecture.rst deleted file mode 100644 index a587e84d..00000000 --- a/doc/source/contributor/architecture.rst +++ /dev/null @@ -1,83 +0,0 @@ -.. - Copyright 2015 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. 
- -================== -Basic architecture -================== - -OpenStack Glance has a client-server architecture that provides a REST API -to the user through which requests to the server can be performed. - -A Glance Domain Controller manages the internal server operations -that is divided into layers. Specific tasks are implemented -by each layer. - -All the file (Image data) operations are performed using -glance_store library, which is responsible for interaction with external -storage back ends and (or) local filesystem(s). The glance_store library -provides a uniform interface to access the backend stores. - -Glance uses a central database (Glance DB) that is shared amongst all -the components in the system and is sql-based by default. Other types -of database backends are somewhat supported and used by operators -but are not extensively tested upstream. - -.. figure:: ../images/architecture.png - :figwidth: 100% - :align: center - :alt: OpenStack Glance Architecture Diagram. - Consists of 5 main blocks: "Client" "Glance" "Keystone" - "Glance Store" and "Supported Storages". - Glance block exposes a REST API. The REST API makes use of the - AuthZ Middleware and a Glance Domain Controller, which contains - Auth, Notifier, Policy, Quota, Location and DB. The Glance Domain - Controller makes use of the Glance Store (which is external to the - Glance block), and (still within the Glance block) it makes use of - the Database Abstraction Layer, and (optionally) the Registry Layer. - The Registry Layer makes use of the Database Abstraction Layer. The - Database abstraction layer exclusively makes use of the Glance - Database. - The Client block makes use of the Rest API (which exists in the - Glance block) and the Keystone block. - The Glance Store block contains AuthN which makes use of the - Keystone block, and it also contains Glance Store Drivers, which - exclusively makes use of each of the storage systems in the - Supported Storages block. 
Within the Supported Storages block, - there exist the following storage systems, none of which make use - of anything else: Filesystem, Swift, Ceph, "ellipses", Sheepdog. - A complete list is given by the currently available drivers in - glance_store/_drivers. - -.. centered:: Image 1. OpenStack Glance Architecture - -Following components are present in the Glance architecture: - -* **A client** - any application that makes use of a Glance server. - -* **REST API** - Glance functionalities are exposed via REST. - -* **Database Abstraction Layer (DAL)** - an application programming interface - (API) that unifies the communication between Glance and databases. - -* **Glance Domain Controller** - middleware that implements the main - Glance functionalities such as authorization, notifications, policies, - database connections. - -* **Glance Store** - used to organize interactions between Glance and various - data stores. - -* **Registry Layer** - optional layer that is used to organise secure - communication between the domain and the DAL by using a separate service. diff --git a/doc/source/contributor/blueprints.rst b/doc/source/contributor/blueprints.rst deleted file mode 100644 index 5b8dbfe9..00000000 --- a/doc/source/contributor/blueprints.rst +++ /dev/null @@ -1,88 +0,0 @@ -Blueprints and Specs -==================== - -The Glance team uses the `glance-specs -`_ repository for its -specification reviews. Detailed information can be found `here -`_. Please also find -additional information in the reviews.rst file. - -The Glance team enforces a deadline for specs proposals. It's a soft -freeze that happens after the first milestone is cut and before the -second milestone is out. There's a freeze exception week that follows -the freeze week. A new proposal can still be submitted during this -period, but be aware that it will most likely be postponed unless a -particularly good argument is made in favor of having an exception for -it. 
- -Please note that we use a `template -`_ -for spec submissions. It is not required to fill out all sections in the -template. Review of the spec may require filling in information left out by -the submitter. - -Spec Notes ----------- - -There are occasions when a spec will be approved and the code will not land in -the cycle it was targeted at. For these cases, the work flow to get the spec -into the next release is as follows: - -* Anyone can propose a patch to glance-specs which moves a spec from the - previous release into the new release directory. - -.. NOTE: mention the `approved`, `implemented` dirs - -The specs which are moved in this way can be fast-tracked into the -next release. Please note that it is required to re-propose the spec -for the new release however and that it'll be evaluated based on the -resources available and cycle priorities. - -Glance Spec Lite ----------------- - -In Mitaka the team introduced the concept of lite specs. Lite specs -are small features tracked as Launchpad bugs, with status `wishlist` -and tagged with the new 'spec-lite' tag, and allow for the submission -and review of these feature requests before code is submitted. - -This allows for small features that don't warrant a detailed spec to -be proposed, evaluated, and worked on. The team evaluates these -requests as it evaluates specs. Once a bug has been approved as a -Request for Enhancement (RFE), it'll be targeted for a release. - -In Newton the team refined lite specs process away from wishlist bugs -and lite specs are hosted in the glance-specs repository in git. - -Dedicated lite-specs.rst is provided for each deliverable under the -release. - -For example Newton glance_store spec lite file can be found from: -glance-specs/specs/newton/approved/glance_store/lite-specs.rst - -This file includes template section and new lite-specs should be -included to the same file under the template. 
So do not copy the lite- -specs.rst to a new file like you would do with full spec, but just -propose change to that file itself filling the parts of the template -you need. - -Existing WISHLIST 'spec-lite' tagged bug will be honored as lite spec, -new ones will not be considered unless filed in glance-spec repo. - - -Lite spec Submission Guidelines -------------------------------- - -Before we dive into the guidelines for writing a good lite spec, it is -worth mentioning that depending on your level of engagement with the -Glance project and your role (user, developer, deployer, operator, -etc.), you are more than welcome to have a preliminary discussion of a -potential lite spec by reaching out to other people involved in the -project. This usually happens by posting mails on the relevant mailing -lists (e.g. `openstack-dev `_ - include -[glance] in the subject) or on #openstack-glance IRC channel on -Freenode. If current ongoing code reviews are related to your feature, -posting comments/questions on gerrit may also be a way to engage. Some -amount of interaction with Glance developers will give you an idea of -the plausibility and form of your lite spec before you submit it. That -said, this is not mandatory. diff --git a/doc/source/contributor/database_architecture.rst b/doc/source/contributor/database_architecture.rst deleted file mode 100644 index 8a4d2d0e..00000000 --- a/doc/source/contributor/database_architecture.rst +++ /dev/null @@ -1,261 +0,0 @@ -.. - Copyright 2015 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - License for the specific language governing permissions and limitations - under the License. - -============================ -Glance database architecture -============================ - -Glance Database Public API -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Glance Database API contains several methods for moving image metadata to -and from persistent storage. You can find a list of public methods grouped by -category below. - -Common parameters for image methods ------------------------------------ - -The following parameters can be applied to all of the image methods below: - - ``context`` — corresponds to a glance.context.RequestContext - object, which stores the information on how a user accesses - the system, as well as additional request information. - - ``image_id`` — a string corresponding to the image identifier. - - ``memb_id`` — a string corresponding to the member identifier - of the image. - -Image basic methods -------------------- - -**Image processing methods:** - -#. ``image_create(context, values)`` — creates a new image record - with parameters listed in the *values* dictionary. Returns a - dictionary representation of a newly created - *glance.db.sqlalchemy.models.Image* object. -#. ``image_update(context, image_id, values, purge_props=False, - from_state=None)`` — updates the existing image with the identifier - *image_id* with the values listed in the *values* dictionary. Returns a - dictionary representation of the updated *Image* object. - - Optional parameters are: - - ``purge_props`` — a flag indicating that all the existing - properties not listed in the *values['properties']* should be - deleted; - - ``from_state`` — a string filter indicating that the updated - image must be in the specified state. - -#. ``image_destroy(context, image_id)`` — deletes all database - records of an image with the identifier *image_id* (like tags, - properties, and members) and sets a 'deleted' status on all the - image locations. -#. 
``image_get(context, image_id, force_show_deleted=False)`` — - gets an image with the identifier *image_id* and returns its - dictionary representation. The parameter *force_show_deleted* is - a flag that indicates to show image info even if it was - 'deleted', or its 'pending_delete' statuses. -#. ``image_get_all(context, filters=None, marker=None, limit=None, - sort_key=None, sort_dir=None, member_status='accepted', - is_public=None, admin_as_user=False, return_tag=False)`` — gets - all the images that match zero or more filters. - - Optional parameters are: - - ``filters`` — dictionary of filter keys and values. If a 'properties' - key is present, it is treated as a dictionary of key/value filters in - the attribute of the image properties. - - ``marker`` — image id after which a page should start. - - ``limit`` — maximum number of images to return. - - ``sort_key`` — list of image attributes by which results should - be sorted. - - ``sort_dir`` — direction in which results should be sorted - (asc, desc). - - ``member_status`` — only returns shared images that have this - membership status. - - ``is_public`` — if true, returns only public images. If false, - returns only private and shared images. - - ``admin_as_user`` — for backwards compatibility. If true, an admin - sees the same set of images that would be seen by a regular user. - - ``return_tag`` — indicates whether an image entry in the result - includes its relevant tag entries. This can improve upper-layer - query performance and avoid using separate calls. - -Image location methods ----------------------- - -**Image location processing methods:** - -#. ``image_location_add(context, image_id, location)`` — - adds a new location to an image with the identifier *image_id*. This - location contains values listed in the dictionary *location*. -#. 
``image_location_update(context, image_id, location)`` — updates - an existing location with the identifier *location['id']* - for an image with the identifier *image_id* with values listed in - the dictionary *location*. -#. ``image_location_delete(context, image_id, location_id, status, - delete_time=None)`` — sets a 'deleted' or 'pending_delete' - *status* to an existing location record with the identifier - *location_id* for an image with the identifier *image_id*. - -Image property methods ----------------------- - -.. warning:: There is no public property update method. - So if you want to modify it, you have to delete it first - and then create a new one. - -**Image property processing methods:** - -#. ``image_property_create(context, values)`` — creates - a property record with parameters listed in the *values* dictionary - for an image with *values['id']*. Returns a dictionary representation - of a newly created *ImageProperty* object. -#. ``image_property_delete(context, prop_ref, image_ref)`` — deletes an - existing property record with a name *prop_ref* for an image with - the identifier *image_ref*. - -Image member methods --------------------- - -**Methods to handle image memberships:** - -#. ``image_member_create(context, values)`` — creates a member record - with properties listed in the *values* dictionary for an image - with *values['id']*. Returns a dictionary representation - of a newly created *ImageMember* object. -#. ``image_member_update(context, memb_id, values)`` — updates an - existing member record with properties listed in the *values* - dictionary for an image with *values['id']*. Returns a dictionary - representation of an updated member record. -#. ``image_member_delete(context, memb_id)`` — deletes an existing - member record with *memb_id*. -#. 
``image_member_find(context, image_id=None, member=None, status=None)`` - — returns all members for a given context with optional image - identifier (*image_id*), member name (*member*), and member status - (*status*) parameters. -#. ``image_member_count(context, image_id)`` — returns a number of image - members for an image with *image_id*. - -Image tag methods ------------------ - -**Methods to process images tags:** - -#. ``image_tag_set_all(context, image_id, tags)`` — changes all the - existing tags for an image with *image_id* to the tags listed - in the *tags* param. To remove all tags, a user just should provide - an empty list. -#. ``image_tag_create(context, image_id, value)`` — adds a *value* - to tags for an image with *image_id*. Returns the value of a - newly created tag. -#. ``image_tag_delete(context, image_id, value)`` — removes a *value* - from tags for an image with *image_id*. -#. ``image_tag_get_all(context, image_id)`` — returns a list of tags - for a specific image. - -Image info methods ------------------- - -The next two methods inform a user about his or her ability to modify -and view an image. The *image* parameter here is a dictionary representation -of an *Image* object. - -#. ``is_image_mutable(context, image)`` — informs a user - about the possibility to modify an image with the given context. - Returns True if the image is mutable in this context. -#. ``is_image_visible(context, image, status=None)`` — informs about - the possibility to see the image details with the given context - and optionally with a status. Returns True if the image is visible - in this context. - -**Glance database schema** - -.. figure:: ../images/glance_db.png - :figwidth: 100% - :align: center - :alt: The glance database schema is depicted by 5 tables. 
- The table named Images has the following columns: - id: varchar(36); - name: varchar(255), nullable; - size: bigint(20), nullable; - status: varchar(30); - is_public: tinyint(1); - created_at: datetime; - updated_at: datetime, nullable; - deleted_at: datetime, nullable; - deleted: tinyint(1); - disk_format: varchar(20), nullable; - container_format: varchar(20), nullable; - checksum: varchar(32), nullable; - owner: varchar(255), nullable - min_disk: int(11); - min_ram: int(11); - protected: tinyint(1); and - virtual_size: bigint(20), nullable;. - The table named image_locations has the following columns: - id: int(11), primary; - image_id: varchar(36), refers to column named id in table Images; - value: text; - created_at: datetime; - updated_at: datetime, nullable; - deleted_at: datetime, nullable; - deleted: tinyint(1); - meta_data: text, nullable; and - status: varchar(30);. - The table named image_members has the following columns: - id: int(11), primary; - image_id: varchar(36), refers to column named id in table Images; - member: varchar(255); - can_share: tinyint(1); - created_at: datetime; - updated_at: datetime, nullable; - deleted_at: datetime, nullable; - deleted: tinyint(1); and - status: varchar(20;. - The table named image_tags has the following columns: - id: int(11), primary; - image_id: varchar(36), refers to column named id in table Images; - value: varchar(255); - created_at: datetime; - updated_at: datetime, nullable; - deleted_at: datetime, nullable; and - deleted: tinyint(1);. - The table named image_properties has the following columns: - id: int(11), primary; - image_id: varchar(36), refers to column named id in table Images; - name: varchar(255); - value: text, nullable; - created_at: datetime; - updated_at: datetime, nullable; - deleted_at: datetime, nullable; and - deleted: tinyint(1);. - - -.. centered:: Image 1. 
Glance images DB schema - - -Glance Database Backends -~~~~~~~~~~~~~~~~~~~~~~~~ - -Migration Backends ------------------- - -.. list-plugins:: glance.database.migration_backend - :detailed: - -Metadata Backends ------------------ - -.. list-plugins:: glance.database.metadata_backend - :detailed: diff --git a/doc/source/contributor/database_migrations.rst b/doc/source/contributor/database_migrations.rst deleted file mode 100644 index fe68202a..00000000 --- a/doc/source/contributor/database_migrations.rst +++ /dev/null @@ -1,347 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -====================================================== -Writing Database Migrations for Zero-Downtime Upgrades -====================================================== - -Beginning in Ocata, OpenStack Glance uses Alembic, which replaced SQLAlchemy -Migrate as the database migration engine. Moving to Alembic is particularly -motivated by the zero-downtime upgrade work. Refer to [GSPEC1]_ and [GSPEC2]_ -for more information on zero-downtime upgrades in Glance and why a move to -Alembic was deemed necessary. - -Stop right now and go read [GSPEC1]_ and [GSPEC2]_ if you haven't done so -already. Those documents explain the strategy Glance has approved for database -migrations, and we expect you to be familiar with them in what follows. This -document focuses on the "how", but unless you understand the "what" and "why", -you'll be wasting your time reading this document. 
- -Prior to Ocata, database migrations were conceived as monoliths. Thus, they -did not need to carefully distinguish and manage database schema expansions, -data migrations, or database schema contractions. The modern database -migrations are more sensitive to the characteristics of changes being -attempted and thus we clearly identify three phases of a database migration: -(1) expand, (2) migrate, and (3) contract. A developer modifying the Glance -database must supply a script for each of these phases. - -Here's a quick reminder of what each phase entails. -For more information, see [GSPEC1]_. - -Expand - Expand migrations MUST be additive in nature. Expand migrations - should be seen as the minimal set of schema changes required by the new - services that can be applied while the old services are still running. - Expand migrations should optionally include temporary database triggers that - keep the old and new columns in sync. If a database change needs data to be - migrated between columns, then temporary database triggers are required to - keep the columns in sync while the data migrations are in-flight. - - .. note:: - Sometimes there could be an exception to the additive-only change - strategy for expand phase. It is described more elaborately in [GSPEC1]_. - Again, consider this as a last reminder to read [GSPEC1]_, if you haven't - already done so. - -Migrate - Data migrations MUST NOT attempt any schema changes and only move existing - data between old and new columns such that new services can start consuming - the new tables and/or columns introduced by the expand migrations. - -Contract - Contract migrations usually include the remaining schema changes required by - the new services that couldn't be applied during expand phase due to their - incompatible nature with the old services. Any temporary database triggers - added during the expand migrations MUST be dropped with contract migrations. 
- - -Alembic Migrations -================== -As mentioned earlier, starting in Ocata Glance database migrations must be -written for Alembic. All existing Glance migrations have been ported to -Alembic. They can be found here [GMIGS1]_. - - -Schema Migrations (Expand/Contract) ------------------------------------ - -* All Glance schema migrations must reside in - ``glance.db.sqlalchemy.alembic_migrations.versions`` package - -* Every Glance schema migration must be a python module with the following - structure - - .. code:: - - """ - - Revision ID: - Revises: - """ - - - - revision = - down_revision = - depends_on = - - def upgrade(): - - - - Identifiers ``revision``, ``down_revision`` and ``depends_on`` are - elaborated below. - -* The ``revision`` identifier is a unique revision id for every migration. - It must conform to one of the following naming schemes. - - All monolith migrations must conform to: - - .. code:: - - - - - And, all expand/contract migrations must conform to: - - .. code:: - - _[expand|contract] - - - Example: - - .. code:: - - Monolith migration: ocata01 - Expand migration: ocata_expand01 - Contract migration: ocata_contract01 - - This name convention is devised with an intention to easily understand the - migration sequence. While the ```` mentions the release a - migration belongs to, the ```` helps - identify the order of migrations within each release. For modern migrations, - the ``[expand|contract]`` part of the revision id helps identify the - revision branch a migration belongs to. - -* The ``down_revision`` identifier MUST be specified for all Alembic migration - scripts. It points to the previous migration (or ``revision`` in Alembic - lingo) on which the current migration is based. This essentially - establishes a migration sequence very much a like a singly linked list would - (except that we use a ``previous`` link here instead of the more traditional - ``next`` link.) 
- - The very first migration, ``liberty`` in our case, would have - ``down_revision`` set to ``None``. All other migrations must point to the - last migration in the sequence at the time of writing the migration. - - For example, Glance has two migrations in Mitaka, namely, ``mitaka01`` - and ``mitaka02``. The migration sequence for Mitaka should look like: - - .. code:: - - liberty - ^ - | - | - mitaka01 - ^ - | - | - mitaka02 - -* The ``depends_on`` identifier helps establish dependencies between two - migrations. If a migration ``X`` depends on running migration ``Y`` first, - then ``X`` is said to depend on ``Y``. This could be specified in the - migration as shown below: - - .. code:: - - revision = 'X' - down_revision = 'W' - depends_on = 'Y' - - Naturally, every migration depends on the migrations preceding it in the - migration sequence. Hence, in a typical branch-less migration sequence, - ``depends_on`` is of limited use. However, this could be useful for - migration sequences with branches. We'll see more about this in the next - section. - -* All schema migration scripts must adhere to the naming convention - mentioned below: - - .. code:: - - _.py - - Example: - - .. code:: - - Monolith migration: ocata01_add_visibility_remove_is_public.py - Expand migration: ocata_expand01_add_visibility.py - Contract migration: ocata_contract01_remove_is_public.py - - -Dependency Between Contract and Expand Migrations -------------------------------------------------- - -* To achieve zero-downtime upgrades, the Glance migration sequence has been - branched into ``expand`` and ``contract`` branches. As the name suggests, - the ``expand`` branch contains only the expand migrations and the - ``contract`` branch contains only the contract migrations. As per the - zero-downtime migration strategy, the expand migrations are run first - followed by contract migrations. 
To establish this dependency, we make the - contract migrations explicitly depend on their corresponding expand - migrations. Thus, running contract migrations without running expansions is - not possible. - - For example, the Community Images migration in Ocata includes the - experimental E-M-C migrations. The expand migration is ``ocata_expand01`` - and the contract migration is ``ocata_contract01``. The dependency is - established as below. - - .. code:: - - revision = 'ocata_contract01' - down_revision = 'mitaka02' - depends_on = 'ocata_expand01' - - - Every contract migration in Glance MUST depend on its corresponding expand - migration. Thus, the current Glance migration sequence looks as shown below: - - .. code:: - - liberty - ^ - | - | - mitaka01 - ^ - | - | - mitaka02 - ^ - | - +------------+------------+ - | | - | | - ocata_expand01 <------ ocata_contract01 - ^ ^ - | | - | | - pike_expand01 <------ pike_contract01 - - -Data Migrations ---------------- - -* All Glance data migrations must reside in - ``glance.db.sqlalchemy.alembic_migrations.data_migrations`` package. - -* The data migrations themselves are not Alembic migration scripts. And, hence - they don't require a unique revision id. However, they must adhere to a - similar naming convention discussed above. That is: - - .. code:: - - _migrate_.py - - Example: - - .. code:: - - Data Migration: ocata_migrate01_community_images.py - -* All data migrations modules must adhere to the following structure: - - .. code:: - - def has_migrations(engine): - - return - - - def migrate(engine): - - return - - -NOTES ------ - -* Starting in Ocata, Glance needs every database migration to include both - monolithic and Expand-Migrate-Contract (E-M-C) style migrations. At some - point in Pike, E-M-C migrations will be made default. At that point, it - would be no longer required to include monolithic migration script. - -* Alembic is a database migration engine written for SQLAlchemy. 
So, any - migration script written for SQLAlchemy Migrate should work with Alembic as - well provided the structural differences above (primarily adding - ``revision``, ``down_revision`` and ``depends_on``) are taken care of. - Moreover, it maybe easier to do certain operations with Alembic. - Refer to [ALMBC]_ for information on Alembic operations. - -* A given database change may not require actions in each of the expand, - migrate, contract phases, but nonetheless, we require a script for *each* - phase for *every* change. In the case where an action is not required, a - ``no-op`` script, described below, MUST be used. - - For instance, if a database migration is completely contractive in nature, - say removing a column, there won't be a need for expand and migrate - operations. But, including a ``no-op`` expand and migrate scripts will make - it explicit and also preserve the one-to-one correspondence between expand, - migrate and contract scripts. - - A no-op expand/contract Alembic migration: - - .. code:: - - - """An example empty Alembic migration script - - Revision ID: foo02 - Revises: foo01 - """ - - revision = foo02 - down_revision = foo01 - - def upgrade(): - pass - - - A no-op migrate script: - - .. code:: - - """An example empty data migration script""" - - def has_migrations(engine): - return False - - - def migrate(engine): - return 0 - -References -========== -.. [GSPEC1] `Database Strategy for Rolling Upgrades - `_ -.. [GSPEC2] `Glance Alembic Migrations Spec - `_ -.. [GMIGS1] `Glance Alembic Migrations Implementation - `_ -.. [ALMBC] `Alembic Operations `_ diff --git a/doc/source/contributor/documentation.rst b/doc/source/contributor/documentation.rst deleted file mode 100644 index 51c736f3..00000000 --- a/doc/source/contributor/documentation.rst +++ /dev/null @@ -1,117 +0,0 @@ -Documentation -============= - -Tips for Doc Writers (and Developers, too!) 
-------------------------------------------- - -Here are some useful tips about questions that come up a lot but aren't always -easy to find answers to. - -* Make example URLs consistent - - For consistency, example URLs for openstack components are in the form: - - .. code:: - - project.openstack.example.org - - So, for example, an example image-list call to Glance would use a URL - written like this: - - .. code:: - - http://glance.openstack.example.org/v2/images - -Where to Contribute -------------------- - -There are a few different kinds of documentation associated with Glance to -which you may want to contribute: - -* Configuration - - As you read through the sample configuration files in the ``etc`` directory - in the source tree, you may find typographical errors, or grammatical - problems, or text that could use clarification. The Glance team welcomes - your contributions, but please note that the sample configuration files are - generated, not static text. Thus you must modify the source code where the - particular option you're correcting is defined and then re-generate the conf - file using ``tox -e genconfig``. - -* Glance's Documentation - - The Glance Documentation (what you're reading right now) lives in the source - code tree under ``doc/source``. It consists of information for developers - working on Glance, information for consumers of the OpenStack Images APIs - implemented by Glance, and information for operators deploying Glance. Thus - there's a wide range of documents to which you could contribute. - - Small improvements can simply be addressed by a patch, but it's probably a - good idea to first file a bug for larger changes so they can be tracked more - easily (especially if you plan to submit several different patches to address - the shortcoming). - -* User Guides - - There are several user guides published by the OpenStack Documentation Team. 
- Please see the README in their code repository for more information: - https://github.com/openstack/openstack-manuals - -* OpenStack API Reference - - There's a "quick reference" guide to the APIs implemented by Glance: - http://developer.openstack.org/api-ref/image/ - - The guide is generated from source files in the source code tree under - ``api-ref/source``. Corrections in spelling or typographical errors may be - addressed directly by a patch. If you note a divergence between the API - reference and the actual behavior of Glance, please file a bug before - submitting a patch. - - Additionally, now that the quick reference guides are being maintained by - each project (rather than a central team), you may note divergences in format - between the Glance guides and those of other teams. For example, some - projects may have adopted an informative new way to display error codes. If - you notice structural improvements that our API reference is missing, please - file a bug. And, of course, we would also welcome your patch implementing - the improvement! - -Release Notes -------------- - -Release notes are notes available for operators to get an idea what each -project has included and changed during a cycle. They may also include -various warnings and notices. - -Generating release notes is done with Reno. - -.. code-block:: bash - - $ tox -e venv -- reno new - -This will generate a yaml file in ``releasenotes/notes`` that will contain -instructions about how to fill in (or remove) the various sections of -the document. Modify the yaml file as appropriate and include it as -part of your commit. - -Commit your note to git (required for reno to pick it up): - -.. code-block:: bash - - $ git add releasenotes/notes/; git commit - -Once the release notes have been committed you can build them by using: - -.. code-block:: bash - - $ tox -e releasenotes - -This will create the HTML files under ``releasenotes/build/html/``. 
- -**NOTE**: The ``prelude`` section in the release notes is to highlight only the -important changes of the release. Please word your note accordingly and be -judicious when adding content there. We don't encourage extraneous notes and at -the same time we don't want to miss out on important ones. In short, not every -release note will need content in the ``prelude`` section. If what you're -working on required a spec, then a prelude is appropriate. If you're submitting -a bugfix, most likely not; a spec-lite is a judgement call. diff --git a/doc/source/contributor/domain_implementation.rst b/doc/source/contributor/domain_implementation.rst deleted file mode 100644 index 5b04a372..00000000 --- a/doc/source/contributor/domain_implementation.rst +++ /dev/null @@ -1,154 +0,0 @@ -.. - Copyright 2016 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -================================== -Glance domain model implementation -================================== - -Gateway and basic layers -~~~~~~~~~~~~~~~~~~~~~~~~ - -The domain model contains the following layers: - -#. :ref:`authorization` -#. :ref:`property` -#. :ref:`notifier` -#. :ref:`policy` -#. :ref:`quota` -#. :ref:`location` -#. :ref:`database` - -The schema below shows a stack that contains the Image domain layers and -their locations: - -.. 
figure:: ../images/glance_layers.png - :figwidth: 100% - :align: center - :alt: From top to bottom, the stack consists of the Router and REST API, - which are above the domain implementation. The Auth, Property - Protection (optional), Notifier, Policy, Quota, - Location, and Database represent the domain implementation. - The Registry (optional) and Data Access sit below the domain - implementation. Further, the Client block calls the Router; - the Location block calls the Glance Store, and the Data Access - layer calls the DBMS. - Additional information conveyed in the image is the location in - the Glance code of the various components: - Router: api/v2/router.py - REST API: api/v2/* - Auth: api/authorization.py - Property Protection: api/property_protections.py - Notifier: notifier.py - Policy: api/policy.py - Quota: quota/__init__.py - Location: location.py - DB: db/__init__.py - Registry: registry/v2/* - Data Access: db/sqlalchemy/api.py - -.. _authorization: - -Authorization -------------- - -The first layer of the domain model provides a verification of whether an -image itself or its property can be changed. An admin or image owner can -apply the changes. The information about a user is taken from the request -``context`` and is compared with the image ``owner``. If the user cannot -apply a change, a corresponding error message appears. - -.. _property: - -Property protection -------------------- - -The second layer of the domain model is optional. It becomes available if you -set the ``property_protection_file`` parameter in the Glance configuration -file. - -There are two types of image properties in Glance: - -* *Core properties*, as specified in the image schema -* *Meta properties*, which are the arbitrary key/value pairs that can be added - to an image - -The property protection layer manages access to the meta properties -through Glance’s public API calls. You can restrict the access in the -property protection configuration file. - -.. 
_notifier: - -Notifier --------- - -On the third layer of the domain model, the following items are added to -the message queue: - -#. Notifications about all of the image changes -#. All of the exceptions and warnings that occurred while using an image - -.. _policy: - -Policy ------- - -The fourth layer of the domain model is responsible for: - -#. Defining access rules to perform actions with an image. The rules are - defined in the :file:`etc/policy.json` file. -#. Monitoring of the rules implementation. - -.. _quota: - -Quota ------ - -On the fifth layer of the domain model, if a user has an admin-defined size -quota for all of his uploaded images, there is a check that verifies whether -this quota exceeds the limit during an image upload and save: - -* If the quota does not exceed the limit, then the action to add an image - succeeds. -* If the quota exceeds the limit, then the action does not succeed and a - corresponding error message appears. - -.. _location: - -Location --------- - -The sixth layer of the domain model is used for interaction with the store via -the ``glance_store`` library, like upload and download, and for managing an -image location. On this layer, an image is validated before the upload. If -the validation succeeds, an image is written to the ``glance_store`` library. - -This sixth layer of the domain model is responsible for: - -#. Checking whether a location URI is correct when a new location is added -#. Removing image data from the store when an image location is changed -#. Preventing image location duplicates - -.. _database: - -Database --------- - -On the seventh layer of the domain model: - -* The methods to interact with the database API are implemented. -* Images are converted to the corresponding format to be recorded in the - database. And the information received from the database is - converted to an Image object. 
diff --git a/doc/source/contributor/domain_model.rst b/doc/source/contributor/domain_model.rst deleted file mode 100644 index b088ec93..00000000 --- a/doc/source/contributor/domain_model.rst +++ /dev/null @@ -1,290 +0,0 @@ -.. - Copyright 2015 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -============ -Domain model -============ - -The main goal of a domain model is refactoring the logic around -object manipulation by splitting it to independent layers. Each -subsequent layer wraps the previous one creating an "onion" structure, -thus realizing a design pattern called "Decorator." The main feature -of domain model is to use a composition instead of inheritance or -basic decoration while building an architecture. This provides -flexibility and transparency of an internal organization for a developer, -because he does not know what layers are used and works with a domain -model object as with a common object. - -Inner architecture -~~~~~~~~~~~~~~~~~~ - -Each layer defines its own operations’ implementation through a -special ``proxy`` class. At first, operations are performed on the -upper layer, then they successively pass the control to the underlying -layers. - -The nesting of layers can be specified explicitly using a programmer -interface Gateway or implicitly using ``helper`` classes. Nesting -may also depend on various conditions, skipping or adding additional -layers during domain object creation. 
- -Proxies -~~~~~~~ - -The layer behavior is described in special ``proxy`` classes -that must provide exactly the same interface as the original class -does. In addition, each ``proxy`` class has a field ``base`` -indicating a lower layer object that is an instance of another -``proxy`` or ``original`` class. - -To access the rest of the fields, you can use special ``proxy`` -properties or universal methods ``set_property`` and ``get_property``. - -In addition, the ``proxy`` class must have an ``__init__`` format -method:: - - def __init__(self, base, helper_class=None, helper_kwargs=None, **kwargs) - -where ``base`` corresponds to the underlying object layer, -``proxy_class`` and ``proxy_kwargs`` are optional and are used to -create a ``helper`` class. -Thus, to access a ``meth1`` method from the underlying layer, it is -enough to call it on the ``base`` object:: - - def meth1(*args, **kwargs): - … - self.base.meth1(*args, **kwargs) - … - -To get access to the domain object field, it is recommended to use -properties that are created by an auxiliary function:: - - def _create_property_proxy(attr): - def get_attr(self): - return getattr(self.base, attr) - - def set_attr(self, value): - return setattr(self.base, attr, value) - - def del_attr(self): - return delattr(self.base, attr) - - return property(get_attr, set_attr, del_attr) - -So, the reference to the underlying layer field ``prop1`` looks like:: - - class Proxy(object): - … - prop1 = _create_property_proxy('prop1') - … - -If the number of layers is big, it is reasonable to create a common -parent ``proxy`` class that provides further control transfer. This -facilitates the writing of specific layers if they do not provide a -particular implementation of some operation. - -Gateway -~~~~~~~ - -``gateway`` is a mechanism to explicitly specify a composition of -the domain model layers. It defines an interface to retrieve the -domain model object based on the ``proxy`` classes described above. 
- -Example of the gateway implementation -------------------------------------- - -This example defines three classes: - -* ``Base`` is the main class that sets an interface for all the - ``proxy`` classes. -* ``LoggerProxy`` class implements additional logic associated with - the logging of messages from the ``print_msg`` method. -* ``ValidatorProxy`` class implements an optional check that helps to - determine whether all the parameters in the ``sum_numbers`` method - are positive. - -:: - - class Base(object): - """Base class in domain model.""" - msg = "Hello Domain" - - def print_msg(self): - print(self.msg) - - def sum_numbers(self, *args): - return sum(args) - - class LoggerProxy(object): - """Class extends functionality by writing message to log.""" - def __init__(self, base, logg): - self.base = base - self.logg = logg - - # Proxy to provide implicit access to inner layer. - msg = _create_property_proxy('msg') - - def print_msg(self): - # Write message to log and then pass the control to inner layer. - self.logg.write("Message %s has been written to the log" % self.msg) - self.base.print_msg() - - def sum_numbers(self, *args): - # Nothing to do here. Just pass the control to the next layer. - return self.base.sum_numbers(*args) - - class ValidatorProxy(object): - """Class validates that input parameters are correct.""" - def __init__(self, base): - self.base = base - - msg = _create_property_proxy('msg') - - def print_msg(self): - # There are no checks. - self.base.print_msg() - - def sum_numbers(self, *args): - # Validate input numbers and pass them further. - for arg in args: - if arg <= 0: - return "Only positive numbers are supported."
- return self.base.sum_numbers(*args) - -Thus, the ``gateway`` method for the above example may look like: - -:: - - def gateway(logg, only_positive=True): - base = Base() - logger = LoggerProxy(base, logg) - if only_positive: - return ValidatorProxy(logger) - return logger - - domain_object = gateway(sys.stdout, only_positive=True) - -It is important to consider that the order of the layers matters. -And even if layers are logically independent from each other, -rearranging them in different order may lead to another result. - -Helpers -~~~~~~~ - -``Helper`` objects are used for an implicit nesting assignment that -is based on a specification described in an auxiliary method (similar -to ``gateway``). This approach may be helpful when using a *simple -factory* for generating objects. Such a way is more flexible as it -allows specifying the wrappers dynamically. - -The ``helper`` class is unique for all the ``proxy`` classes and it -has the following form: - -:: - - class Helper(object): - def __init__(self, proxy_class=None, proxy_kwargs=None): - self.proxy_class = proxy_class - self.proxy_kwargs = proxy_kwargs or {} - - def proxy(self, obj): - """Wrap an object.""" - if obj is None or self.proxy_class is None: - return obj - return self.proxy_class(obj, **self.proxy_kwargs) - - def unproxy(self, obj): - """Return object from inner layer.""" - if obj is None or self.proxy_class is None: - return obj - return obj.base - -Example of a simple factory implementation ------------------------------------------- - -Here is a code of a *simple factory* for generating objects from the -previous example. 
It specifies a ``BaseFactory`` class with a -``generate`` method and related ``proxy`` classes: - -:: - - class BaseFactory(object): - """Simple factory to generate an object.""" - def generate(self): - return Base() - - class LoggerFactory(object): - """Proxy class to add logging functionality.""" - def __init__(self, base, logg, proxy_class=None, proxy_kwargs=None): - self.helper = Helper(proxy_class, proxy_kwargs) - self.base = base - self.logg = logg - - def generate(self): - return self.helper.proxy(self.base.generate()) - - class ValidatorFactory(object): - """Proxy class to add validation.""" - def __init__(self, base, only_positive=True, proxy_class=None, proxy_kwargs=None): - self.helper = Helper(proxy_class, proxy_kwargs) - self.base = base - self.only_positive = only_positive - - def generate(self): - if self.only_positive: - # Wrap in ValidatorProxy if required. - return self.helper.proxy(self.base.generate()) - return self.base.generate() - -Further, ``BaseFactory`` and related ``proxy`` classes are combined -together: - -:: - - def create_factory(logg, only_positive=True): - base_factory = BaseFactory() - logger_factory = LoggerFactory(base_factory, logg, - proxy_class=LoggerProxy, - proxy_kwargs=dict(logg=logg)) - validator_factory = ValidatorFactory(logger_factory, only_positive, - proxy_class = ValidatorProxy) - return validator_factory - -Ultimately, to generate a domain object, you create and run a factory -method ``generate`` which implicitly creates a composite object. This -method is based on specifications that are set forth in the ``proxy`` -class. - -:: - - factory = create_factory(sys.stdout, only_positive=False) - domain_object = factory.generate() - -Why do you need a domain if you can use decorators? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In the above examples, to implement the planned logic, it is quite -possible to use standard Python language techniques such as -decorators. 
However, to implement more complicated operations, the -domain model is reasonable and justified. - -In general, the domain is useful when: - -* there are more than three layers. In such case, the domain model - usage facilitates the understanding and supporting of the code; -* wrapping must be implemented depending on some conditions, - including dynamic wrapping; -* there is a requirement to wrap objects implicitly by helpers. diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst deleted file mode 100644 index 88af7d09..00000000 --- a/doc/source/contributor/index.rst +++ /dev/null @@ -1,49 +0,0 @@ -Glance Contribution Guidelines -============================== - -In the Contributions Guide, you will find documented policies for -developing with Glance. This includes the processes we use for -blueprints and specs, bugs, contributor onboarding, core reviewer -memberships, and other procedural items. - -Glance, as with all OpenStack projects, is written with the following design -guidelines in mind: - -* **Component based architecture**: Quickly add new behaviors -* **Highly available**: Scale to very serious workloads -* **Fault tolerant**: Isolated processes avoid cascading failures -* **Recoverable**: Failures should be easy to diagnose, debug, and rectify -* **Open standards**: Be a reference implementation for a community-driven api - -This documentation is generated by the Sphinx toolkit and lives in the source -tree. Additional documentation on Glance and other components of OpenStack can -be found on the `OpenStack wiki `_. - -Developer reference -------------------- - -.. toctree:: - :maxdepth: 2 - - architecture - database_architecture - database_migrations - domain_model - domain_implementation - api/autoindex - -Policies --------- -.. toctree:: - :maxdepth: 3 - - blueprints - documentation - minor-code-changes - refreshing-configs - release-cpl -.. 
bugs - contributor-onboarding - core-reviewers - gate-failure-triage - code-reviews diff --git a/doc/source/contributor/minor-code-changes.rst b/doc/source/contributor/minor-code-changes.rst deleted file mode 100644 index b75147c7..00000000 --- a/doc/source/contributor/minor-code-changes.rst +++ /dev/null @@ -1,96 +0,0 @@ -Disallowed Minor Code Changes -============================= - -There are a few types of code changes that have been proposed recently that -have been rejected by the Glance team, so we want to point them out and explain -our reasoning. - -If you feel an exception should be made for some particular change, please put -it on the agenda for the Glance weekly meeting so it can be discussed. - -Database migration scripts --------------------------- - -Once a database script has been included in a release, spelling or grammar -corrections in comments are forbidden unless you are fixing them as a part of -another stronger bug on the migration script itself. Modifying migration -scripts confuses operators and administrators -- we only want them to notice -serious problems. Their preference must take precedence over fixing spell -errors. - -Typographical errors in comments --------------------------------- - -Comments are not user-facing. Correcting minor misspellings or grammatical -errors only muddies the history of that part of the code, making ``git blame`` -arguably less useful. So such changes are likely to be rejected. (This -prohibition, of course, does not apply to corrections of misleading or unclear -comments, or for example, an incorrect reference to a standards document.) - -Misspellings in code --------------------- - -Misspellings in function names are unlikely to be corrected for the "historical -clarity" reasons outlined above for comments. 
Plus, if a function is named -``mispelled()`` and a later developer tries to call ``misspelled()``, the -latter will result in a NameError when it's called, so the later developer will -know to use the incorrectly spelled function name. - -Misspellings in variable names are more problematic, because if you have a -variable named ``mispelled`` and a later developer puts up a patch where an -updated value is assigned to ``misspelled``, Python won't complain. The "real" -variable won't be updated, and the patch won't have its intended effect. -Whether such a change is allowed will depend upon the age of the code, how -widely used the variable is, whether it's spelled correctly in other functions, -what the current test coverage is like, and so on. We tend to be very -conservative about making changes that could cause regressions. So whether a -patch that corrects the spelling of a variable name is accepted is a judgment -(or is that "judgement"?) call by reviewers. In proposing your patch, however, -be aware that your reviewers will have these concerns in mind. - -Tests ------ - -Occasionally someone proposes a patch that converts instances of -``assertEqual(True, whatever)`` to ``assertTrue(whatever)``, or instances of -``assertEqual(False, w)`` to ``assertFalse(w)`` in tests. Note that these are -not type safe changes and they weaken the tests. (See the Python ``unittest`` -docs for details.) We tend to be very conservative about our tests and don't -like weakening changes. - -We're not saying that such changes can never be made, we're just saying that -each change must be accompanied by an explanation of why the weaker test is -adequate for what's being tested. 
- -Just to make this a bit clearer it can be shown using the following -example, comment out the lines in the runTest method alternatively:: - - import unittest - - class MyTestCase(unittest.TestCase): - def setUp(self): - pass - - class Tests(MyTestCase): - def runTest(self): - self.assertTrue('True') - self.assertTrue(True) - self.assertEqual(True, 'True') - -To run this use:: - - python -m testtools.run test.py - -Also mentioned within the unittests documentation_. - -.. _documentation: https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertTrue - -LOG.warn to LOG.warning ------------------------ - -Consistently there are proposed changes that will change all {LOG,logging}. -warn to {LOG,logging}.warning across the codebase due to the deprecation in -Python 3. While the deprecation is real, Glance uses oslo_log that provides -alias warn and solves the issue in single place for all projects using it. -These changes are not accepted due to the huge amount of refactoring they -cause for no reason. diff --git a/doc/source/contributor/refreshing-configs.rst b/doc/source/contributor/refreshing-configs.rst deleted file mode 100644 index f657e59e..00000000 --- a/doc/source/contributor/refreshing-configs.rst +++ /dev/null @@ -1,62 +0,0 @@ -.. - Copyright 2016-present OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. 
- -Guideline On Refreshing Configuration Files Under etc/ -====================================================== - -During a release cycle many configuration options are changed or updated. The -sample configuration files provided in tree (under ``etc/*``) need to be -updated using the autogeneration tool as these files are being used in -different places. Some examples are devstack gates, downstream packagers -shipping with the same (or using defaults from these files), etc. Hence, before -we cut a release we need to refresh the configuration files shipped with tree -to match the changes done in the source code during the release cycle. - -In an ideal world, every review that proposes an addition, removal or update to -a configuration option(s) should use the tox tool to refresh only the -configuration options(s) that were changed. However, many of the configuration -options like those coming from oslo.messaging, oslo_middleware, etc. may have -changed in the meantime. So, whenever someone uses the tool to autogenerate the -configuration files based on the options in tree, there are more changes than -those made just by the author. - -We do not recommend the authors to manually edit the autogenerated files so, a -reasonable tradeoff is for the authors to include **only those files** that are -affected by their change(s). - -.. code-block:: bash - - $ tox -e genconfig - - -When To Refresh The Sample Configuration Files -============================================== - -* Every review that proposes an addition, removal or update to a configuration - option(s) should use the tox tool to refresh only the configuration option(s) - they have changed. -* Ideally reviewers should request updates to sample configuration files for - every change that attempts to add/delete/modify a configuration option(s) in - the code. -* In some situations however, there may be a bunch of similar changes that are - affecting the configuration files. 
In this case, in order to make the - developers' and reviewers' effort easier, we recommend an update to the - configuration files in bulk right after all the update changes have been - made/merged. - - -**IMPORTANT NOTE**: All sample configuration files must be updated before the -milestone-3 (or the final release) of the project. diff --git a/doc/source/contributor/release-cpl.rst b/doc/source/contributor/release-cpl.rst deleted file mode 100644 index f17a87c3..00000000 --- a/doc/source/contributor/release-cpl.rst +++ /dev/null @@ -1,278 +0,0 @@ -================== -Glance Release CPL -================== - -So you've volunteered to be the Glance Release Cross-Project Liaison (CPL) and -now you're worried about what you've gotten yourself into. Well, here are some -tips for you from former release CPLs. - -You will be doing vital and important work both for Glance and OpenStack. -Releases have to be available at the scheduled milestones and RC dates because -end users, other OpenStack projects, and packagers rely on releases being -available so they can begin their work. Missing a date can have a cascading -effect on all the people who are depending on the release being available at -its scheduled time. Sounds scary, I know, but you'll also get a lot of -satisfaction out of having a key role in keeping OpenStack running smoothly. - - -Who You Have to Be -================== - -You do **not** have to be: - -- The PTL - -- A core reviewer - -- A stable-branch core reviewer/maintainer - -You **do** have to be: - -- A member of the Glance community - -- A person who has signed the OpenStack CLA (or whatever is in use at the time - you are reading this) - -- Someone familiar with or willing to learn git, gerrit, etc. - -- Someone who will be comfortable saying "No" when colleagues want to sneak - just one more thing in before a deadline. - -- Someone willing to work with the release team on a regular basis and attend - their `weekly meeting`_.
- - Just as the stable maintenance team is responsible for the stability and - quality of the stable branches, the release CPL must take on responsibility - for the stability and quality of every release artifact of Glance. If you - are too lenient with your colleagues, you might be responsible for - introducing a catastrophic or destabilizing release. Suppose someone, - possibly even the PTL, shows up right before RC1 with a large but probably - innocuous change. Even if this passes the gate, you should err on the side - of caution and ask to not allow it to merge. - (This has happened `before `_ ) - -A Release CPL has authority within the Glance project. They have authority -through two measures: - -- Being the person who volunteered to do this hard work - -- Maintaining a healthy relationship with the PTL and their Glance colleagues. - -Use this authority to ensure that each Glance release is the best possible. -The PTL's job is to lead technical direction, your job is to shepherd cats and -help them focus on the priorities for each release. - - -What This Does Not Grant You -============================ - -Volunteering to be Release CPL does not give you the right to be a Glance Core -Reviewer. That is a separate role that is determined based on the quality of -your reviews. You should be primarily motivated by wanting to help the team -ship an excellent release. - - -Get To Know The Release Team -============================ - -OpenStack has teams for most projects and efforts. In that vein, the release -team works on tooling to make releasing projects easier as well as verifying -releases. As CPL it is your job to work with this team. At the time of this -writing, the team organizes in ``#openstack-release`` and has a `weekly -meeting`_. Idling in their team channel and attending the meeting are two very -strongly suggested (if not required) actions for the CPL. You should introduce -yourself well in advance of the release deadlines. 
You should also take the -time to research what actions you may need to take in advance of those -deadlines as the release team becomes very busy around those deadlines. - - -Familiarize Yourself with Community Goals -========================================= - -Community Goals **are** Glance Goals. They are documented and tracked in the -`openstack/governance`_ repository. In Ocata, for example, the CPL assumed the -responsibility of monitoring those goals and reporting back to the TC when -we completed them. - -In my opinion, it makes sense for the Release CPL to perform this task because -they are the ones who are keenly aware of the deadlines in the release -schedule and can remind the assigned developers of those deadlines. - -It also is important for the Release CPL to coordinate with the PTL to ensure -that there are project-specific deadlines for the goals. This will ensure the -work is completed and reviewed in a timely fashion and hopefully early enough -to catch any bugs that shake out of the work. - - -Familiarize Yourself with the Release Tooling -============================================= - -The Release Team has worked to automate much of the release process over the -last several development cycles. Much of the tooling is controlled by updating -certain YAML files in the `openstack/releases`_ repository. - -To release a Glance project, look in the ``deliverables`` directory for the -cycle's codename, e.g., ``pike``, and then look for the project inside of -that. Update that using the appropriate syntax and after the release team has -reviewed your request and approved it, the rest will be automated for you. - -For more information on release management process and tooling, refer to -`release management process guide`_ and `release management tooling guide`_. 
- - -Familiarize Yourself with the Bug Tracker -========================================= - -The `bug tracker`_ is the best way to determine what items are slated to get -in for each particular milestone or cycle release. Use it to the best of its -capabilities. - -Release Stability and the Gate -============================== - -As you may know at this point, OpenStack's Integrated Gate will begin to -experience longer queue times and more frequent unrelated failures around -milestones and release deadlines (as other projects attempt to sneak things -in at the last minute). - -You may help your colleagues (and yourself) if you advocate for deadlines on -features, etc., at least a week in advance of the actual release deadline. -This can apply to all release deadlines (milestone, release candidate, final). -If you can stabilize your project prior to the flurry of activity, you will -ship a better product. You can also then focus on bug fixing reviews in the -interim between your project priorities deadline and the actual release -deadline. - - -Checklist -========= - -The release team will set dates for all the milestones for each release. The -release schedule can be found from this page: -https://releases.openstack.org/index.html -There are checklists to follow for various important release aspects: - - -Glance Specific Goals --------------------- - -While the release team sets dates for community-wide releases, you should work -with the PTL to set Glance specific deadlines/events such as spec proposal freeze, -spec freeze, mid-cycle, bug squash and review squash etc. Also, you can set -additional deadlines for Glance priorities to ensure work is on-track for a -timely release. - -You are also responsible for ensuring PTL and other concerned individuals are -aware and reminded of the events/deadlines to ensure timely release.
- - -Milestone Release ------------------ - -The release schedule for the current cycle will give you a range of dates for -each milestone release. It is your job to propose the release for Glance -sometime during that range and ensure the release is created. This means the -following: - -- Showing up at meetings to announce the planned date weeks in advance. - - Your colleagues on the Glance team will need at least 4 weeks notice so they - can plan and prioritize what work should be included in the milestone. - -- Reminding your colleagues what the stated priorities for that milestone - were, their progress, etc. - -- Being inflexible in the release date. As soon as you pick your date, stick - to it. If a feature slips a milestone to the next, it is not the end of the - world. It is not ideal, but Glance *needs* to release its milestone as soon - as possible. - -- Proposing the release in a timely and correct fashion on the day you stated. - You may have colleagues try to argue their case to the release team. This is - when your collaboration with the PTL will be necessary. The PTL needs to - help affirm your decision to release the version of the project you can on - the day you decide it. - -- Release ``glance_store`` and ``python-glanceclient`` at least once per - milestone. - -- Write `release notes`_ - -Release Candidate Releases --------------------------- - -The release candidate release period is similarly scoped to a few days. It is -even more important that Glance release during that period. To help your -colleagues, try to schedule this release as close to the end of that range as -possible. Once RC1 is released, only bugs introduced since the last milestone -that are going to compromise the integrity of the release should be merged. -Again, your duties include all of the Milestone Release duties plus the -following: - -- When proposing the release, you need to appropriately configure the release - tooling to create a stable branch. 
If you do not, then you have not - appropriately created the release candidate. - -- Keeping a *very* watchful eye on what is proposed to and approved for master - as well as your new stable branch. Again, automated updates from release - tooling and *release critical* bugs are the only things that should be - merged to either. - -- If release critical bugs are found and fixed, proposing a new release - candidate from the SHA on the stable branch. - -- Write `release notes`_ - -- Announce that any non-release-critical changes won't be accepted from this - point onwards until the final Glance release is made. Consider adding -2 on such - reviews with good description to prevent further updates. This also helps in - keeping the gate relatively free to process the release-critical changes. - - -Final Releases --------------- - -The release team usually proposes all of the projects' final releases in one -patch based off the final release candidate. After those are created, some -things in Glance need to be updated immediately. - -- Right after cutting the stable branch, Glance release version (not the API - version) must be bumped so that all further development is attributed to the - next release version. This could be done by adding an empty commit with commit - message containing the flag ``Sem-Ver: api-break`` to indicate a version. Here - is a sample commit attempting to `bump the release version`_. -- The migration tooling that Glance uses relies on some constants defined in - `glance/db/migration.py`_. Post final release, those need *immediate* - updating. - - -Acknowledgements ----------------- -This document was originally written by Ian Cordasco. It's maintained and -revised by the Glance Release CPLs: - -- Ian Cordasco, Release CPL for Ocata -- Hemanth Makkapati, Release CPL for Pike - - -.. links -.. _weekly meeting: - http://eavesdrop.openstack.org/#Release_Team_Meeting -.. _openstack/governance: - https://git.openstack.org/cgit/openstack/governance -.. 
_openstack/releases: - https://git.openstack.org/cgit/openstack/releases -.. _StoryBoard: - https://storyboard.openstack.org/ -.. _glance/db/migration.py: - https://github.com/openstack/glance/blob/master/glance/db/migration.py -.. _release management process guide: - https://docs.openstack.org/project-team-guide/release-management.html -.. _release management tooling guide: - http://git.openstack.org/cgit/openstack/releases/tree/README.rst -.. _bug tracker: - https://bugs.launchpad.net/glance -.. _release notes: - https://docs.openstack.org/project-team-guide/release-management.html#managing-release-notes -.. _bump the release version: - https://review.openstack.org/#q,I21480e186a2aab6c54f7ea798c215660bddf9e4c,n,z diff --git a/doc/source/deprecation-note.inc b/doc/source/deprecation-note.inc deleted file mode 100644 index c3fe751a..00000000 --- a/doc/source/deprecation-note.inc +++ /dev/null @@ -1,6 +0,0 @@ -.. note:: The Images API v1 has been DEPRECATED in the Newton release. The - migration path is to use the `Images API v2 - `_ instead of version 1 - of the API. The Images API v1 will ultimately be removed, following the - `OpenStack standard deprecation policy - `_. diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst deleted file mode 100644 index d04f8b21..00000000 --- a/doc/source/glossary.rst +++ /dev/null @@ -1,4153 +0,0 @@ -======== -Glossary -======== - -0-9 -~~~ - -.. glossary:: - - 6to4 - - A mechanism that allows IPv6 packets to be transmitted - over an IPv4 network, providing a strategy for migrating to - IPv6. - -A -~ - -.. glossary:: - - absolute limit - - Impassable limits for guest VMs. Settings include total RAM - size, maximum number of vCPUs, and maximum disk size. - - access control list (ACL) - - A list of permissions attached to an object. An ACL specifies - which users or system processes have access to objects. It also - defines which operations can be performed on specified objects. 
Each - entry in a typical ACL specifies a subject and an operation. For - instance, the ACL entry ``(Alice, delete)`` for a file gives - Alice permission to delete the file. - - access key - - Alternative term for an Amazon EC2 access key. See EC2 access - key. - - account - - The Object Storage context of an account. Do not confuse with a - user account from an authentication service, such as Active Directory, - /etc/passwd, OpenLDAP, OpenStack Identity, and so on. - - account auditor - - Checks for missing replicas and incorrect or corrupted objects - in a specified Object Storage account by running queries against the - back-end SQLite database. - - account database - - A SQLite database that contains Object Storage accounts and - related metadata and that the accounts server accesses. - - account reaper - - An Object Storage worker that scans for and deletes account - databases and that the account server has marked for deletion. - - account server - - Lists containers in Object Storage and stores container - information in the account database. - - account service - - An Object Storage component that provides account services such - as list, create, modify, and audit. Do not confuse with OpenStack - Identity service, OpenLDAP, or similar user-account services. - - accounting - - The Compute service provides accounting information through the - event notification and system usage data facilities. - - Active Directory - - Authentication and identity service by Microsoft, based on LDAP. - Supported in OpenStack. - - active/active configuration - - In a high-availability setup with an active/active - configuration, several systems share the load together and if one - fails, the load is distributed to the remaining systems. - - active/passive configuration - - In a high-availability setup with an active/passive - configuration, systems are set up to bring additional resources online - to replace those that have failed. 
- - address pool - - A group of fixed and/or floating IP addresses that are assigned - to a project and can be used by or assigned to the VM instances in a - project. - - Address Resolution Protocol (ARP) - - The protocol by which layer-3 IP addresses are resolved into - layer-2 link local addresses. - - admin API - - A subset of API calls that are accessible to authorized - administrators and are generally not accessible to end users or the - public Internet. They can exist as a separate service (keystone) or - can be a subset of another API (nova). - - admin server - - In the context of the Identity service, the worker process that - provides access to the admin API. - - administrator - - The person responsible for installing, configuring, - and managing an OpenStack cloud. - - Advanced Message Queuing Protocol (AMQP) - - The open standard messaging protocol used by OpenStack - components for intra-service communications, provided by RabbitMQ, - Qpid, or ZeroMQ. - - Advanced RISC Machine (ARM) - - Lower power consumption CPU often found in mobile and embedded - devices. Supported by OpenStack. - - alert - - The Compute service can send alerts through its notification - system, which includes a facility to create custom notification - drivers. Alerts can be sent to and displayed on the dashboard. - - allocate - - The process of taking a floating IP address from the address - pool so it can be associated with a fixed IP on a guest VM - instance. - - Amazon Kernel Image (AKI) - - Both a VM container format and disk format. Supported by Image - service. - - Amazon Machine Image (AMI) - - Both a VM container format and disk format. Supported by Image - service. - - Amazon Ramdisk Image (ARI) - - Both a VM container format and disk format. Supported by Image - service. - - Anvil - - A project that ports the shell script-based project named - DevStack to Python. - - aodh - - Part of the OpenStack :term:`Telemetry service `; provides alarming functionality. 
- - Apache - - The Apache Software Foundation supports the Apache community of - open-source software projects. These projects provide software - products for the public good. - - Apache License 2.0 - - All OpenStack core projects are provided under the terms of the - Apache License 2.0 license. - - Apache Web Server - - The most common web server software currently used on the - Internet. - - API endpoint - - The daemon, worker, or service that a client communicates with - to access an API. API endpoints can provide any number of services, - such as authentication, sales data, performance meters, Compute VM - commands, census data, and so on. - - API extension - - Custom modules that extend some OpenStack core APIs. - - API extension plug-in - - Alternative term for a Networking plug-in or Networking API - extension. - - API key - - Alternative term for an API token. - - API server - - Any node running a daemon or worker that provides an API - endpoint. - - API token - - Passed to API requests and used by OpenStack to verify that the - client is authorized to run the requested operation. - - API version - - In OpenStack, the API version for a project is part of the URL. - For example, ``example.com/nova/v1/foobar``. - - applet - - A Java program that can be embedded into a web page. - - Application Catalog service (murano) - - The project that provides an application catalog service so that users - can compose and deploy composite environments on an application - abstraction level while managing the application lifecycle. - - Application Programming Interface (API) - - A collection of specifications used to access a service, - application, or program. Includes service calls, required parameters - for each call, and the expected return values. - - application server - - A piece of software that makes available another piece of - software over a network. 
- - Application Service Provider (ASP) - - Companies that rent specialized applications that help - businesses and organizations provide additional services - with lower cost. - - arptables - - Tool used for maintaining Address Resolution Protocol packet - filter rules in the Linux kernel firewall modules. Used along with - iptables, ebtables, and ip6tables in Compute to provide firewall - services for VMs. - - associate - - The process associating a Compute floating IP address with a - fixed IP address. - - Asynchronous JavaScript and XML (AJAX) - - A group of interrelated web development techniques used on the - client-side to create asynchronous web applications. Used extensively - in horizon. - - ATA over Ethernet (AoE) - - A disk storage protocol tunneled within Ethernet. - - attach - - The process of connecting a VIF or vNIC to a L2 network in - Networking. In the context of Compute, this process connects a storage - volume to an instance. - - attachment (network) - - Association of an interface ID to a logical port. Plugs an - interface into a port. - - auditing - - Provided in Compute through the system usage data - facility. - - auditor - - A worker process that verifies the integrity of Object Storage - objects, containers, and accounts. Auditors is the collective term for - the Object Storage account auditor, container auditor, and object - auditor. - - Austin - - The code name for the initial release of - OpenStack. The first design summit took place in - Austin, Texas, US. - - auth node - - Alternative term for an Object Storage authorization - node. - - authentication - - The process that confirms that the user, process, or client is - really who they say they are through private key, secret token, - password, fingerprint, or similar method. - - authentication token - - A string of text provided to the client after authentication. - Must be provided by the user or process in subsequent requests to the - API endpoint. 
- - AuthN - - The Identity service component that provides authentication - services. - - authorization - - The act of verifying that a user, process, or client is - authorized to perform an action. - - authorization node - - An Object Storage node that provides authorization - services. - - AuthZ - - The Identity component that provides high-level - authorization services. - - Auto ACK - - Configuration setting within RabbitMQ that enables or disables - message acknowledgment. Enabled by default. - - auto declare - - A Compute RabbitMQ setting that determines whether a message - exchange is automatically created when the program starts. - - availability zone - - An Amazon EC2 concept of an isolated area that is used for fault - tolerance. Do not confuse with an OpenStack Compute zone or - cell. - - AWS CloudFormation template - - AWS CloudFormation allows Amazon Web Services (AWS) users to create and manage a - collection of related resources. The Orchestration service - supports a CloudFormation-compatible format (CFN). - -B -~ - -.. glossary:: - - back end - - Interactions and processes that are obfuscated from the user, - such as Compute volume mount, data transmission to an iSCSI target by - a daemon, or Object Storage object integrity checks. - - back-end catalog - - The storage method used by the Identity service catalog service - to store and retrieve information about API endpoints that are - available to the client. Examples include an SQL database, LDAP - database, or KVS back end. - - back-end store - - The persistent data store used to save and retrieve information - for a service, such as lists of Object Storage objects, current state - of guest VMs, lists of user names, and so on. Also, the method that the - Image service uses to get and store VM images. Options include Object - Storage, locally mounted file system, RADOS block devices, VMware - datastore, and HTTP. 
- - Backup, Restore, and Disaster Recovery service (freezer) - - The project that provides integrated tooling for backing up, restoring, - and recovering file systems, instances, or database backups. - - bandwidth - - The amount of available data used by communication resources, - such as the Internet. Represents the amount of data that is used to - download things or the amount of data available to download. - - barbican - - Code name of the :term:`Key Manager service - `. - - bare - - An Image service container format that indicates that no - container exists for the VM image. - - Bare Metal service (ironic) - - The OpenStack service that provides a service and associated libraries - capable of managing and provisioning physical machines in a - security-aware and fault-tolerant manner. - - base image - - An OpenStack-provided image. - - Bell-LaPadula model - - A security model that focuses on data confidentiality - and controlled access to classified information. - This model divides the entities into subjects and objects. - The clearance of a subject is compared to the classification of the - object to determine if the subject is authorized for the specific access mode. - The clearance or classification scheme is expressed in terms of a lattice. - - Benchmark service (rally) - - OpenStack project that provides a framework for - performance analysis and benchmarking of individual - OpenStack components as well as full production OpenStack - cloud deployments. - - Bexar - - A grouped release of projects related to - OpenStack that came out in February of 2011. It - included only Compute (nova) and Object Storage (swift). - Bexar is the code name for the second release of - OpenStack. The design summit took place in - San Antonio, Texas, US, which is the county seat for Bexar county. - - binary - - Information that consists solely of ones and zeroes, which is - the language of computers. 
- - bit - - A bit is a single digit number that is in base of 2 (either a - zero or one). Bandwidth usage is measured in bits per second. - - bits per second (BPS) - - The universal measurement of how quickly data is transferred - from place to place. - - block device - - A device that moves data in the form of blocks. These device - nodes interface the devices, such as hard disks, CD-ROM drives, flash - drives, and other addressable regions of memory. - - block migration - - A method of VM live migration used by KVM to evacuate instances - from one host to another with very little downtime during a - user-initiated switchover. Does not require shared storage. Supported - by Compute. - - Block Storage API - - An API on a separate endpoint for attaching, - detaching, and creating block storage for compute - VMs. - - Block Storage service (cinder) - - The OpenStack service that implement services and libraries to provide - on-demand, self-service access to Block Storage resources via abstraction - and automation on top of other block storage devices. - - BMC (Baseboard Management Controller) - - The intelligence in the IPMI architecture, which is a specialized - micro-controller that is embedded on the motherboard of a computer - and acts as a server. Manages the interface between system management - software and platform hardware. - - bootable disk image - - A type of VM image that exists as a single, bootable - file. - - Bootstrap Protocol (BOOTP) - - A network protocol used by a network client to obtain an IP - address from a configuration server. Provided in Compute through the - dnsmasq daemon when using either the FlatDHCP manager or VLAN manager - network manager. - - Border Gateway Protocol (BGP) - - The Border Gateway Protocol is a dynamic routing protocol - that connects autonomous systems. Considered the - backbone of the Internet, this protocol connects disparate - networks to form a larger network. 
- - browser - - Any client software that enables a computer or device to access - the Internet. - - builder file - - Contains configuration information that Object Storage uses to - reconfigure a ring or to re-create it from scratch after a serious - failure. - - bursting - - The practice of utilizing a secondary environment to - elastically build instances on-demand when the primary - environment is resource constrained. - - button class - - A group of related button types within horizon. Buttons to - start, stop, and suspend VMs are in one class. Buttons to associate - and disassociate floating IP addresses are in another class, and so - on. - - byte - - Set of bits that make up a single character; there are usually 8 - bits to a byte. - -C -~ - -.. glossary:: - - cache pruner - - A program that keeps the Image service VM image cache at or - below its configured maximum size. - - Cactus - - An OpenStack grouped release of projects that came out in the - spring of 2011. It included Compute (nova), Object Storage (swift), - and the Image service (glance). - Cactus is a city in Texas, US and is the code name for - the third release of OpenStack. When OpenStack releases went - from three to six months long, the code name of the release - changed to match a geography nearest the previous - summit. - - CALL - - One of the RPC primitives used by the OpenStack message queue - software. Sends a message and waits for a response. - - capability - - Defines resources for a cell, including CPU, storage, and - networking. Can apply to the specific services within a cell or a - whole cell. - - capacity cache - - A Compute back-end database table that contains the current - workload, amount of free RAM, and number of VMs running on each host. - Used to determine on which host a VM starts. - - capacity updater - - A notification driver that monitors VM instances and updates the - capacity cache as needed. 
- - CAST - - One of the RPC primitives used by the OpenStack message queue - software. Sends a message and does not wait for a response. - - catalog - - A list of API endpoints that are available to a user after - authentication with the Identity service. - - catalog service - - An Identity service that lists API endpoints that are available - to a user after authentication with the Identity service. - - ceilometer - - Part of the OpenStack :term:`Telemetry service `; gathers and stores metrics from other - OpenStack services. - - cell - - Provides logical partitioning of Compute resources in a child - and parent relationship. Requests are passed from parent cells to - child cells if the parent cannot provide the requested - resource. - - cell forwarding - - A Compute option that enables parent cells to pass resource - requests to child cells if the parent cannot provide the requested - resource. - - cell manager - - The Compute component that contains a list of the current - capabilities of each host within the cell and routes requests as - appropriate. - - CentOS - - A Linux distribution that is compatible with OpenStack. - - Ceph - - Massively scalable distributed storage system that consists of - an object store, block store, and POSIX-compatible distributed file - system. Compatible with OpenStack. - - CephFS - - The POSIX-compliant file system provided by Ceph. - - certificate authority (CA) - - In cryptography, an entity that issues digital certificates. The digital - certificate certifies the ownership of a public key by the named - subject of the certificate. This enables others (relying parties) to - rely upon signatures or assertions made by the private key that - corresponds to the certified public key. In this model of trust - relationships, a CA is a trusted third party for both the subject - (owner) of the certificate and the party relying upon the certificate. - CAs are characteristic of many public key infrastructure (PKI) - schemes. 
- In OpenStack, a simple certificate authority is provided by Compute for - cloudpipe VPNs and VM image decryption. - - Challenge-Handshake Authentication Protocol (CHAP) - - An iSCSI authentication method supported by Compute. - - chance scheduler - - A scheduling method used by Compute that randomly chooses an - available host from the pool. - - changes since - - A Compute API parameter that downloads changes to the requested - item since your last request, instead of downloading a new, fresh set - of data and comparing it against the old data. - - Chef - - An operating system configuration management tool supporting - OpenStack deployments. - - child cell - - If a requested resource such as CPU time, disk storage, or - memory is not available in the parent cell, the request is forwarded - to its associated child cells. If the child cell can fulfill the - request, it does. Otherwise, it attempts to pass the request to any of - its children. - - cinder - - Codename for :term:`Block Storage service - `. - - CirrOS - - A minimal Linux distribution designed for use as a test - image on clouds such as OpenStack. - - Cisco neutron plug-in - - A Networking plug-in for Cisco devices and technologies, - including UCS and Nexus. - - cloud architect - - A person who plans, designs, and oversees the creation of - clouds. - - Cloud Auditing Data Federation (CADF) - - Cloud Auditing Data Federation (CADF) is a - specification for audit event data. CADF is - supported by OpenStack Identity. - - cloud computing - - A model that enables access to a shared pool of configurable - computing resources, such as networks, servers, storage, applications, - and services, that can be rapidly provisioned and released with - minimal management effort or service provider interaction. 
- - cloud controller - - Collection of Compute components that represent the global state - of the cloud; talks to services, such as Identity authentication, - Object Storage, and node/storage workers through a - queue. - - cloud controller node - - A node that runs network, volume, API, scheduler, and image - services. Each service may be broken out into separate nodes for - scalability or availability. - - Cloud Data Management Interface (CDMI) - - SINA standard that defines a RESTful API for managing objects in - the cloud, currently unsupported in OpenStack. - - Cloud Infrastructure Management Interface (CIMI) - - An in-progress specification for cloud management. Currently - unsupported in OpenStack. - - cloud-init - - A package commonly installed in VM images that performs - initialization of an instance after boot using information that it - retrieves from the metadata service, such as the SSH public key and - user data. - - cloudadmin - - One of the default roles in the Compute RBAC system. Grants - complete system access. - - Cloudbase-Init - - A Windows project providing guest initialization features, - similar to cloud-init. - - cloudpipe - - A compute service that creates VPNs on a per-project - basis. - - cloudpipe image - - A pre-made VM image that serves as a cloudpipe server. - Essentially, OpenVPN running on Linux. - - Clustering service (senlin) - - The project that implements clustering services and libraries - for the management of groups of homogeneous objects exposed - by other OpenStack services. - - command filter - - Lists allowed commands within the Compute rootwrap - facility. - - Common Internet File System (CIFS) - - A file sharing protocol. It is a public or open variation of the - original Server Message Block (SMB) protocol developed and used by - Microsoft. Like the SMB protocol, CIFS runs at a higher level and uses - the TCP/IP protocol. 
- - Common Libraries (oslo) - - The project that produces a set of python libraries containing code - shared by OpenStack projects. The APIs provided by these libraries - should be high quality, stable, consistent, documented and generally - applicable. - - community project - - A project that is not officially endorsed by the OpenStack - Foundation. If the project is successful enough, it might be elevated - to an incubated project and then to a core project, or it might be - merged with the main code trunk. - - compression - - Reducing the size of files by special encoding, the file can be - decompressed again to its original content. OpenStack supports - compression at the Linux file system level but does not support - compression for things such as Object Storage objects or Image service - VM images. - - Compute API (Nova API) - - The nova-api daemon provides access to nova services. Can communicate with - other APIs, such as the Amazon EC2 API. - - compute controller - - The Compute component that chooses suitable hosts on which to - start VM instances. - - compute host - - Physical host dedicated to running compute nodes. - - compute node - - A node that runs the nova-compute daemon that manages VM - instances that provide a wide - range of services, such as web applications and analytics. - - Compute service (nova) - - The OpenStack core project that implements services and associated - libraries to provide massively-scalable, on-demand, self-service - access to compute resources, including bare metal, virtual machines, - and containers. - - compute worker - - The Compute component that runs on each compute node and manages - the VM instance lifecycle, including run, reboot, terminate, - attach/detach volumes, and so on. Provided by the nova-compute daemon. - - concatenated object - - A set of segment objects that Object Storage combines and sends - to the client. 
- - conductor - - In Compute, conductor is the process that proxies database - requests from the compute process. Using conductor improves security - because compute nodes do not need direct access to the - database. - - congress - - Code name for the :term:`Governance service - `. - - consistency window - - The amount of time it takes for a new Object Storage object to - become accessible to all clients. - - console log - - Contains the output from a Linux VM console in Compute. - - container - - Organizes and stores objects in Object Storage. Similar to the - concept of a Linux directory but cannot be nested. Alternative term - for an Image service container format. - - container auditor - - Checks for missing replicas or incorrect objects in specified - Object Storage containers through queries to the SQLite back-end - database. - - container database - - A SQLite database that stores Object Storage containers and - container metadata. The container server accesses this - database. - - container format - - A wrapper used by the Image service that contains a VM image and - its associated metadata, such as machine state, OS disk size, and so - on. - - Container Infrastructure Management service (magnum) - - The project which provides a set of services for provisioning, scaling, - and managing container orchestration engines. - - container server - - An Object Storage server that manages containers. - - container service - - The Object Storage component that provides container services, - such as create, delete, list, and so on. - - content delivery network (CDN) - - A content delivery network is a specialized network that is - used to distribute content to clients, typically located - close to the client for increased performance. - - controller node - - Alternative term for a cloud controller node. 
- - core API - - Depending on context, the core API is either the OpenStack API - or the main API of a specific core project, such as Compute, - Networking, Image service, and so on. - - core service - - An official OpenStack service defined as core by - DefCore Committee. Currently, consists of - Block Storage service (cinder), Compute service (nova), - Identity service (keystone), Image service (glance), - Networking service (neutron), and Object Storage service (swift). - - cost - - Under the Compute distributed scheduler, this is calculated by - looking at the capabilities of each host relative to the flavor of the - VM instance being requested. - - credentials - - Data that is only known to or accessible by a user and - used to verify that the user is who he says he is. - Credentials are presented to the server during - authentication. Examples include a password, secret key, - digital certificate, and fingerprint. - - CRL - - A Certificate Revocation List (CRL) in a PKI model is a list of - certificates that have been revoked. End entities presenting - these certificates should not be trusted. - - Cross-Origin Resource Sharing (CORS) - - A mechanism that allows many resources (for example, - fonts, JavaScript) on a web page to be requested from - another domain outside the domain from which the resource - originated. In particular, JavaScript's AJAX calls can use - the XMLHttpRequest mechanism. - - Crowbar - - An open source community project by SUSE that aims to provide - all necessary services to quickly deploy and manage clouds. - - current workload - - An element of the Compute capacity cache that is calculated - based on the number of build, snapshot, migrate, and resize operations - currently in progress on a given host. - - customer - - Alternative term for project. - - customization module - - A user-created Python module that is loaded by horizon to change - the look and feel of the dashboard. - -D -~ - -.. 
glossary:: - - daemon - - A process that runs in the background and waits for requests. - May or may not listen on a TCP or UDP port. Do not confuse with a - worker. - - Dashboard (horizon) - - OpenStack project which provides an extensible, unified, web-based - user interface for all OpenStack services. - - data encryption - - Both Image service and Compute support encrypted virtual machine - (VM) images (but not instances). In-transit data encryption is - supported in OpenStack using technologies such as HTTPS, SSL, TLS, and - SSH. Object Storage does not support object encryption at the - application level but may support storage that uses disk encryption. - - Data loss prevention (DLP) software - - Software programs used to protect sensitive information - and prevent it from leaking outside a network boundary - through the detection and denying of the data transportation. - - Data Processing service (sahara) - - OpenStack project that provides a scalable - data-processing stack and associated management - interfaces. - - data store - - A database engine supported by the Database service. - - database ID - - A unique ID given to each replica of an Object Storage - database. - - database replicator - - An Object Storage component that copies changes in the account, - container, and object databases to other nodes. - - Database service (trove) - - An integrated project that provides scalable and reliable - Cloud Database-as-a-Service functionality for both - relational and non-relational database engines. - - deallocate - - The process of removing the association between a floating IP - address and a fixed IP address. Once this association is removed, the - floating IP returns to the address pool. - - Debian - - A Linux distribution that is compatible with OpenStack. - - deduplication - - The process of finding duplicate data at the disk block, file, - and/or object level to minimize storage use—currently unsupported - within OpenStack. 
- - default panel - - The default panel that is displayed when a user accesses the - dashboard. - - default project - - New users are assigned to this project if no project is specified - when a user is created. - - default token - - An Identity service token that is not associated with a specific - project and is exchanged for a scoped token. - - delayed delete - - An option within Image service so that an image is deleted after - a predefined number of seconds instead of immediately. - - delivery mode - - Setting for the Compute RabbitMQ message delivery mode; can be - set to either transient or persistent. - - denial of service (DoS) - - Denial of service (DoS) is a short form for - denial-of-service attack. This is a malicious attempt to - prevent legitimate users from using a service. - - deprecated auth - - An option within Compute that enables administrators to create - and manage users through the ``nova-manage`` command as - opposed to using the Identity service. - - designate - - Code name for the :term:`DNS service `. - - Desktop-as-a-Service - - A platform that provides a suite of desktop environments - that users access to receive a desktop experience from - any location. This may provide general use, development, or - even homogeneous testing environments. - - developer - - One of the default roles in the Compute RBAC system and the - default role assigned to a new user. - - device ID - - Maps Object Storage partitions to physical storage - devices. - - device weight - - Distributes partitions proportionately across Object Storage - devices based on the storage capacity of each device. - - DevStack - - Community project that uses shell scripts to quickly build - complete OpenStack development environments. - - DHCP agent - - OpenStack Networking agent that provides DHCP services - for virtual networks. - - Diablo - - A grouped release of projects related to OpenStack that came out - in the fall of 2011, the fourth release of OpenStack. 
It included - Compute (nova 2011.3), Object Storage (swift 1.4.3), and the Image - service (glance). - Diablo is the code name for the fourth release of - OpenStack. The design summit took place in - the Bay Area near Santa Clara, - California, US and Diablo is a nearby city. - - direct consumer - - An element of the Compute RabbitMQ that comes to life when a RPC - call is executed. It connects to a direct exchange through a unique - exclusive queue, sends the message, and terminates. - - direct exchange - - A routing table that is created within the Compute RabbitMQ - during RPC calls; one is created for each RPC call that is - invoked. - - direct publisher - - Element of RabbitMQ that provides a response to an incoming MQ - message. - - disassociate - - The process of removing the association between a floating IP - address and fixed IP and thus returning the floating IP address to the - address pool. - - Discretionary Access Control (DAC) - - Governs the ability of subjects to access objects, while enabling - users to make policy decisions and assign security attributes. - The traditional UNIX system of users, groups, and read-write-execute - permissions is an example of DAC. - - disk encryption - - The ability to encrypt data at the file system, disk partition, - or whole-disk level. Supported within Compute VMs. - - disk format - - The underlying format that a disk image for a VM is stored as - within the Image service back-end store. For example, AMI, ISO, QCOW2, - VMDK, and so on. - - dispersion - - In Object Storage, tools to test and ensure dispersion of - objects and containers to ensure fault tolerance. - - distributed virtual router (DVR) - - Mechanism for highly available multi-host routing when using - OpenStack Networking (neutron). - - Django - - A web framework used extensively in horizon. - - DNS record - - A record that specifies information about a particular domain - and belongs to the domain. 
- - DNS service (designate) - - OpenStack project that provides scalable, on demand, self - service access to authoritative DNS services, in a - technology-agnostic manner. - - dnsmasq - - Daemon that provides DNS, DHCP, BOOTP, and TFTP services for - virtual networks. - - domain - - An Identity API v3 entity. Represents a collection of - projects, groups and users that defines administrative boundaries for - managing OpenStack Identity entities. - On the Internet, separates a website from other sites. Often, - the domain name has two or more parts that are separated by dots. - For example, yahoo.com, usa.gov, harvard.edu, or - mail.yahoo.com. - Also, a domain is an entity or container of all DNS-related - information containing one or more records. - - Domain Name System (DNS) - - A system by which Internet domain name-to-address and - address-to-name resolutions are determined. - DNS helps navigate the Internet by translating the IP address - into an address that is easier to remember. For example, translating - 111.111.111.1 into www.yahoo.com. - All domains and their components, such as mail servers, utilize - DNS to resolve to the appropriate locations. DNS servers are usually - set up in a master-slave relationship such that failure of the master - invokes the slave. DNS servers might also be clustered or replicated - such that changes made to one DNS server are automatically propagated - to other active servers. - In Compute, the support that enables associating DNS entries - with floating IP addresses, nodes, or cells so that hostnames are - consistent across reboots. - - download - - The transfer of data, usually in the form of files, from one - computer to another. - - durable exchange - - The Compute RabbitMQ message exchange that remains active when - the server restarts. - - durable queue - - A Compute RabbitMQ message queue that remains active when the - server restarts. 
- - Dynamic Host Configuration Protocol (DHCP) - - A network protocol that configures devices that are connected to a - network so that they can communicate on that network by using the - Internet Protocol (IP). The protocol is implemented in a client-server - model where DHCP clients request configuration data, such as an IP - address, a default route, and one or more DNS server addresses from a - DHCP server. - A method to automatically configure networking for a host at - boot time. Provided by both Networking and Compute. - - Dynamic HyperText Markup Language (DHTML) - - Pages that use HTML, JavaScript, and Cascading Style Sheets to - enable users to interact with a web page or show simple - animation. - -E -~ - -.. glossary:: - - east-west traffic - - Network traffic between servers in the same cloud or data center. - See also north-south traffic. - - EBS boot volume - - An Amazon EBS storage volume that contains a bootable VM image, - currently unsupported in OpenStack. - - ebtables - - Filtering tool for a Linux bridging firewall, enabling - filtering of network traffic passing through a Linux bridge. - Used in Compute along with arptables, iptables, and ip6tables - to ensure isolation of network communications. - - EC2 - - The Amazon commercial compute product, similar to - Compute. - - EC2 access key - - Used along with an EC2 secret key to access the Compute EC2 - API. - - EC2 API - - OpenStack supports accessing the Amazon EC2 API through - Compute. - - EC2 Compatibility API - - A Compute component that enables OpenStack to communicate with - Amazon EC2. - - EC2 secret key - - Used along with an EC2 access key when communicating with the - Compute EC2 API; used to digitally sign each request. - - Elastic Block Storage (EBS) - - The Amazon commercial block storage product. - - encapsulation - - The practice of placing one packet type within another for - the purposes of abstracting or securing data. Examples - include GRE, MPLS, or IPsec. 
- - encryption - - OpenStack supports encryption technologies such as HTTPS, SSH, - SSL, TLS, digital certificates, and data encryption. - - endpoint - - See API endpoint. - - endpoint registry - - Alternative term for an Identity service catalog. - - endpoint template - - A list of URL and port number endpoints that indicate where a - service, such as Object Storage, Compute, Identity, and so on, can be - accessed. - - entity - - Any piece of hardware or software that wants to connect to the - network services provided by Networking, the network connectivity - service. An entity can make use of Networking by implementing a - VIF. - - ephemeral image - - A VM image that does not save changes made to its volumes and - reverts them to their original state after the instance is - terminated. - - ephemeral volume - - Volume that does not save the changes made to it and reverts to - its original state when the current user relinquishes control. - - Essex - - A grouped release of projects related to OpenStack that came out - in April 2012, the fifth release of OpenStack. It included Compute - (nova 2012.1), Object Storage (swift 1.4.8), Image (glance), Identity - (keystone), and Dashboard (horizon). - Essex is the code name for the fifth release of - OpenStack. The design summit took place in - Boston, Massachusetts, US and Essex is a nearby city. - - ESXi - - An OpenStack-supported hypervisor. - - ETag - - MD5 hash of an object within Object Storage, used to ensure data - integrity. - - euca2ools - - A collection of command-line tools for administering VMs; most - are compatible with OpenStack. - - Eucalyptus Kernel Image (EKI) - - Used along with an ERI to create an EMI. - - Eucalyptus Machine Image (EMI) - - VM image container format supported by Image service. - - Eucalyptus Ramdisk Image (ERI) - - Used along with an EKI to create an EMI. 
- - evacuate - - The process of migrating one or all virtual machine (VM) - instances from one host to another, compatible with both shared - storage live migration and block migration. - - exchange - - Alternative term for a RabbitMQ message exchange. - - exchange type - - A routing algorithm in the Compute RabbitMQ. - - exclusive queue - - Connected to by a direct consumer in RabbitMQ—Compute, the - message can be consumed only by the current connection. - - extended attributes (xattr) - - File system option that enables storage of additional - information beyond owner, group, permissions, modification time, and - so on. The underlying Object Storage file system must support extended - attributes. - - extension - - Alternative term for an API extension or plug-in. In the context - of Identity service, this is a call that is specific to the - implementation, such as adding support for OpenID. - - external network - - A network segment typically used for instance Internet - access. - - extra specs - - Specifies additional requirements when Compute determines where - to start a new instance. Examples include a minimum amount of network - bandwidth or a GPU. - -F -~ - -.. glossary:: - - FakeLDAP - - An easy method to create a local LDAP directory for testing - Identity and Compute. Requires Redis. - - fan-out exchange - - Within RabbitMQ and Compute, it is the messaging interface that - is used by the scheduler service to receive capability messages from - the compute, volume, and network nodes. - - federated identity - - A method to establish trusts between identity providers and the - OpenStack cloud. - - Fedora - - A Linux distribution compatible with OpenStack. - - Fibre Channel - - Storage protocol similar in concept to TCP/IP; encapsulates SCSI - commands and data. - - Fibre Channel over Ethernet (FCoE) - - The fibre channel protocol tunneled within Ethernet. 
- - fill-first scheduler - - The Compute scheduling method that attempts to fill a host with - VMs rather than starting new VMs on a variety of hosts. - - filter - - The step in the Compute scheduling process when hosts that - cannot run VMs are eliminated and not chosen. - - firewall - - Used to restrict communications between hosts and/or nodes, - implemented in Compute using iptables, arptables, ip6tables, and - ebtables. - - FireWall-as-a-Service (FWaaS) - - A Networking extension that provides perimeter firewall - functionality. - - fixed IP address - - An IP address that is associated with the same instance each - time that instance boots, is generally not accessible to end users or - the public Internet, and is used for management of the - instance. - - Flat Manager - - The Compute component that gives IP addresses to authorized - nodes and assumes DHCP, DNS, and routing configuration and services - are provided by something else. - - flat mode injection - - A Compute networking method where the OS network configuration - information is injected into the VM image before the instance - starts. - - flat network - - Virtual network type that uses neither VLANs nor tunnels to - segregate project traffic. Each flat network typically requires - a separate underlying physical interface defined by bridge - mappings. However, a flat network can contain multiple - subnets. - - FlatDHCP Manager - - The Compute component that provides dnsmasq (DHCP, DNS, BOOTP, - TFTP) and radvd (routing) services. - - flavor - - Alternative term for a VM instance type. - - flavor ID - - UUID for each Compute or Image service VM flavor or instance - type. - - floating IP address - - An IP address that a project can associate with a VM so that the - instance has the same public IP address each time that it boots. You - create a pool of floating IP addresses and assign them to instances as - they are launched to maintain a consistent IP address for maintaining - DNS assignment. 
- - Folsom - - A grouped release of projects related to OpenStack that came out - in the fall of 2012, the sixth release of OpenStack. It includes - Compute (nova), Object Storage (swift), Identity (keystone), - Networking (neutron), Image service (glance), and Volumes or Block - Storage (cinder). - Folsom is the code name for the sixth release of - OpenStack. The design summit took place in - San Francisco, California, US and Folsom is a nearby city. - - FormPost - - Object Storage middleware that uploads (posts) an image through - a form on a web page. - - freezer - - Code name for the :term:`Backup, Restore, and Disaster Recovery service - `. - - front end - - The point where a user interacts with a service; can be an API - endpoint, the dashboard, or a command-line tool. - -G -~ - -.. glossary:: - - gateway - - An IP address, typically assigned to a router, that - passes network traffic between different networks. - - generic receive offload (GRO) - - Feature of certain network interface drivers that - combines many smaller received packets into a large packet - before delivery to the kernel IP stack. - - generic routing encapsulation (GRE) - - Protocol that encapsulates a wide variety of network - layer protocols inside virtual point-to-point links. - - glance - - Codename for the :term:`Image service`. - - glance API server - - Alternative name for the :term:`Image API`. - - glance registry - - Alternative term for the Image service :term:`image registry`. - - global endpoint template - - The Identity service endpoint template that contains services - available to all projects. - - GlusterFS - - A file system designed to aggregate NAS hosts, compatible with - OpenStack. - - gnocchi - - Part of the OpenStack :term:`Telemetry service `; provides an indexer and time-series - database. - - golden image - - A method of operating system installation where a finalized disk - image is created and then used by all nodes without - modification. 
- - Governance service (congress) - - The project that provides Governance-as-a-Service across - any collection of cloud services in order to monitor, - enforce, and audit policy over dynamic infrastructure. - - Graphic Interchange Format (GIF) - - A type of image file that is commonly used for animated images - on web pages. - - Graphics Processing Unit (GPU) - - Choosing a host based on the existence of a GPU is currently - unsupported in OpenStack. - - Green Threads - - The cooperative threading model used by Python; reduces race - conditions and only context switches when specific library calls are - made. Each OpenStack service is its own thread. - - Grizzly - - The code name for the seventh release of - OpenStack. The design summit took place in - San Diego, California, US and Grizzly is an element of the state flag of - California. - - Group - - An Identity v3 API entity. Represents a collection of users that is - owned by a specific domain. - - guest OS - - An operating system instance running under the control of a - hypervisor. - -H -~ - -.. glossary:: - - Hadoop - - Apache Hadoop is an open source software framework that supports - data-intensive distributed applications. - - Hadoop Distributed File System (HDFS) - - A distributed, highly fault-tolerant file system designed to run - on low-cost commodity hardware. - - handover - - An object state in Object Storage where a new replica of the - object is automatically created due to a drive failure. - - HAProxy - - Provides a load balancer for TCP and HTTP-based applications that - spreads requests across multiple servers. - - hard reboot - - A type of reboot where a physical or virtual power button is - pressed as opposed to a graceful, proper shutdown of the operating - system. - - Havana - - The code name for the eighth release of OpenStack. The - design summit took place in Portland, Oregon, US and Havana is - an unincorporated community in Oregon. 
- - health monitor - - Determines whether back-end members of a VIP pool can - process a request. A pool can have several health monitors - associated with it. When a pool has several monitors - associated with it, all monitors check each member of the - pool. All monitors must declare a member to be healthy for - it to stay active. - - heat - Codename for the :term:`Orchestration service - `. - - Heat Orchestration Template (HOT) - - Heat input in the format native to OpenStack. - - high availability (HA) - - A high availability system design approach and associated - service implementation ensures that a prearranged level of - operational performance will be met during a contractual - measurement period. High availability systems seek to - minimize system downtime and data loss. - - horizon - - Codename for the :term:`Dashboard `. - - horizon plug-in - - A plug-in for the OpenStack Dashboard (horizon). - - host - - A physical computer, not a VM instance (node). - - host aggregate - - A method to further subdivide availability zones into hypervisor - pools, a collection of common hosts. - - Host Bus Adapter (HBA) - - Device plugged into a PCI slot, such as a fibre channel or - network card. - - hybrid cloud - - A hybrid cloud is a composition of two or more clouds - (private, community or public) that remain distinct entities - but are bound together, offering the benefits of multiple - deployment models. Hybrid cloud can also mean the ability - to connect colocation, managed and/or dedicated services - with cloud resources. - - Hyper-V - - One of the hypervisors supported by OpenStack. - - hyperlink - - Any kind of text that contains a link to some other site, - commonly found in documents where clicking on a word or words opens up - a different website. - - Hypertext Transfer Protocol (HTTP) - - An application protocol for distributed, collaborative, - hypermedia information systems. It is the foundation of data - communication for the World Wide Web. 
Hypertext is structured - text that uses logical links (hyperlinks) between nodes containing - text. HTTP is the protocol to exchange or transfer hypertext. - - Hypertext Transfer Protocol Secure (HTTPS) - - An encrypted communications protocol for secure communication - over a computer network, with especially wide deployment on the - Internet. Technically, it is not a protocol in and of itself; - rather, it is the result of simply layering the Hypertext Transfer - Protocol (HTTP) on top of the TLS or SSL protocol, thus adding the - security capabilities of TLS or SSL to standard HTTP communications. - Most OpenStack API endpoints and many inter-component communications - support HTTPS communication. - - hypervisor - - Software that arbitrates and controls VM access to the actual - underlying hardware. - - hypervisor pool - - A collection of hypervisors grouped together through host - aggregates. - -I -~ - -.. glossary:: - - Icehouse - - The code name for the ninth release of OpenStack. The - design summit took place in Hong Kong and Ice House is a - street in that city. - - ID number - - Unique numeric ID associated with each user in Identity, - conceptually similar to a Linux or LDAP UID. - - Identity API - - Alternative term for the Identity service API. - - Identity back end - - The source used by Identity service to retrieve user - information; an OpenLDAP server, for example. - - identity provider - - A directory service, which allows users to login with a user - name and password. It is a typical source of authentication - tokens. - - Identity service (keystone) - - The project that facilitates API client authentication, service - discovery, distributed multi-project authorization, and auditing. - It provides a central directory of users mapped to the OpenStack - services they can access. It also registers endpoints for OpenStack - services and acts as a common authentication system. 
- - Identity service API - - The API used to access the OpenStack Identity service provided - through keystone. - - IETF - - Internet Engineering Task Force (IETF) is an open standards - organization that develops Internet standards, particularly the - standards pertaining to TCP/IP. - - image - - A collection of files for a specific operating system (OS) that - you use to create or rebuild a server. OpenStack provides pre-built - images. You can also create custom images, or snapshots, from servers - that you have launched. Custom images can be used for data backups or - as "gold" images for additional servers. - - Image API - - The Image service API endpoint for management of VM - images. - Processes client requests for VMs, updates Image service - metadata on the registry server, and communicates with the store - adapter to upload VM images from the back-end store. - - image cache - - Used by Image service to obtain images on the local host rather - than re-downloading them from the image server each time one is - requested. - - image ID - - Combination of a URI and UUID used to access Image service VM - images through the image API. - - image membership - - A list of projects that can access a given VM image within Image - service. - - image owner - - The project that owns an Image service virtual machine - image. - - image registry - - A list of VM images that are available through Image - service. - - Image service (glance) - - The OpenStack service that provides services and associated libraries - to store, browse, share, distribute and manage bootable disk images, - other data closely associated with initializing compute resources, - and metadata definitions. - - image status - - The current status of a VM image in Image service, not to be - confused with the status of a running instance.
- - image store - - The back-end store used by Image service to store VM images, - options include Object Storage, locally mounted file system, - RADOS block devices, VMware datastore, or HTTP. - - image UUID - - UUID used by Image service to uniquely identify each VM - image. - - incubated project - - A community project may be elevated to this status and is then - promoted to a core project. - - Infrastructure Optimization service (watcher) - - OpenStack project that aims to provide a flexible and scalable resource - optimization service for multi-project OpenStack-based clouds. - - Infrastructure-as-a-Service (IaaS) - - IaaS is a provisioning model in which an organization outsources - physical components of a data center, such as storage, hardware, - servers, and networking components. A service provider owns the - equipment and is responsible for housing, operating and maintaining - it. The client typically pays on a per-use basis. - IaaS is a model for providing cloud services. - - ingress filtering - - The process of filtering incoming network traffic. Supported by - Compute. - - INI format - - The OpenStack configuration files use an INI format to - describe options and their values. It consists of sections - and key value pairs. - - injection - - The process of putting a file into a virtual machine image - before the instance is started. - - Input/Output Operations Per Second (IOPS) - - IOPS are a common performance measurement used to benchmark computer - storage devices like hard disk drives, solid state drives, and - storage area networks. - - instance - - A running VM, or a VM in a known state such as suspended, that - can be used like a hardware server. - - instance ID - - Alternative term for instance UUID. - - instance state - - The current state of a guest VM image. - - instance tunnels network - - A network segment used for instance traffic tunnels - between compute nodes and the network node. 
- - instance type - - Describes the parameters of the various virtual machine images - that are available to users; includes parameters such as CPU, storage, - and memory. Alternative term for flavor. - - instance type ID - - Alternative term for a flavor ID. - - instance UUID - - Unique ID assigned to each guest VM instance. - - Intelligent Platform Management Interface (IPMI) - - IPMI is a standardized computer system interface used by system - administrators for out-of-band management of computer systems and - monitoring of their operation. In layman's terms, it is a way to - manage a computer using a direct network connection, whether it is - turned on or not; connecting to the hardware rather than an operating - system or login shell. - - interface - - A physical or virtual device that provides connectivity - to another device or medium. - - interface ID - - Unique ID for a Networking VIF or vNIC in the form of a - UUID. - - Internet Control Message Protocol (ICMP) - - A network protocol used by network devices for control messages. - For example, :command:`ping` uses ICMP to test - connectivity. - - Internet protocol (IP) - - Principal communications protocol in the internet protocol - suite for relaying datagrams across network boundaries. - - Internet Service Provider (ISP) - - Any business that provides Internet access to individuals or - businesses. - - Internet Small Computer System Interface (iSCSI) - - Storage protocol that encapsulates SCSI frames for transport - over IP networks. - Supported by Compute, Object Storage, and Image service. - - IP address - - Number that is unique to every computer system on the Internet. - Two versions of the Internet Protocol (IP) are in use for addresses: - IPv4 and IPv6. - - IP Address Management (IPAM) - - The process of automating IP address allocation, deallocation, - and management. Currently provided by Compute, melange, and - Networking. 
- - ip6tables - - Tool used to set up, maintain, and inspect the tables of IPv6 - packet filter rules in the Linux kernel. In OpenStack Compute, - ip6tables is used along with arptables, ebtables, and iptables to - create firewalls for both nodes and VMs. - - ipset - - Extension to iptables that allows creation of firewall rules - that match entire "sets" of IP addresses simultaneously. These - sets reside in indexed data structures to increase efficiency, - particularly on systems with a large quantity of rules. - - iptables - - Used along with arptables and ebtables, iptables create - firewalls in Compute. iptables are the tables provided by the Linux - kernel firewall (implemented as different Netfilter modules) and the - chains and rules it stores. Different kernel modules and programs are - currently used for different protocols: iptables applies to IPv4, - ip6tables to IPv6, arptables to ARP, and ebtables to Ethernet frames. - Requires root privilege to manipulate. - - ironic - - Codename for the :term:`Bare Metal service `. - - iSCSI Qualified Name (IQN) - - IQN is the format most commonly used for iSCSI names, which uniquely - identify nodes in an iSCSI network. - All IQNs follow the pattern iqn.yyyy-mm.domain:identifier, where - 'yyyy-mm' is the year and month in which the domain was registered, - 'domain' is the reversed domain name of the issuing organization, and - 'identifier' is an optional string which makes each IQN under the same - domain unique. For example, 'iqn.2015-10.org.openstack.408ae959bce1'. - - ISO9660 - - One of the VM image disk formats supported by Image - service. - - itsec - - A default role in the Compute RBAC system that can quarantine an - instance in any project. - -J -~ - -.. glossary:: - - Java - - A programming language that is used to create systems that - involve more than one computer by way of a network. - - JavaScript - - A scripting language that is used to build web pages. 
- - JavaScript Object Notation (JSON) - - One of the supported response formats in OpenStack. - - jumbo frame - - Feature in modern Ethernet networks that supports frames up to - approximately 9000 bytes. - - Juno - - The code name for the tenth release of OpenStack. The - design summit took place in Atlanta, Georgia, US and Juno is - an unincorporated community in Georgia. - -K -~ - -.. glossary:: - - Kerberos - - A network authentication protocol which works on the basis of - tickets. Kerberos allows nodes to communicate over a non-secure - network, and allows nodes to prove their identity to one another in a - secure manner. - - kernel-based VM (KVM) - - An OpenStack-supported hypervisor. KVM is a full - virtualization solution for Linux on x86 hardware containing - virtualization extensions (Intel VT or AMD-V), ARM, IBM - Power, and IBM zSeries. It consists of a loadable kernel - module, that provides the core virtualization infrastructure - and a processor specific module. - - Key Manager service (barbican) - - The project that produces a secret storage and - generation system capable of providing key management for - services wishing to enable encryption features. - - keystone - - Codename of the :term:`Identity service `. - - Kickstart - - A tool to automate system configuration and installation on Red - Hat, Fedora, and CentOS-based Linux distributions. - - Kilo - - The code name for the eleventh release of OpenStack. The - design summit took place in Paris, France. Due to delays in the name - selection, the release was known only as K. Because ``k`` is the - unit symbol for kilo and the kilogram reference artifact is stored - near Paris in the Pavillon de Breteuil in Sèvres, the community - chose Kilo as the release name. - -L -~ - -.. glossary:: - - large object - - An object within Object Storage that is larger than 5 GB. - - Launchpad - - The collaboration site for OpenStack.
- - Layer-2 (L2) agent - - OpenStack Networking agent that provides layer-2 - connectivity for virtual networks. - - Layer-2 network - - Term used in the OSI network architecture for the data link - layer. The data link layer is responsible for media access - control, flow control and detecting and possibly correcting - errors that may occur in the physical layer. - - Layer-3 (L3) agent - - OpenStack Networking agent that provides layer-3 - (routing) services for virtual networks. - - Layer-3 network - - Term used in the OSI network architecture for the network - layer. The network layer is responsible for packet - forwarding including routing from one node to another. - - Liberty - - The code name for the twelfth release of OpenStack. The - design summit took place in Vancouver, Canada and Liberty is - the name of a village in the Canadian province of - Saskatchewan. - - libvirt - - Virtualization API library used by OpenStack to interact with - many of its supported hypervisors. - - Lightweight Directory Access Protocol (LDAP) - - An application protocol for accessing and maintaining distributed - directory information services over an IP network. - - Linux - - Unix-like computer operating system assembled under the model of - free and open-source software development and distribution. - - Linux bridge - - Software that enables multiple VMs to share a single physical - NIC within Compute. - - Linux Bridge neutron plug-in - - Enables a Linux bridge to understand a Networking port, - interface attachment, and other abstractions. - - Linux containers (LXC) - - An OpenStack-supported hypervisor. - - live migration - - The ability within Compute to move running virtual machine - instances from one host to another with only a small service - interruption during switchover. - - load balancer - - A load balancer is a logical device that belongs to a cloud - account. 
It is used to distribute workloads between multiple back-end - systems or services, based on the criteria defined as part of its - configuration. - - load balancing - - The process of spreading client requests between two or more - nodes to improve performance and availability. - - Load-Balancer-as-a-Service (LBaaS) - - Enables Networking to distribute incoming requests evenly - between designated instances. - - Load-balancing service (octavia) - - The project that aims to provide scalable, on demand, self service - access to load-balancer services, in a technology-agnostic manner. - - Logical Volume Manager (LVM) - - Provides a method of allocating space on mass-storage - devices that is more flexible than conventional partitioning - schemes. - -M -~ - -.. glossary:: - - magnum - - Code name for the :term:`Containers Infrastructure Management - service`. - - management API - - Alternative term for an admin API. - - management network - - A network segment used for administration, not accessible to the - public Internet. - - manager - - Logical groupings of related code, such as the Block Storage - volume manager or network manager. - - manifest - - Used to track segments of a large object within Object - Storage. - - manifest object - - A special Object Storage object that contains the manifest for a - large object. - - manila - - Codename for OpenStack :term:`Shared File Systems service`. - - manila-share - - Responsible for managing Shared File System Service devices, specifically - the back-end devices. - - maximum transmission unit (MTU) - - Maximum frame or packet size for a particular network - medium. Typically 1500 bytes for Ethernet networks. - - mechanism driver - - A driver for the Modular Layer 2 (ML2) neutron plug-in that - provides layer-2 connectivity for virtual instances. A - single OpenStack installation can use multiple mechanism - drivers. - - melange - - Project name for OpenStack Network Information Service. To be - merged with Networking.
- - membership - - The association between an Image service VM image and a project. - Enables images to be shared with specified projects. - - membership list - - A list of projects that can access a given VM image within Image - service. - - memcached - - A distributed memory object caching system that is used by - Object Storage for caching. - - memory overcommit - - The ability to start new VM instances based on the actual memory - usage of a host, as opposed to basing the decision on the amount of - RAM each running instance thinks it has available. Also known as RAM - overcommit. - - message broker - - The software package used to provide AMQP messaging capabilities - within Compute. Default package is RabbitMQ. - - message bus - - The main virtual communication line used by all AMQP messages - for inter-cloud communications within Compute. - - message queue - - Passes requests from clients to the appropriate workers and - returns the output to the client after the job completes. - - Message service (zaqar) - - The project that provides a messaging service that affords a - variety of distributed application patterns in an efficient, - scalable and highly available manner, and to create and maintain - associated Python libraries and documentation. - - Meta-Data Server (MDS) - - Stores CephFS metadata. - - Metadata agent - - OpenStack Networking agent that provides metadata - services for instances. - - migration - - The process of moving a VM instance from one host to - another. - - mistral - - Code name for :term:`Workflow service `. - - Mitaka - - The code name for the thirteenth release of OpenStack. - The design summit took place in Tokyo, Japan. Mitaka - is a city in Tokyo. - - Modular Layer 2 (ML2) neutron plug-in - - Can concurrently use multiple layer-2 networking technologies, - such as 802.1Q and VXLAN, in Networking. - - monasca - - Codename for OpenStack :term:`Monitoring `. 
- - Monitor (LBaaS) - - LBaaS feature that provides availability monitoring using the - ``ping`` command, TCP, and HTTP/HTTPS GET. - - Monitor (Mon) - - A Ceph component that communicates with external clients, checks - data state and consistency, and performs quorum functions. - - Monitoring (monasca) - - The OpenStack service that provides a multi-project, highly scalable, - performant, fault-tolerant monitoring-as-a-service solution for metrics, - complex event processing and logging. To build an extensible platform for - advanced monitoring services that can be used by both operators and - projects to gain operational insight and visibility, ensuring availability - and stability. - - multi-factor authentication - - Authentication method that uses two or more credentials, such as - a password and a private key. Currently not supported in - Identity. - - multi-host - - High-availability mode for legacy (nova) networking. - Each compute node handles NAT and DHCP and acts as a gateway - for all of the VMs on it. A networking failure on one compute - node doesn't affect VMs on other compute nodes. - - multinic - - Facility in Compute that allows each virtual machine instance to - have more than one VIF connected to it. - - murano - - Codename for the :term:`Application Catalog service `. - -N -~ - -.. glossary:: - - Nebula - - Released as open source by NASA in 2010 and is the basis for - Compute. - - netadmin - - One of the default roles in the Compute RBAC system. Enables the - user to allocate publicly accessible IP addresses to instances and - change firewall rules. - - NetApp volume driver - - Enables Compute to communicate with NetApp storage devices - through the NetApp OnCommand - Provisioning Manager. - - network - - A virtual network that provides connectivity between entities. - For example, a collection of virtual ports that share network - connectivity. In Networking terminology, a network is always a layer-2 - network. 
- - Network Address Translation (NAT) - - Process of modifying IP address information while in transit. - Supported by Compute and Networking. - - network controller - - A Compute daemon that orchestrates the network configuration of - nodes, including IP addresses, VLANs, and bridging. Also manages - routing for both public and private networks. - - Network File System (NFS) - - A method for making file systems available over the network. - Supported by OpenStack. - - network ID - - Unique ID assigned to each network segment within Networking. - Same as network UUID. - - network manager - - The Compute component that manages various network components, - such as firewall rules, IP address allocation, and so on. - - network namespace - - Linux kernel feature that provides independent virtual - networking instances on a single host with separate routing - tables and interfaces. Similar to virtual routing and forwarding - (VRF) services on physical network equipment. - - network node - - Any compute node that runs the network worker daemon. - - network segment - - Represents a virtual, isolated OSI layer-2 subnet in - Networking. - - Network Service Header (NSH) - - Provides a mechanism for metadata exchange along the - instantiated service path. - - Network Time Protocol (NTP) - - Method of keeping a clock for a host or node correct via - communication with a trusted, accurate time source. - - network UUID - - Unique ID for a Networking network segment. - - network worker - - The ``nova-network`` worker daemon; provides - services such as giving an IP address to a booting nova - instance. - - Networking API (Neutron API) - - API used to access OpenStack Networking. Provides an extensible - architecture to enable custom plug-in creation. - - Networking service (neutron) - - The OpenStack project which implements services and associated - libraries to provide on-demand, scalable, and technology-agnostic - network abstraction. 
- - neutron - - Codename for OpenStack :term:`Networking service `. - - neutron API - - An alternative name for :term:`Networking API `. - - neutron manager - - Enables Compute and Networking integration, which enables - Networking to perform network management for guest VMs. - - neutron plug-in - - Interface within Networking that enables organizations to create - custom plug-ins for advanced features, such as QoS, ACLs, or - IDS. - - Newton - - The code name for the fourteenth release of OpenStack. The - design summit took place in Austin, Texas, US. The - release is named after "Newton House" which is located at - 1013 E. Ninth St., Austin, TX. which is listed on the - National Register of Historic Places. - - Nexenta volume driver - - Provides support for NexentaStor devices in Compute. - - NFV Orchestration Service (tacker) - - OpenStack service that aims to implement Network Function Virtualization - (NFV) orchestration services and libraries for end-to-end life-cycle - management of network services and Virtual Network Functions (VNFs). - - Nginx - - An HTTP and reverse proxy server, a mail proxy server, and a generic - TCP/UDP proxy server. - - No ACK - - Disables server-side message acknowledgment in the Compute - RabbitMQ. Increases performance but decreases reliability. - - node - - A VM instance that runs on a host. - - non-durable exchange - - Message exchange that is cleared when the service restarts. Its - data is not written to persistent storage. - - non-durable queue - - Message queue that is cleared when the service restarts. Its - data is not written to persistent storage. - - non-persistent volume - - Alternative term for an ephemeral volume. - - north-south traffic - - Network traffic between a user or client (north) and a - server (south), or traffic into the cloud (south) and - out of the cloud (north). See also east-west traffic. - - nova - - Codename for OpenStack :term:`Compute service `. 
- - Nova API - - Alternative term for the :term:`Compute API `. - - nova-network - - A Compute component that manages IP address allocation, - firewalls, and other network-related tasks. This is the legacy - networking option and an alternative to Networking. - -O -~ - -.. glossary:: - - object - - A BLOB of data held by Object Storage; can be in any - format. - - object auditor - - Opens all objects for an object server and verifies the MD5 - hash, size, and metadata for each object. - - object expiration - - A configurable option within Object Storage to automatically - delete objects after a specified amount of time has passed or a - certain date is reached. - - object hash - - Unique ID for an Object Storage object. - - object path hash - - Used by Object Storage to determine the location of an object in - the ring. Maps objects to partitions. - - object replicator - - An Object Storage component that copies an object to remote - partitions for fault tolerance. - - object server - - An Object Storage component that is responsible for managing - objects. - - Object Storage API - - API used to access OpenStack :term:`Object Storage`. - - Object Storage Device (OSD) - - The Ceph storage daemon. - - Object Storage service (swift) - - The OpenStack core project that provides eventually consistent - and redundant storage and retrieval of fixed digital content. - - object versioning - - Allows a user to set a flag on an :term:`Object Storage` container so that all objects within the container are - versioned. - - Ocata - - The code name for the fifteenth release of OpenStack. The - design summit will take place in Barcelona, Spain. Ocata is - a beach north of Barcelona. - - Octavia - - Code name for the :term:`Load-balancing service - `. - - Oldie - - Term for an :term:`Object Storage` - process that runs for a long time. Can indicate a hung process. 
- - Open Cloud Computing Interface (OCCI) - - A standardized interface for managing compute, data, and network - resources, currently unsupported in OpenStack. - - Open Virtualization Format (OVF) - - Standard for packaging VM images. Supported in OpenStack. - - Open vSwitch - - Open vSwitch is a production quality, multilayer virtual - switch licensed under the open source Apache 2.0 license. It - is designed to enable massive network automation through - programmatic extension, while still supporting standard - management interfaces and protocols (for example NetFlow, - sFlow, SPAN, RSPAN, CLI, LACP, 802.1ag). - - Open vSwitch (OVS) agent - - Provides an interface to the underlying Open vSwitch service for - the Networking plug-in. - - Open vSwitch neutron plug-in - - Provides support for Open vSwitch in Networking. - - OpenLDAP - - An open source LDAP server. Supported by both Compute and - Identity. - - OpenStack - - OpenStack is a cloud operating system that controls large pools - of compute, storage, and networking resources throughout a data - center, all managed through a dashboard that gives administrators - control while empowering their users to provision resources through a - web interface. OpenStack is an open source project licensed under the - Apache License 2.0. - - OpenStack code name - - Each OpenStack release has a code name. Code names ascend in - alphabetical order: Austin, Bexar, Cactus, Diablo, Essex, - Folsom, Grizzly, Havana, Icehouse, Juno, Kilo, Liberty, - Mitaka, Newton, Ocata, Pike, Queens, and Rocky. - Code names are cities or counties near where the - corresponding OpenStack design summit took place. An - exception, called the Waldon exception, is granted to - elements of the state flag that sound especially cool. Code - names are chosen by popular vote. - - openSUSE - - A Linux distribution that is compatible with OpenStack. - - operator - - The person responsible for planning and maintaining an OpenStack - installation. 
- - optional service - - An official OpenStack service defined as optional by - DefCore Committee. Currently, consists of - Dashboard (horizon), Telemetry service (Telemetry), - Orchestration service (heat), Database service (trove), - Bare Metal service (ironic), and so on. - - Orchestration service (heat) - - The OpenStack service which orchestrates composite cloud - applications using a declarative template format through - an OpenStack-native REST API. - - orphan - - In the context of Object Storage, this is a process that is not - terminated after an upgrade, restart, or reload of the service. - - Oslo - - Codename for the :term:`Common Libraries project`. - -P -~ - -.. glossary:: - - panko - - Part of the OpenStack :term:`Telemetry service `; provides event storage. - - parent cell - - If a requested resource, such as CPU time, disk storage, or - memory, is not available in the parent cell, the request is forwarded - to associated child cells. - - partition - - A unit of storage within Object Storage used to store objects. - It exists on top of devices and is replicated for fault - tolerance. - - partition index - - Contains the locations of all Object Storage partitions within - the ring. - - partition shift value - - Used by Object Storage to determine which partition data should - reside on. - - path MTU discovery (PMTUD) - - Mechanism in IP networks to detect end-to-end MTU and adjust - packet size accordingly. - - pause - - A VM state where no changes occur (no changes in memory, network - communications stop, etc); the VM is frozen but not shut down. - - PCI passthrough - - Gives guest VMs exclusive access to a PCI device. Currently - supported in OpenStack Havana and later releases. - - persistent message - - A message that is stored both in memory and on disk. The message - is not lost after a failure or restart. - - persistent volume - - Changes to these types of disk volumes are saved. 
- - personality file - - A file used to customize a Compute instance. It can be used to - inject SSH keys or a specific network configuration. - - Pike - - The code name for the sixteenth release of OpenStack. The design - summit will take place in Boston, Massachusetts, US. The release - is named after the Massachusetts Turnpike, abbreviated commonly - as the Mass Pike, which is the easternmost stretch of - Interstate 90. - - Platform-as-a-Service (PaaS) - - Provides to the consumer an operating system and, often, a - language runtime and libraries (collectively, the "platform") - upon which they can run their own application code, without - providing any control over the underlying infrastructure. - Examples of Platform-as-a-Service providers include Cloud Foundry - and OpenShift. - - plug-in - - Software component providing the actual implementation for - Networking APIs, or for Compute APIs, depending on the context. - - policy service - - Component of Identity that provides a rule-management - interface and a rule-based authorization engine. - - policy-based routing (PBR) - - Provides a mechanism to implement packet forwarding and routing - according to the policies defined by the network administrator. - - pool - - A logical set of devices, such as web servers, that you - group together to receive and process traffic. The load - balancing function chooses which member of the pool handles - the new requests or connections received on the VIP - address. Each VIP has one pool. - - pool member - - An application that runs on the back-end server in a - load-balancing system. - - port - - A virtual network port within Networking; VIFs / vNICs are - connected to a port. - - port UUID - - Unique ID for a Networking port. - - preseed - - A tool to automate system configuration and installation on - Debian-based Linux distributions. - - private image - - An Image service VM image that is only available to specified - projects. 
- - private IP address - - An IP address used for management and administration, not - available to the public Internet. - - private network - - The Network Controller provides virtual networks to enable - compute servers to interact with each other and with the public - network. All machines must have a public and private network - interface. A private network interface can be a flat or VLAN network - interface. A flat network interface is controlled by the - flat_interface with flat managers. A VLAN network interface is - controlled by the ``vlan_interface`` option with VLAN - managers. - - project - - Projects represent the base unit of “ownership” in OpenStack, - in that all resources in OpenStack should be owned by a specific project. - In OpenStack Identity, a project must be owned by a specific domain. - - project ID - - Unique ID assigned to each project by the Identity service. - - project VPN - - Alternative term for a cloudpipe. - - promiscuous mode - - Causes the network interface to pass all traffic it - receives to the host rather than passing only the frames - addressed to it. - - protected property - - Generally, extra properties on an Image service image to - which only cloud administrators have access. Limits which user - roles can perform CRUD operations on that property. The cloud - administrator can configure any image property as - protected. - - provider - - An administrator who has access to all hosts and - instances. - - proxy node - - A node that provides the Object Storage proxy service. - - proxy server - - Users of Object Storage interact with the service through the - proxy server, which in turn looks up the location of the requested - data within the ring and returns the results to the user. - - public API - - An API endpoint used for both service-to-service communication - and end-user interactions. - - public image - - An Image service VM image that is available to all - projects. 
- - public IP address - - An IP address that is accessible to end-users. - - public key authentication - - Authentication method that uses keys rather than - passwords. - - public network - - The Network Controller provides virtual networks to enable - compute servers to interact with each other and with the public - network. All machines must have a public and private network - interface. The public network interface is controlled by the - ``public_interface`` option. - - Puppet - - An operating system configuration-management tool supported by - OpenStack. - - Python - - Programming language used extensively in OpenStack. - -Q -~ - -.. glossary:: - - QEMU Copy On Write 2 (QCOW2) - - One of the VM image disk formats supported by Image - service. - - Qpid - - Message queue software supported by OpenStack; an alternative to - RabbitMQ. - - Quality of Service (QoS) - - The ability to guarantee certain network or storage requirements to - satisfy a Service Level Agreement (SLA) between an application provider - and end users. - Typically includes performance requirements like networking bandwidth, - latency, jitter correction, and reliability as well as storage - performance in Input/Output Operations Per Second (IOPS), throttling - agreements, and performance expectations at peak load. - - quarantine - - If Object Storage finds objects, containers, or accounts that - are corrupt, they are placed in this state, are not replicated, cannot - be read by clients, and a correct copy is re-replicated. - - Queens - - The code name for the seventeenth release of OpenStack. The - design summit will take place in Sydney, Australia. The release - is named after the Queens Pound river in the South Coast region - of New South Wales. - - Quick EMUlator (QEMU) - - QEMU is a generic and open source machine emulator and - virtualizer. - One of the hypervisors supported by OpenStack, generally used - for development purposes. 
- - quota - - In Compute and Block Storage, the ability to set resource limits - on a per-project basis. - -R -~ - -.. glossary:: - - RabbitMQ - - The default message queue software used by OpenStack. - - Rackspace Cloud Files - - Released as open source by Rackspace in 2010; the basis for - Object Storage. - - RADOS Block Device (RBD) - - Ceph component that enables a Linux block device to be striped - over multiple distributed data stores. - - radvd - - The router advertisement daemon, used by the Compute VLAN - manager and FlatDHCP manager to provide routing services for VM - instances. - - rally - - Codename for the :term:`Benchmark service`. - - RAM filter - - The Compute setting that enables or disables RAM - overcommitment. - - RAM overcommit - - The ability to start new VM instances based on the actual memory - usage of a host, as opposed to basing the decision on the amount of - RAM each running instance thinks it has available. Also known as - memory overcommit. - - rate limit - - Configurable option within Object Storage to limit database - writes on a per-account and/or per-container basis. - - raw - - One of the VM image disk formats supported by Image service; an - unstructured disk image. - - rebalance - - The process of distributing Object Storage partitions across all - drives in the ring; used during initial ring creation and after ring - reconfiguration. - - reboot - - Either a soft or hard reboot of a server. With a soft reboot, - the operating system is signaled to restart, which enables a graceful - shutdown of all processes. A hard reboot is the equivalent of power - cycling the server. The virtualization platform should ensure that the - reboot action has completed successfully, even in cases in which the - underlying domain/VM is paused or halted/stopped. - - rebuild - - Removes all data on the server and replaces it with the - specified image. Server ID and IP addresses remain the same. 
- - Recon - - An Object Storage component that collects meters. - - record - - Belongs to a particular domain and is used to specify - information about the domain. - There are several types of DNS records. Each record type contains - particular information used to describe the purpose of that record. - Examples include mail exchange (MX) records, which specify the mail - server for a particular domain; and name server (NS) records, which - specify the authoritative name servers for a domain. - - record ID - - A number within a database that is incremented each time a - change is made. Used by Object Storage when replicating. - - Red Hat Enterprise Linux (RHEL) - - A Linux distribution that is compatible with OpenStack. - - reference architecture - - A recommended architecture for an OpenStack cloud. - - region - - A discrete OpenStack environment with dedicated API endpoints - that typically shares only the Identity (keystone) with other - regions. - - registry - - Alternative term for the Image service registry. - - registry server - - An Image service that provides VM image metadata information to - clients. - - Reliable, Autonomic Distributed Object Store - (RADOS) - - A collection of components that provides object storage within - Ceph. Similar to OpenStack Object Storage. - - Remote Procedure Call (RPC) - - The method used by the Compute RabbitMQ for intra-service - communications. - - replica - - Provides data redundancy and fault tolerance by creating copies - of Object Storage objects, accounts, and containers so that they are - not lost when the underlying storage fails. - - replica count - - The number of replicas of the data in an Object Storage - ring. - - replication - - The process of copying data to a separate physical device for - fault tolerance and performance. - - replicator - - The Object Storage back-end process that creates and manages - object replicas. - - request ID - - Unique ID assigned to each request sent to Compute. 
- - rescue image - - A special type of VM image that is booted when an instance is - placed into rescue mode. Allows an administrator to mount the file - systems for an instance to correct the problem. - - resize - - Converts an existing server to a different flavor, which scales - the server up or down. The original server is saved to enable rollback - if a problem occurs. All resizes must be tested and explicitly - confirmed, at which time the original server is removed. - - RESTful - - A kind of web service API that uses REST, or Representational - State Transfer. REST is the style of architecture for hypermedia - systems that is used for the World Wide Web. - - ring - - An entity that maps Object Storage data to partitions. A - separate ring exists for each service, such as account, object, and - container. - - ring builder - - Builds and manages rings within Object Storage, assigns - partitions to devices, and pushes the configuration to other storage - nodes. - - Rocky - - The code name for the eighteenth release of OpenStack. The - design summit will take place in Vancouver, Canada. The release - is named after the Rocky Mountains. - - role - - A personality that a user assumes to perform a specific set of - operations. A role includes a set of rights and privileges. A user - assuming that role inherits those rights and privileges. - - Role Based Access Control (RBAC) - - Provides a predefined list of actions that the user can perform, - such as start or stop VMs, reset passwords, and so on. Supported in - both Identity and Compute and can be configured using the dashboard. - - role ID - - Alphanumeric ID assigned to each Identity service role. - - Root Cause Analysis (RCA) service (Vitrage) - - OpenStack project that aims to organize, analyze and visualize OpenStack - alarms and events, yield insights regarding the root cause of problems - and deduce their existence before they are directly detected. 
- - rootwrap - - A feature of Compute that allows the unprivileged "nova" user to - run a specified list of commands as the Linux root user. - - round-robin scheduler - - Type of Compute scheduler that evenly distributes instances - among available hosts. - - router - - A physical or virtual network device that passes network - traffic between different networks. - - routing key - - The Compute direct exchanges, fanout exchanges, and topic - exchanges use this key to determine how to process a message; - processing varies depending on exchange type. - - RPC driver - - Modular system that allows the underlying message queue software - of Compute to be changed. For example, from RabbitMQ to ZeroMQ or - Qpid. - - rsync - - Used by Object Storage to push object replicas. - - RXTX cap - - Absolute limit on the amount of network traffic a Compute VM - instance can send and receive. - - RXTX quota - - Soft limit on the amount of network traffic a Compute VM - instance can send and receive. - -S -~ - -.. glossary:: - - sahara - - Codename for the :term:`Data Processing service`. - - SAML assertion - - Contains information about a user as provided by the identity - provider. It is an indication that a user has been authenticated. - - scheduler manager - - A Compute component that determines where VM instances should - start. Uses modular design to support a variety of scheduler - types. - - scoped token - - An Identity service API access token that is associated with a - specific project. - - scrubber - - Checks for and deletes unused VMs; the component of Image - service that implements delayed delete. - - secret key - - String of text known only by the user; used along with an access - key to make requests to the Compute API. - - secure boot - - Process whereby the system firmware validates the authenticity of - the code involved in the boot process. 
- - secure shell (SSH) - - Open source tool used to access remote hosts through an - encrypted communications channel. SSH key injection is supported by - Compute. - - security group - - A set of network traffic filtering rules that are applied to a - Compute instance. - - segmented object - - An Object Storage large object that has been broken up into - pieces. The re-assembled object is called a concatenated - object. - - self-service - - For IaaS, ability for a regular (non-privileged) account to - manage a virtual infrastructure component such as networks without - involving an administrator. - - SELinux - - Linux kernel security module that provides the mechanism for - supporting access control policies. - - senlin - - Code name for the :term:`Clustering service - `. - - server - - Computer that provides explicit services to the client software - running on that system, often managing a variety of computer - operations. - A server is a VM instance in the Compute system. Flavor and - image are requisite elements when creating a server. - - server image - - Alternative term for a VM image. - - server UUID - - Unique ID assigned to each guest VM instance. - - service - - An OpenStack service, such as Compute, Object Storage, or Image - service. Provides one or more endpoints through which users can access - resources and perform operations. - - service catalog - - Alternative term for the Identity service catalog. - - Service Function Chain (SFC) - - For a given service, SFC is the abstracted view of the required - service functions and the order in which they are to be applied. - - service ID - - Unique ID assigned to each service that is available in the - Identity service catalog. - - Service Level Agreement (SLA) - - Contractual obligations that ensure the availability of a - service. - - service project - - Special project that contains all services that are listed in the - catalog. 
- - service provider - - A system that provides services to other system entities. In - case of federated identity, OpenStack Identity is the service - provider. - - service registration - - An Identity service feature that enables services, such as - Compute, to automatically register with the catalog. - - service token - - An administrator-defined token used by Compute to communicate - securely with the Identity service. - - session back end - - The method of storage used by horizon to track client sessions, - such as local memory, cookies, a database, or memcached. - - session persistence - - A feature of the load-balancing service. It attempts to force - subsequent connections to a service to be redirected to the same node - as long as it is online. - - session storage - - A horizon component that stores and tracks client session - information. Implemented through the Django sessions framework. - - share - - A remote, mountable file system in the context of the :term:`Shared - File Systems service`. You can - mount a share to, and access a share from, several hosts by several - users at a time. - - share network - - An entity in the context of the :term:`Shared File Systems - service` that encapsulates - interaction with the Networking service. If the driver you selected - runs in the mode requiring such kind of interaction, you need to - specify the share network to create a share. - - Shared File Systems API - - A Shared File Systems service that provides a stable RESTful API. - The service authenticates and routes requests throughout the Shared - File Systems service. There is python-manilaclient to interact with - the API. - - Shared File Systems service (manila) - - The service that provides a set of services for - management of shared file systems in a multi-project cloud - environment, similar to how OpenStack provides block-based storage - management through the OpenStack :term:`Block Storage service` project. 
- With the Shared File Systems service, you can create a remote file - system and mount the file system on your instances. You can also - read and write data from your instances to and from your file system. - - shared IP address - - An IP address that can be assigned to a VM instance within the - shared IP group. Public IP addresses can be shared across multiple - servers for use in various high-availability scenarios. When an IP - address is shared to another server, the cloud network restrictions - are modified to enable each server to listen to and respond on that IP - address. You can optionally specify that the target server network - configuration be modified. Shared IP addresses can be used with many - standard heartbeat facilities, such as keepalive, that monitor for - failure and manage IP failover. - - shared IP group - - A collection of servers that can share IPs with other members of - the group. Any server in a group can share one or more public IPs with - any other server in the group. With the exception of the first server - in a shared IP group, servers must be launched into shared IP groups. - A server may be a member of only one shared IP group. - - shared storage - - Block storage that is simultaneously accessible by multiple - clients, for example, NFS. - - Sheepdog - - Distributed block storage system for QEMU, supported by - OpenStack. - - Simple Cloud Identity Management (SCIM) - - Specification for managing identity in the cloud, currently - unsupported by OpenStack. - - Simple Protocol for Independent Computing Environments (SPICE) - - SPICE provides remote desktop access to guest virtual machines. It - is an alternative to VNC. SPICE is supported by OpenStack. - - Single-root I/O Virtualization (SR-IOV) - - A specification that, when implemented by a physical PCIe - device, enables it to appear as multiple separate PCIe devices. 
This - enables multiple virtualized guests to share direct access to the - physical device, offering improved performance over an equivalent - virtual device. Currently supported in OpenStack Havana and later - releases. - - SmokeStack - - Runs automated tests against the core OpenStack API; written in - Rails. - - snapshot - - A point-in-time copy of an OpenStack storage volume or image. - Use storage volume snapshots to back up volumes. Use image snapshots - to back up data, or as "gold" images for additional servers. - - soft reboot - - A controlled reboot where a VM instance is properly restarted - through operating system commands. - - Software Development Lifecycle Automation service (solum) - - OpenStack project that aims to make cloud services easier to - consume and integrate with application development process - by automating the source-to-image process, and simplifying - app-centric deployment. - - Software-defined networking (SDN) - - Provides an approach for network administrators to manage computer - network services through abstraction of lower-level functionality. - - SolidFire Volume Driver - - The Block Storage driver for the SolidFire iSCSI storage - appliance. - - solum - - Code name for the :term:`Software Development Lifecycle Automation - service `. - - spread-first scheduler - - The Compute VM scheduling algorithm that attempts to start a new - VM on the host with the least amount of load. - - SQLAlchemy - - An open source SQL toolkit for Python, used in OpenStack. - - SQLite - - A lightweight SQL database, used as the default persistent - storage method in many OpenStack services. - - stack - - A set of OpenStack resources created and managed by the - Orchestration service according to a given template (either an - AWS CloudFormation template or a Heat Orchestration - Template (HOT)). - - StackTach - - Community project that captures Compute AMQP communications; - useful for debugging. 
- - static IP address - - Alternative term for a fixed IP address. - - StaticWeb - - WSGI middleware component of Object Storage that serves - container data as a static web page. - - storage back end - - The method that a service uses for persistent storage, such as - iSCSI, NFS, or local disk. - - storage manager - - A XenAPI component that provides a pluggable interface to - support a wide variety of persistent storage back ends. - - storage manager back end - - A persistent storage method supported by XenAPI, such as iSCSI - or NFS. - - storage node - - An Object Storage node that provides container services, account - services, and object services; controls the account databases, - container databases, and object storage. - - storage services - - Collective name for the Object Storage object services, - container services, and account services. - - strategy - - Specifies the authentication source used by Image service or - Identity. In the Database service, it refers to the extensions - implemented for a data store. - - subdomain - - A domain within a parent domain. Subdomains cannot be - registered. Subdomains enable you to delegate domains. Subdomains can - themselves have subdomains, so third-level, fourth-level, fifth-level, - and deeper levels of nesting are possible. - - subnet - - Logical subdivision of an IP network. - - SUSE Linux Enterprise Server (SLES) - - A Linux distribution that is compatible with OpenStack. - - suspend - - The VM instance is paused and its state is saved to disk of the host. - - swap - - Disk-based virtual memory used by operating systems to provide - more memory than is actually available on the system. - - swauth - - An authentication and authorization service for Object Storage, - implemented through WSGI middleware; uses Object Storage itself as the - persistent backing store. - - swift - - Codename for OpenStack :term:`Object Storage service`. 
- - swift All in One (SAIO) - - Creates a full Object Storage development environment within a - single VM. - - swift middleware - - Collective term for Object Storage components that provide - additional functionality. - - swift proxy server - - Acts as the gatekeeper to Object Storage and is responsible for - authenticating the user. - - swift storage node - - A node that runs Object Storage account, container, and object - services. - - sync point - - Point in time since the last container and accounts database - sync among nodes within Object Storage. - - sysadmin - - One of the default roles in the Compute RBAC system. Enables a - user to add other users to a project, interact with VM images that are - associated with the project, and start and stop VM instances. - - system usage - - A Compute component that, along with the notification system, - collects meters and usage information. This information can be used - for billing. - -T -~ - -.. glossary:: - - tacker - - Code name for the :term:`NFV Orchestration service ` - - Telemetry service (telemetry) - - The OpenStack project which collects measurements of the utilization - of the physical and virtual resources comprising deployed clouds, - persists this data for subsequent retrieval and analysis, and triggers - actions when defined criteria are met. - - TempAuth - - An authentication facility within Object Storage that enables - Object Storage itself to perform authentication and authorization. - Frequently used in testing and development. - - Tempest - - Automated software test suite designed to run against the trunk - of the OpenStack core project. - - TempURL - - An Object Storage middleware component that enables creation of - URLs for temporary object access. - - tenant - - A group of users; used to isolate access to Compute resources. - An alternative term for a project. - - Tenant API - - An API that is accessible to projects. 
- - tenant endpoint - - An Identity service API endpoint that is associated with one or - more projects. - - tenant ID - - An alternative term for :term:`project ID`. - - token - - An alpha-numeric string of text used to access OpenStack APIs - and resources. - - token services - - An Identity service component that manages and validates tokens - after a user or project has been authenticated. - - tombstone - - Used to mark Object Storage objects that have been - deleted; ensures that the object is not updated on another node after - it has been deleted. - - topic publisher - - A process that is created when a RPC call is executed; used to - push the message to the topic exchange. - - Torpedo - - Community project used to run automated tests against the - OpenStack API. - - transaction ID - - Unique ID assigned to each Object Storage request; used for - debugging and tracing. - - transient - - Alternative term for non-durable. - - transient exchange - - Alternative term for a non-durable exchange. - - transient message - - A message that is stored in memory and is lost after the server - is restarted. - - transient queue - - Alternative term for a non-durable queue. - - TripleO - - OpenStack-on-OpenStack program. The code name for the - OpenStack Deployment program. - - trove - - Codename for OpenStack :term:`Database service `. - - trusted platform module (TPM) - - Specialized microprocessor for incorporating cryptographic keys - into devices for authenticating and securing a hardware platform. - -U -~ - -.. glossary:: - - Ubuntu - - A Debian-based Linux distribution. - - unscoped token - - Alternative term for an Identity service default token. - - updater - - Collective term for a group of Object Storage components that - processes queued and failed updates for containers and objects. - - user - - In OpenStack Identity, entities represent individual API - consumers and are owned by a specific domain. 
In OpenStack Compute, - a user can be associated with roles, projects, or both. - - user data - - A blob of data that the user can specify when they launch - an instance. The instance can access this data through the - metadata service or config drive. - Commonly used to pass a shell script that the instance runs on boot. - - User Mode Linux (UML) - - An OpenStack-supported hypervisor. - -V -~ - -.. glossary:: - - VIF UUID - - Unique ID assigned to each Networking VIF. - - Virtual Central Processing Unit (vCPU) - - Subdivides physical CPUs. Instances can then use those - divisions. - - Virtual Disk Image (VDI) - - One of the VM image disk formats supported by Image - service. - - Virtual Extensible LAN (VXLAN) - - A network virtualization technology that attempts to reduce the - scalability problems associated with large cloud computing - deployments. It uses a VLAN-like encapsulation technique to - encapsulate Ethernet frames within UDP packets. - - Virtual Hard Disk (VHD) - - One of the VM image disk formats supported by Image - service. - - virtual IP address (VIP) - - An Internet Protocol (IP) address configured on the load - balancer for use by clients connecting to a service that is load - balanced. Incoming connections are distributed to back-end nodes based - on the configuration of the load balancer. - - virtual machine (VM) - - An operating system instance that runs on top of a hypervisor. - Multiple VMs can run at the same time on the same physical - host. - - virtual network - - An L2 network segment within Networking. - - Virtual Network Computing (VNC) - - Open source GUI and CLI tools used for remote console access to - VMs. Supported by Compute. - - Virtual Network InterFace (VIF) - - An interface that is plugged into a port in a Networking - network. Typically a virtual network interface belonging to a - VM. 
- - virtual networking - - A generic term for virtualization of network functions - such as switching, routing, load balancing, and security using - a combination of VMs and overlays on physical network - infrastructure. - - virtual port - - Attachment point where a virtual interface connects to a virtual - network. - - virtual private network (VPN) - - Provided by Compute in the form of cloudpipes, specialized - instances that are used to create VPNs on a per-project basis. - - virtual server - - Alternative term for a VM or guest. - - virtual switch (vSwitch) - - Software that runs on a host or node and provides the features - and functions of a hardware-based network switch. - - virtual VLAN - - Alternative term for a virtual network. - - VirtualBox - - An OpenStack-supported hypervisor. - - Vitrage - - Code name for the :term:`Root Cause Analysis service `. - - VLAN manager - - A Compute component that provides dnsmasq and radvd and sets up - forwarding to and from cloudpipe instances. - - VLAN network - - The Network Controller provides virtual networks to enable - compute servers to interact with each other and with the public - network. All machines must have a public and private network - interface. A VLAN network is a private network interface, which is - controlled by the ``vlan_interface`` option with VLAN - managers. - - VM disk (VMDK) - - One of the VM image disk formats supported by Image - service. - - VM image - - Alternative term for an image. - - VM Remote Control (VMRC) - - Method to access VM instance consoles using a web browser. - Supported by Compute. - - VMware API - - Supports interaction with VMware products in Compute. - - VMware NSX Neutron plug-in - - Provides support for VMware NSX in Neutron. - - VNC proxy - - A Compute component that provides users access to the consoles - of their VM instances through VNC or VMRC. 
- - volume - - Disk-based data storage generally represented as an iSCSI target - with a file system that supports extended attributes; can be - persistent or ephemeral. - - Volume API - - Alternative name for the Block Storage API. - - volume controller - - A Block Storage component that oversees and coordinates storage - volume actions. - - volume driver - - Alternative term for a volume plug-in. - - volume ID - - Unique ID applied to each storage volume under the Block Storage - control. - - volume manager - - A Block Storage component that creates, attaches, and detaches - persistent storage volumes. - - volume node - - A Block Storage node that runs the cinder-volume daemon. - - volume plug-in - - Provides support for new and specialized types of back-end - storage for the Block Storage volume manager. - - volume worker - - A cinder component that interacts with back-end storage to manage - the creation and deletion of volumes and the creation of compute - volumes, provided by the cinder-volume daemon. - - vSphere - - An OpenStack-supported hypervisor. - -W -~ - -.. glossary:: - - Watcher - - Code name for the :term:`Infrastructure Optimization service - `. - - weight - - Used by Object Storage devices to determine which storage - devices are suitable for the job. Devices are weighted by size. - - weighted cost - - The sum of each cost used when deciding where to start a new VM - instance in Compute. - - weighting - - A Compute process that determines the suitability of the VM - instances for a job for a particular host. For example, not enough RAM - on the host, too many CPUs on the host, and so on. - - worker - - A daemon that listens to a queue and carries out tasks in - response to messages. For example, the cinder-volume worker manages volume - creation and deletion on storage arrays. 
- - Workflow service (mistral) - - The OpenStack service that provides a simple YAML-based language to - write workflows (tasks and transition rules) and a service that - allows to upload them, modify, run them at scale and in a highly - available manner, manage and monitor workflow execution state and state - of individual tasks. - -X -~ - -.. glossary:: - - X.509 - - X.509 is the most widely used standard for defining digital - certificates. It is a data structure that contains the subject - (entity) identifiable information such as its name along with - its public key. The certificate can contain a few other - attributes as well depending upon the version. The most recent - and standard version of X.509 is v3. - - Xen - - Xen is a hypervisor using a microkernel design, providing - services that allow multiple computer operating systems to - execute on the same computer hardware concurrently. - - Xen API - - The Xen administrative API, which is supported by - Compute. - - Xen Cloud Platform (XCP) - - An OpenStack-supported hypervisor. - - Xen Storage Manager Volume Driver - - A Block Storage volume plug-in that enables communication with - the Xen Storage Manager API. - - XenServer - - An OpenStack-supported hypervisor. - - XFS - - High-performance 64-bit file system created by Silicon - Graphics. Excels in parallel I/O operations and data - consistency. - -Z -~ - -.. glossary:: - - zaqar - - Codename for the :term:`Message service `. - - ZeroMQ - - Message queue software supported by OpenStack. An alternative to - RabbitMQ. Also spelled 0MQ. - - Zuul - - Tool used in OpenStack development to ensure correctly ordered - testing of changes in parallel. 
diff --git a/doc/source/images/architecture.png b/doc/source/images/architecture.png deleted file mode 100644 index e02f13b9a9d406a2973bcc0b659bac413c58b219..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 48303 zcmcG$WmJ`4)ICZlr3X+_4y80w5{HzKMq0W%4=p9#0us`Qq?CvPhwhYai9?st-Ob(n z-v7Awy<^<3SBFDHcw+Cp)|zv!xd>NQl!oAt;h>Om8=0>wTKLyYsVMz?v#g;t9q_?8VKxO-*)_g{axrBP0<-IjV$qGC4l) zrfIBIm?Uag&kk?AMb6F+ysqy`RvqU|t1N8tE%J{(e3g!|{l)Jx@Sb&V%`?+we(k33 zPFnK**Gai=c7cDy{Vzno|Brtu+}-T?Ei`zVT3SwKi-#HdUi_P_b!oo4y~>e{^u5r2 z-z;E`leC{yH5x!l>GoA)9^H-=h)CrQ*$VV_HG zIFiG~<)|x$$}jh6{OIUt5}Uq&SvS6(&Qi(mFg&X6%I@CYoV8f+Rt3X>67_%D+Lmvw z&YqgJXT0_@`n0~06pV(B4xYeasU##Q2)&V5O--#0>!*^mv@|wiX)s-|wzl?I;;qcx z#gJccGjUyA-I&+6oSdf!7c^q=K)9%GVupFoce|OY7kagZzS)+aA|m8uT1{kPAyHG5 z>B2tKZEbHpuu+<$6D1yRfjjr~^nCvOncc9dSjqIw6g46gmm=a0jWVf_sse!seAl^0 z2fmbusJ^$i7X<};@bKZo>)+@z2=liL3=G)Xc)g(lOiWDS{ae#jcDfW(i#t1wwv)f= zu2Zhg_GXYsWtg%`PbM=n^YQU9H#aw%UX8Mjj*hx|;+HQU_7_`S)&_Y9p@&CDOweT5*KJ#C_20otR{*(! 
z!T>e_y^zmkLZhOA!ThIZT#n>iT;+fNnjKDr6EKJ}@um4_6%`lf!n1PXpi3K+0`CiB zAp==iS)NC$u3Hm7eXdUXl32yjo2Ag({zze$N-8I_>8}gX%^w~g2fq%n)YQ@{%*hG- zGf=YH6VISRK*IFu)hhvk>At?LZ!HHHdXMew?A+bYy1ToV+QaamuutFrG5SPmi&4bK>3*k&v|e!>21` zWnf=ldl8drzlw^UI`U5$=0y2NN>r8T*N581W@IRCGBPt?o}b57v#_!{nW6f~-5rVC z(Og%a9}S6Yh|5UQg#?y`KA~spOHYWmv$R@G2R$${@}XlSShHpAyD8aFpLN0f)9 zVzdP@mk>3z(eK;aa8guVUES!L(rK&PBQtczK!y(|o8ykSDT9o@7{QtUz2T^*+;xR% zSF~Zx*bhzi-7g?MJZyKSE8idj#tfUhFI-`tDaBQ$M7yAcg?~kY${vq)g z{NHmsX@&CGPH;QSd#??qS34|F3cA6g;ER^`FVc%KAGkK2Esp77Rx(xLb!LAp_~gkG zp{s=HY6qoGl;DkbZ+tRV6a%OHlWlBlwhf-&2bFg>2rx0R=P+266bzqX{hN!GH zQ_5OeSPZ6MnXDGu4=e0uh#}geW`stf9@C7dRqHfS4vXKPaanvKeHG}{oSOP<&{b0M zVfEV@hXr9*SJ&%iQJ;k-ACU<{5}$44D?7U#u!)6*h3~hK2P5C4c@U_{R3h@)+Rt`r z?_c)o##FiGp8_RKGwcCR(lEtzfmSyc-OvT!tDoBC7KI$vqqK&Gh85bfGlu^Eb2@Tj zxSPa2eYm^zk&?ndzVv1=)zBc;!F=KKRz>9r8EyjDcxNWk*x=w`5FVN(2H1tCmnXKJ zxGnTpzx2LEEn7!gc?Ug??BrEeQIVAa*9w(f0UKLr76%z~h%L182FWT>9)T)-mo8`W z)U_cX;}u9gvMI^arArRX;j(J5VJ<02NjmJKH&dcu9VN#Zj}QO7?ZvpyG0a7o$9{Kb zehHBK=TK7B3|S;2G&y;f1ccCnTPlH-bK64p})lam28(X{_a-DI&jK+b1N@ zb&vZbZH{g(9#ocHRAf`#D846*VRDcP(k|T2&Po3A{QmyRux@6kQAZdq^p(XZcXR1E z!*8ms^JdEg-&%?bv$_iwadGhoOlIcqi3wnZVKR77xEuecR6d1mhF170+yn2w%%do4 z?Tv08bX=M(?SSs_*hB=|_#pN2^74QGCo;XMPA{^~oz5w_f0X^b$JYos8@jv$2;_ID z9d1}q&L8s-$O*v$b*-(~l<2tsCD;QgDJdtX14F-?-%Shc?JL_;<;T}D4&LGvJA-4X zM-{pi@-Hv+bCq1j_m(!FwKO)T+ZSwuD1A5|*P7}^cJvICOTiW?>lUHR>hQwMgZm@nscLMTT~*}< z^4dF9)x}rWw^w^-yL07LRrZcB(r3@;=;-Dey(R|+?2x6#{=wztTwWmnX@rJ^?9A4_ zQ&9M6*zB7Csbo40Q&LEMWqFIJG-XId?D3Mry;pqEreKu(gGX*1b$0dYYHGTb){~MY zUQ!z*rL7m+ml!sy!anL<9#g#^6zpD8EY68jRnx#O zZw^uX@k1=8+pTL*Uxk6`PCrKYhCKQ!{dh~L8-;*?K;}Us38n>yDV~wIXoo4f_%|Yr ziT?tJvqFv)q%9;+e_P-Dm%=_q>G!Ia&GqEV^Hm5C|{GkGLh(t zrof{R=S(>9ElBI=b9_&%hXo^6&587FT4sB_1zS zqr~8~+e^C;{AK@en0Go1Cb zUa#&s72MI;`S9@Y@^lAkjxIu*H4@gj%A3^G8@+FPXN29H`7}^PLgK-0U1=%D^Qjgr zwHC~vpdi^0Vpbg#Qe@|M4Gj%pzgwXVtiXB>-)oOkJd=X_e0oxG1CX6v=NmlVAX<^g zl&NqW9F`@^n)>?5_S(4W^U#X8g|zCiDXV(RuBLB{Gvv4{aL#O9qxM8yywcAGm)ZsD 
z&``)@bQ?4AH}h#p0zXkPtc}FyR-nW4V&WLT*uKEzXTlJup)IYg*)b^-F-3l^8`E}M z8e4~l&ZP9D?D&Dp0gSAykhQL^t}q{l=|xcN*1AFgB5t$RYW%<>?6J}b*~M9WK#m1B zGdIu1y+YwEcUf!+Fx46z9}oFW5(oM4;ltZxHuE!RL}cWFe!8H$L9KIM?oeXGZ{Dx( z&vyPeP+w85d|n~`r6pZ;g-3?iYB|Wgc3(Y8Vtn}nA12S8UTblL`+UmZdA$PDcRu#X zw|_KE#(VRP9a)S_OiNPo^78P%a1>H0P#WFZ2zmFe+@Bf>dmQRi$stz|k-Rybikw^= z>yZ-xvr&m+)jT1@!ym-h74XmW0nkBkroy^?0OM=f!$-vLwlPrlaeh*-(M zx9!w;vhRzI!jjfD|7PbgQ~DWp=u4D6F&UVek{1O`l?+LpbdI3~@$vCF)nJ(B0cRL6 z7q6(v7xeXy*NK$PK0`b{bt!#);UcrhW=gMEjX)F?7JA&>zWzmjLiw>XhH9bC)mAh| z3T9?zwn*%=zlgdaSo9gf*8?gTsOf9#@7fgguv(iV(je#R`x~hjYTI!C$QY{jPf>le z&|JrBuDsF+L29+9^qsQwWd z8Hoam6<_-N7z+y}eB_ssg2Hb_?<#=aCx7Xq3ju!7weyAG51s>&p_;NXJ|(5SwRJev z#_DRPf$>x(dd3$GB09Q7{`ImEy5~A5adoa+Fw@RY!NCtQi1u9psiG@4WdgXO!|~wU zhiO|9lqY^63|m`UkH>EeBD+7CT(uV{8Xl0ihCKal9z^aRKzPkAD=W*-&kqWgBLzUs zZ$M^Yc%a}=(eW^opyD|7R=r{?gr9ShgqYZwoEVRcn$<%wOD%lW!D<_B)S2^~j{ua{ zHJMtPa~&-^cV%TYS}I-+?g+orF9@+BBt8K_d`1TDJ9TaCa10i6^u*Yl3n&>6bU1dz zqNlf)Sx~T^c=C<-JD1ZOBI6!UOugBe4L6-y<23p5B~TO+xL^`eQu+D$%Ws-Jk1%$$ z1^$ZCbUhYhuGR{2;z4r#?Zn(^+`xX(RA~VL?Jo# z$zdxY_h%hnNO4xf=6ImFAOa9#r^r~9%MeS9*B3_Zm9brg z7DqEo#>moni@W@EX|{z3-Ks%i3G8T}*0GVDVt`B-=Jjo~DcrV*zPy>&X|PBO3|yZ6 z*{qK(&*yv^dNp9Rtyl5Ewu|jjdgw}!PhV|vc-pHC+)K_mZuA^rsVVdJ#*Ts?yi-*+ ze|~l1^|C>u^&3KL6fGUF+A;F|an8qD|IEv$=MQ`ddNv-P9@m$UkWfq!ZF*&If;}4| z*7jC;ymPTT0Jr0wivN%J2H%;%Kd?s0fg|q~v505|Xul{{V~tNe|=_ zK4D>DfMs61c)`N54YIhHm{_&_+~L_-v(rjwX66FO)jrIz-ctoweTv!z?xA`;(lg)!Kq=VzmWtGYGU~qUJ~}Ad z{6`6%D;g4{d(LwazbT9iINuT_Rzd6giKpH+LnCj6w)_bksk;Sa3N5S~lTDpyx3}ki z-1)cCGya^e*qa;rh!D$>@i^P1kcT~Xx3Hj>5hc#`Jl}tAgtFp$cY6Z>34od;Rl+20`9G9*yjx2{VTurfOG=J%}7C0h-RuUM9 z<|so({Tx8_1c(VZ#U!@QTgDN;nSWSJ52%zEi1{h7vhKxNT~In;g};=jq_eFq->cB7 z&6DiYAa@fHsx-MC)82V6STb*hCn8P?(`$GVVnYrN9z)Z}K_Mm+S_5%!XdM76wY@__ z(%&c*y!bGkoSc9#VzmLV(yw2?0Q7Qma^iPd?f_CnS%3f^i&aOHYB6~<@0dUnIF8$j znY7E$<;=!iK_d?4*!A@-_m$Fj)$fv_OX1E`3mn}Z?~jOoqF}ee9h;v;p;I>EiwZ;s z8@H!-B+K>ow)^hJ)um|#rgN>8Pap_UPkTq{gAd);I#jSmsB6`}d 
z4euGiC>c%wdk(L1Ic&^)<#!=P?%f_O4)g&|_Z|TvqVYxsp}m5Jz%Ix6v*&KhD>@35 zJGklSLTKNP<{Eb2!9HsG-JDGR&@4e95UTIqL6abgii!aN0pPqk_hAUkQ=hFsmY9ZE zt9Lq9$&8zf z6PNS%si}?2xW}TvS}xNDb@Er%>;o#uh=N+>|NUm9ukL$@}QZ6Sl?ReLLRf@L1a&a*y34taCS=f6%3?V+R z>7oNIgyRheaf4%}JEgsMg_ro?p!l7?IDdPU(80D(&CHBr`B(^rG*0`4IBuK;`fY}w z`)+UI%lY~F=g*&?KPxLMJ9oP$Hk11D-M5&y91GSoH<4#%Bop4gVjNfN47p!T&;v$C@C@DLe>IbhO~ z?Zb!fbBz;XsUCW;Im5q~b@0v-Xq2$-SfF&JfBkASVVoa;s%1%PtF;NZbANw-F;#DO zxBo*-4$((rVhaAj#u&m^i>>I(TEQV9bCyh>@-SJlaW_}WYuMzy#f^=PeSL+ioUR#^ zGFqfjElzi4CjBjg?#cAWObE9hpv^43v$Ng+hr?hnfE&dlDd$mGgjsmDU4i~(Z*Om4 zKv}k8^)5LnDd}byoQ2-LzPH*Yr}%&XHW+X0%v7tZtJgdLsOb`bAfU0neEG7B%2iHI z&QYG6d?92jWbb5uzqGtux5m*JJp)3vK-~_Hjk|y8UW{7f_OJXdEG#S<#ahPZ<{=hW zi7z#W=cQkQEJ%s_04sn?TUps(8V`lqjY^6tl+x{Pr7MQ7Jrk74!+(7M^E3nLvx~~l z+jL_KZ%H2wgMOH-yY=&uAE?fkS6B6Qb-O?h0^AOWq${b~I*Of8pw=Ao`NVy60K^m? zs|nN6(XpSdz!nDD7K7+VfTO}fL+|w%I=Z~Pyw-S+%VQ=d8X%4vEeGIyHP-U>K;J_K zzZS*fK)FLPc$kGof|}Xe(-V-4@^`UxUuAY~ZV0^5_4z>%Cf@YSOrHp}aDn=@BGn0W zb1-!28$hZ9NFYkUtWHc_cqz6>E`;{ey)y) zknla&60oQu-skwY5_)=iCML^Z3Ai{p7g2@0PFXVPwEPf9HV`9ge( zPtnL|5s1$~iJMqQ0;Tb~n&BLJx5QWft>b<{@)o~q%;ga>M5 zNPo;qHaIDV8SRw-vA6;~BhyR6=47d>gujUp$C{quibA;mf+#%&5E_*Mvo0?$XM)l` zpY`KwOn~I{wzrEL8ZP|)odRe#eQDKJ=0eUnmdY<&L^HzuySr@ru_UIbv0*K$?KWPX zp5eV1UtQn@d3imIP+I6hDPZs5OuU21ym_hlt7F5?5ms3AeIa~kNi(-xsRxN9HT;>E zSLwE+BQ)e{jE?h|yWIPCT;)vV7n_o~y0!jFj%_s!m0IG%b1$+?#kmh;j~yNB_*`z~ zBqj5VztN`-=vOWL)2YWzUuDzoB9F3sL4A6Ff%+@}+`JV7z{+?AB?F+T6T@2!w-W%$ zf;FM?E*GkN8XgLRk4QJF=>+Cl5M<;BZ5TT$?6~R#~{3_(P7Mt9m2>s#9rzr4)59 zMo(Gm)D)<^eU(P&38|^F8oTlQ_fHV8i0b}VB|16@KRH z_tRmX?j6YQjhDDjGa%PbQB`1LF?#y?KY#vAX3=hI4FyR;zs51!4c-FNHWF8Z!Dnln z0MEWZ$4`@kuQ5`gk*jOGR<^dLmKGNIGVoHZT1kEJEdX(4F(xI%j7K(q_Sa9dzNh~( zNc4Z-0Ax&bQ^-^NKmDbsL(yqzGXR>|wqjflba%JV;b%Z_adANz5%NCg*aUbQHr92= z=mry#W8a);Oztm3#bZ9e`}p|r<9#g+H8m`*c_R*^!o8cvdxGBq98DY0&o3;jFt6r% zXB=_yW=?TV(Eta7LBnn~N<6*pDYFptoDMAqUP#sGcSqStp)u3dg;{+s=CPchCIMOH z-_DM{AS~8QEO5&4U|G_G)d)S-@o)DNI3^}$pUW-CycZVey(jff*q#n4$!BpH+Qo=X 
z2k!U(LhY0yMC2HtKzoom8v(cs&^?JS1;?-1bi}gL?bn`|=6|gv?)+R}+)wM$@S`Xn z_lF%o;r2mn9t{EDtoMdqs(^Nyy!_)qtv7GbwmpCpZDm!MnVDHoz|arcUe#dTz4qo` z`vr(g&{!z`an*WTjAl~Ir;P8d{|6CzMqIXU$c3)XeKqO>O_CO{<)zMy9!PPvAZo}{w0 zK5t3w@9PWFMRA}Gql+6seJsjT`aCg@FbPX@d1a-&z1{e7DI~U;;}R*ZvDOjmbR0*E;)8@9jR5f3|v>n^e1$MUszs z==UE{a5-Vn00??q77*?egy(d9?u1-#X%UTMBvcdyPY(Jkk6*(Dv;)&*XIe7KQ5i1S zlN{v{0qGtjt|_y^DCoartgV^YAl}g(9UbfIR?4mI?LqPhaYrW!jL^S=O{l+j7iB^B zcWh*9B<-1p=#=|%V7|sXeyy+*1LxE*Klzh$F*(m5q@KZS%GHu{y)QBqI|#Xvhe(>JO#I(aX1%#k$%C+!5n3}_A`u$l6S9%8``j!OZF^A12Fl?VVj&R|0v-b z%t;aG19{c^V1+GC@xNamE;H@AS3dvsCT3+(pYj*e?r?25y{Pa%!9)kBX^}YAI}?eQ z8T5(cznvrdO|F4Dr;v=^W}XajB;-zmZH-l4c`k%4eJ28XASJ(7kdKOxN}q4Ctp*Ge zVB&NL$fjVMh?$z6$I$Hufq=;_D}BB!EA}W|qyKH@a7s-9kr@=KT0fD&@qgHS`U9cJ zyp%y56D#gOgxJJ=()fMf*bap3LzScMub@&IU^?&^U7RMLN;ZGWQRH7DA_8Z><zP9wGc%ezD#rUEO^@LT!e#CmbDnxPX-@w7oI(!xu~}n= zk5UaLkWc7&X56<&f9z#ZT{$>@EvC-sxV z#{o|sx5M|Sl2TLnPc=U_Z~w64k`#>#tt%C~zxL^Gs7hh8*^XP+H_BYTn|)_Ss0}M1 zl>iz74OENco{$?&fU3nArG9=5Ik&k7<N zk0m<~2YFizjSEuc4{rq%5GS_GyCWOQ2ETw-|l zz<{IzV(iD_HKeMl3c%Tzh=}{nP;B)%1|b8l5>Q(BGckcLWX|h^;Q(SB)2CN5Pq%`4 z9mkDj(SG`xL0xS;MYe3VTqUIErlsjMAhJL6r`^H^=VZN~k){v17!a5T6>2sY7Baqg zu?+AH$DhU4CQ$Ve9fKH3N<%v3sN*K4rVmS(07W^kL&wBDzqmNiV@|m*CKkxpguC5h zO=>UVu-t38a(-e?iUoh?2?o6qOTz{=%}F&CmGLcO6c-k-aYnG&2=9r4J0tGXfmN23 z9t>k=ky3|t*OYRksFZqPP^vrYn*{bLkJ zBdrOOUm%6(Fd>iqMIcZ=cx(W87A2)oz)@&5B)igZ z55NR|8jj1-bHEQ|MS_$0qG(akA76r6JhB-R9=_DQAgXbsXvq|>;2}G)D@Eb|oSU1l z8^lP!9V*fv$5C1lkh|U4S^$@gM6R)&_d^G;g`)x1FT)=>c7> zVuY%)8ad3SVUb8{L!kIQg$5I$ci`fp5q&kD6SYi{W!yaGDcRKl8=TjG^Gb#5HcW;B z%XLu;AL&mg`p7#3WF1uO^A{c%`=kH`ED4-nU12_aC@CSa=%4k7F|d6(#b5b97AZQV z>;>x(@tVt(jgLjjA3AUU-Pj}}i&p%l{m-xaQj6PMoMvn^?=2oV-#!0EI*q~^YJEOZ z#m2|Sr>84$Uc|yhkm8)zMj{7ze4|%F7U6L48QP`*+C*`4Z{p-tAf$zagwR&w;f4$a z5*T#?F7-cbT|nvkDcmFW@{Oc=lxMBW-K+Sr_`jwZPr(fd!+ms{eQ$utM(m@%KgwuQ z*hp9p1_o%JeWbW|Qh0h^fHspXao8D)x`u`c-ffd9!Koi$i=c%A7WoA~CFqoWN|v{$ ziaW*ZdO#{l<3S4a79d6e4y#*ggi3J_HhRuOH<7%OzX6?$9sTBRxI@gEn#_2QW;wes 
z=uhoHwzC}kdT??A!N#ryZAvpUTC%HsJW;HaLjZ_kvxbau%~r-1{Q}!O49m`%8IQ0ADF*Gi*^*4Ix1uvb?+u%BKe{Fbp-gZV}_Yoixsa zTS**1DwJg9ko)Hcra*=(4+wYgr#L%4Zt%Sk(7DVRG^V2=Sj1_-uan{h5dd5S=;f$Y z*zoh?4G`%y$il$Pdt4y+pg^ZT$T)WPqz(dOqkyU5o0v$MFO z=2ab*pMAHd6z_w?-{i(afT{y%w-LHcC+Pi!gp6GO_nn9i49~dSDd5I&o!*)KGwBDA zC3|?Doo@vWH{Xf-;3+{1tbyu3)t8|xCz0kMdAfl1X z3;%b58GWevP;UH8bui&<{NMQ5Ap?{7o-cDAW1{@Ux8Q{8@qhn@^3EYl3(65Po;MEP zF=0qSKKWN^blHNzOa*KPGoZLsls%`WUIebJN>gzF4kRQbK!51|zSLny34b#ni9#(c zK{$Fw=ixZyM)>j}oEd714>8=B9fPuC)i(nm6awp~On&!`@oR*bbG7u%0&67b;sWX<1Fr!G3Fr?y zNPj%gSHdxB2eEbE(E;*?B-{wIeWtGdQRS{g(a(BiedeYdATQ;DG`9oLJ|LWJ<*1#n z>FM*`;4<>^4|E*yGvG+jA_4}U`%A2Q1aX%IkdV%gt+T0(&*|e!gislsYXsG|{ z&b{odvK{lmVuQrmLj0%T=Z=4D4aFn+9s`s5j9KtP=yRj`C45k$5?Qh!@eq!$d`^LB z5D;yA?O=PsdaJ1st}m~xt!-^ZHZMNV&Z;jh-3O+H{vzo8IUDT`rEG*;w=Ms^Ee!i@ zxw*CVmI2xnlx=T1_>U)=znQ>~LA7-#k8+~FkQXL2@MBNvIGNoL?4ePwsi`SI^2;d^!Z)A+OKea;O| z9AFE3MNklaqr#iUKQl3*p))mX^iC}%y~*Po`AntjYwh>_oQAq?0%+6iNDDCm#Ph3o z4Vu|VWNvOQX%dJ=qXjOlz@H3zoN!SeQlG=*Lf|8;sUaQv{y)7~qy1{NhmsZ)g-j_=x^%0Rjq2H6w7qqAIrxQ+pT z1Ox)fn}BAmfE_W3bih9W9BvJ`7b;0zD6P27J{~xdcQ`f2bL;c_Ntsn14;>=$}7{2?_h}XlQ8e598kj2diNCq9aK#xl6RdU}u-Zg6G&wmf?ag zIw=Q>UW3Vx0sA*0%e;nOr?CCHCpaA2{EqUv^|f`O7^f_Tb`RZ+J4;+zL(|0YK84~s zEQq;>-Jq(>J$&;}mCUB$`N4(m^P{%jXHb~Ky!_Xd|AZ)@^?E1>6OwVqd#Qs zxbAFy%5vdJGArz~@LAfSyqbG<>y(lEzFEPq4-=^~^ZXJ%(y=Fq)W(A3)ne zRr*uPr}}FN7?dmpKDS`=;e*g}@n7O+GB+gi#{J{(F~&#x#n*fS-2MJ35VPKxoqzm0}}I(h~do9HX^Omj2uD zcHBl!F*jhs0TGdq9QH`H{Y}$976YeL{Ma$5boGm_Hi~8WNyPFZvmq*`ooPc~q>+v^ z9M_o8BEiorNxwMBI6pme#DLP{!%D&Xg*L+Bg{e4jqwbfs-JWv5JT%vU#_(lOO;vSb z%tpZdpa`^>gwUy?(@a8N_#HqrG#38MzY@odZE9-TR4<-7AX zHk(S1UQQJhwrN^T$O|@audLd+Q3$$+dw%uW9X;87?ZpN6FIjwlNG_c6Rb4{hb5r?J z*E(7|#&f~Xq|ATENuVB`yn6c(yYYOArnzq9-$kaggtnqgX&UB#@P=&A6ngviQM2R5 zD3HqYkbG|2qyZ9ITBP?N1uVKV=e%;H`Dc3h44Ccin`l1EyVx+k>5IN571^oA4`)B^ zWlq;ms8_LJgEe09in+&@B^VGGFbio{z)$dCDV))0#zFhLZ|0l-xfpD$g&snqGtP7> zjlHb&+-OqUH-Rc)Wkm{w_T3VqkODaYs2lfr8JI3jmk|H$Y+v<{+b9Z0cUUMhrwCx 
zEv6!opd|+8k9`h~j*ir(MQm(rLY8KSX5wnAeHj=xB^a-tj_y8M`LE9dqW>wh_~%bR z@%s42{^~AD2=epuz^i8J%CZf(+$TOn*iia$2uU(ML5J>tX49|ZAUFlCk+h5q0~)B& zdekQcAhC5Ra#9F-3cNd?@-e=CybKTFSU zEzK6@a?cTx>o;hga{Dq>T%PP5l9|&xq)%;x{|I_(!74ED%5DLuE`5EG@4p?HwLe@K z{?4`LMB?jaNGf7l-H6^bL><5U(emBdO#*~G=RUw(i3o6Q?Ck-5^?r)_K&X2epWGN- zgjxOzr@n5zr((Ew<`n=&)8SyX9^{;|Lw-Zbu25%+cm%AEPM* z>FZ5Lu4f;XqVB~x!j{=M;P|uQf1Oxc4q9Gy>ho>5U95kPd5nvvSCT#S zNJYe*F_JGK05)~<%1D&*fq92*4RHkp1u4xqV@ve2bu?sFaWR@8DcpTRT`UVkU_ifc}5=~v*ng6UOX$^f`O|83& zJr{iwh`n$xo$Q<<;#3uukfVeSUm%3tw z3Nj8W(42zJhEu~mlvgXthk)}PEi$NzPKclkilp7uXrkQE}qu4)~ zM?*LHmyS});1`S&{q$MBJYa!vJp5gL$xO{o$1}V!{tugI-VZ|| z9r{RwPqo`gcWZW5saAQa3+O$5qY{Zy9HbDj*2!$TDOte9i3Xeb-K(|y5cwOf%h-e%Z*iepa`6W761A(0Rng9m77X9?_ z6bs=3*BLh_>ky&2kgrsd90c}m`#3n1~yav zZgj>$;025CcT&9#qvp|*GHYCKq0q{P{s5SGPX!i9d#%%XpoGAQU1$ zhSnty-rnMUK$OAh6! z3t@u7$r%*yh@eRmZo!a^yiBpktC-A5ao`02?x>+I}*Ni!<*smUNnhwV2e}Ry4 z{}qIWPIiP+nGWWQAo1G-7BOt=-;p%P*1mro%!d;US!iuhct}i&h}_!b{TK;JEos93 zRXXA}H2eDpa%yq&8OmiK{z+_ez~H-Ork+mT_03IT-|G~F7zd&z=o6e#a3Hz%pNqgANagYTv2Xei|8t}GVL36IE-1J_H;&Y>0~H;@ z2p+yWcp#AoCR$^3BBHcGA}c2_)#9Lizk3mzM46(8hWM}+&6Ija>ty>&U$G}s4-;Ya zXXQOq~OD1)MOzqpa*rQx)sC!1H zkYA`#ZoJIxyNmY2`wig6`~3?>m2<5o;NFyZB-S=O>E>nL<5Ex{Nu&O1_sm?3POpzU z`kqx-<8dMtZG;7U2AcP*U^%gnmgKE>kd{D#x%mk13gX_nX@^Qxd z7kMHj9OVVf(k%2u{F*jir5g;YL^q?hRF(o`0v1bTVi{URXfBtQ8?vMux zGSD~=x7zra{{zlCQDjRP&O&RfIy-!EgICLbuClfBb4R`FZoX1z0x&U$_cAYeGSCY9 zrK#Uaul{fVjzZ8Nq9Rcn;V({roMeM`@7elDkmoZBil0SAo2#qzF7U!Wy_vdmNK`ze z(m;}@eo-IZ>%y^k)hi>?NwoY22k~fO(nLFVI|mPM*#!|%bRNa#`0n|S!n!eZsK-2| zW)NlN9nje!^mTuudMg-4Q{Z*HfjMc@`-z|O*4UAT*1Z~-M;`+H>v{nk^PZy5z&8*HsbT1h zf?ArH9fN*$S-CL)GYV<2Jb3IQ*vG1fq8il$#~`u&d+dcKNB={nM~H4I^8bzV`*)>2 zp4q?R;YkxXkWg3A)uFU*$I68 zQlmHpW=H0g&hN6yCm>+*t)|9f;0q7%Zre{3Pk}akcRhpRSF7=&#K6EeR7t~^)K|cL ziv$B~fM}900Ba}6;AVF~TkedeNJ~!UHSK(SP5VE?daL!(R3d{1UO*VlgrEt0?ntXG z$8cZzw|CE3Y96KLlxMRMOEkDk!UtczwG|YSJAjZ>%FZu$&-fA1Qt= zD|^*rqIPx?WbG|J@Wxc)0}{+IM(IB1<4al5CpFP~`@cu^9t*PT)%d3a9WGZ9^ie(g 
zOYIySV!qMQ(zblLy14j^Gc`2@G#ph;O+3>LpfVyimX~vii{W7IfQkx_v;scm40=7e z!ft?&k`to-0`|P=@9zqLX`G{3T30u1<*b_rdx#dlA0Lrk32csXF9~ z-hYYg`6F(^1z5^d^7OA))p#4BZ3Uo9{{Kl>!p|{`{N*Fx(8wz&M8Q{5^Hv}dpnKXv zB7+Sz@;3gGY9U-dJa2iO2F5jw1z*3u0b`UL!{8ruS7iaKB2!Xt7lwyR!5CksFdcd7 z4KyGCSlfVW5+C-zW&;n7Qj}PAt3Iuq#U@`|TmV57wePtMJCruH_GtgW?z0GN;vGL{`c^a*3sN-Bgf~@#8*W zS>n-B8d!vD=Ku>87(tAB-_SZgPbm`4av+hAmiE>@atr?Eh}_|KSC?sXSQuuZzt=|} zSw=M!e!w3-05KJyuy}~Jw)Xk??afu{Yrsd^DV3CzP~p_gm?B_!wi`I7{Qu$Srb6Dh zy@<+$+weJd55&q`H4XwY7h*_yO|1f=HWcDW5}ncX{=DOYY%Tmhvh+~94B_NpkgA5-jQiXL>LV@zhYuxsL^k9GA(IVo>#&yr&AQ1BGz^Du* z?coFnvTh#&5|S=*h~Pm(04icGIvF-#%9z&5= zpEY#swJT=hl%LnHui%D>f0*<7Ui95{d+g*de~I={@}O=-gcC{N3(B+gabC6?bBNkG zYAA+Tdr-plt(17YVFLqud)yHQgu;nIxQl^1AHYLkvRE#}!-ze=a2wEWKu}s#zM2|b z8U}l31B-ol?*fcj<hO}Y>4>}-e}U-0c$B-yhIY_K^`m6s$F3t zYrC!{%ekt#a+04dowxBMn2#wtRTf4rK7B5e?TANfs5f000fS1sd()UC<&;Pi+Qn`# zH2w2xOdS|9^pvpagk((Rre^|tW) z_e#f&FPX`cx9L7T4HegN+`OStFAo}%t296yMcPlMTEaP%9L9&i7Hnpp+> z{p-Z<9+jGdvtUwM*3;b7mb=%4R&Hj{-A?G;F>#CEm2#A8&4mehh6}6F+aIx?=REPu zTU4YC#j$2r7UoV^aNJU1*2@Fs5wE7k@VLIbwNnL*V~k6s*(A5!W&v2u98SYi z?)8|7oX2Hi97M>1!axX(RMZ-&3vNTEe~@0ohe&L_6ppFlyO!2k5%!Yw_Ri3jtMS%E z$0xTB(QPZH#^*|8<$?C~tGhO49*AFZsGdLAQ;ne&@dU ziym*#BLHKtn6hh)#vvLJkJaZJ)5i;v629KUiwp}t?j6OzuP#Ker}Rw?@B zbBS4X2T~gSKk=8dmnc-V1~GUh0`6tXApbArQ$o_+FUhy*A#W#vT6M7yMKY>S{ZF>B zR7}+S9@2Tq4n2@&-Dhzpa8{A9lWif4CN<`CyPwGQ@O{(-3yo8`{O9&JzQ>~c@`She z5hbng{JtzP?aTj`^FeU896vNT+{L2InEVWlw0wyb^-6bpUN zP^;-$3NmC5ZggO@=?+tKf04J^m|pJ4e~nHEQDD8gVdzddI>}#rG$A8P5Frn|6WIXP#Eot7r?cn7xUFWRui5Ico=}696mZP3v$KcZcfj=P z-h(;%0#X`UCg_9U202zJZF()2Zio428*-P`d^EYCPN}=-;4BKOh8W_^Z^=+m^P5xzr9^XpI_l>jPNx;mWd&(Or=WJ~h1fF!mJm}+YTYN+H zb37_b!Ulp$0;ab$;12Ie_zGh$8<<1X$s9ADHcM9UsWu$QqjqqSC`TPYMms~%*>=66 z>w4Jk-MrKTzUUe8AhMm0T|q_!m_cU8fjIH+@|Oa_#0!M`0%|=j`I>>zg_xZ1{L_YE z%I5ne3X0urYwsu!4~+`m`7=BWI&9BGdbowZ)qw7R#n?96*lc)*a`rrunz1Qh@`X+u zmrD);b^E!J4;bs4c3tN)>sTk)o&0T;Trl{Rfp46G!XegRYI@qaR7|huHEnKagafb# z1b(Fg!=I%CzDNHTRqq{7<^RTyo5wsxGLKERY{$wLB1D6VkQvz-85!BKL)lasWERRe 
zMn)uiWF3)BWM=-Z)BE%NeSg1)e<-(e->=u}zQ*%-l6<23X9r3&LU=xKQ ze1LpXX@Mh6-;=o?5z)Beyiw!lI=#77E?-)sVe#@gQeFRwd48CB8E0`> z=B~PcKYs>ZlbOsMX(XIsrIm_-s z8?0`3;1lN!s%Um;FCupNeO4-M18v)e)RR%PRa;hNtTOPkMJ>wX?{@ng%`I)PlQP+f znSONHG6;Pf7fO9_;V`T5VWtt+@s~)^kNyfiLnBZsap-W%#pqx#!!tUScHf4EWVZdc z&1$-ngJ<^5N;gx!dENZ5G*oo@zP>%&oiW4s)Oi%hgAuRzyoH1Wtx*1T>IKlh-qcvT zH!AOFM@ZHcyix`!eVtdW7vY}#4~_amDolx<_xi-6;%DAl%V*2#DLWPwzTcxn8JIUj zAAh^G+a$4Cqhmz<&|L0~&+{a$~?ayh&cV}mQRBV2r z^JtT5J#;flqHR`How4%Xfqp^u|ShS-A12$)hW2wg~eMaru~oHzhmmmJd!oVTQx|s2~9fWqXq0 zz{yT+Xi(N&ztV0?fucqkVST7MF5tV)xG?;A)-7~8JKT~wl<5pbzYgseS;)|$*A|nZ0;823c5@cbZXd3R$-*?pI#J)`o;v04?4$(GO~Yr=PQWIG8to81COO#4{(osaMM zrLal?bNgx<+Ar$&(~+x|HH)6@9$W9tDAFq9-9$fJ6e`1To zAN_9S_J7Y-u2=nRD_vY{h862gOzE}(xy{iirjn)Ix8>sag`1Z%X$QVnp6L4AsiB3_ zADphbOVnabY zRSD-BT9(K)sq`ae*|==!`rLe4Xc%K+PB=ol&G-!sGJ8Hyd@yFSHE#A@HQ?*t?rZqj zI+XNA$(mMSrt>&9W=xEmfbLS?=Th(3v~ImrU`E0NPdG<)iIR*|kFN1w=8$$!@LvBk zbdm@B>7!B3JQfm^?Aipy0JZGKyF1x*OI9(RHJvr3M$bfyqR0UmCq%u#@;s4l09_OE zHAX_hzlPuPOH02uI7B|yIgeZ^Q`YIM{;?kX(GXtefuQV1+jWc@q=NPuPrA4-XWg4B z_pI)2rlw>{_V~K#dj9sOv~3|H~C0Qj}`w<5dr0;&2(1b4I6*=*?yQ zT=~Z5NQxUwR4iY+H9xtdX>%1Gi877r*1C?4abnDM4z4WAH>kZZl@(-lHLdiEgjea= zd0M8Y5rmLs8#07hqknFt728SXkiTwgif}jS{8ZMFGxb>P+v*`N9x>x)b2iHrPpQuZj8n6v;5mOp6TM&C!Pq1DGTCaYQ* zrwTuHLt2ss;5a!t+>2+Dy7CpQRfP^$g7MJckkv6=6druAKM$Sxj~)T=v-2e zoD9{wKkLWs-`(fgvTt9p8j4txlT)&^JD>LUxdO2C?pbO8zReh*q068o&9ae(ZHXvS zA~};*JSGh(bOqgm=Z@$|C#sZ;{iBjQrU7?t2$dpqO;9)PQJHL+eJtRecGx&V*7yMP z+0An{$W}~7?90vFAz@x%4D?`nNjXMb`i#MPA64j7H=p^mw@S#LcX@nwNQ&jhO4|#1 z3EKNj2|3uwz} zl$}B31GFGrxpK|CHV9Qme(l-^sdQHgZ^pM>1Izuve^EA}$m#TLBcsL9THzl%4SPRF zg+*?Dz+W)ex^^FEnpsRXfLUWXVmk6!lp4d5Zy3i)!p|P-H~Kbj5HV1vQPhdX$sx69*VV13J5FY%REtV z>EGTCrT%b7>zXCD`R+-!&Z;F%H1el;ojMy`W9Mx3P&?E?uxKo`6y{}IUaO|jndjula&d*M3!0rZ{jJpuJML%f^ zKVuTeciIs6e^D>)*M&U(h(yU){J5_7bRr>t7WpI`iDaL48x~G{hS!rCE<8+@p~=bl zpvo3iRq|4C-)vFL`gu2FuAFNib>%-6*b&}s`lMUn{RiMt z`)RDj&(F>KZKNvVvZpe}gw8vY*iEE2iDA#3k7+y$pJU##9JcTUEL#fNwHU59H##S( 
z&iAA+22_u8*iD2Vx1V2^|H%iTCz}4J=Ua;GmV*I>hhY(vtdgulqjdXGUlCj>Xz7kV z5SGB!I95C84jZ5P+_#uCC^IXwE*bxL=d&%pS;^j_Uui`BERY{WMMQuT^wKKlOq~CM z%YmFqP3HfnCcXb?4wm%Z7^W|JI~!^3#W>0u4)_=|Qs&L%zj_3|2CFUMEi&7ZH$gIs zS3rJfv~u)MR|Q5YpTOr&1jr3Byep2+`8<5BaUniXoA`x&N5BqJzkj9g*0sxQ2`%6} zgS781yG{n?#aj4WE_vnKJIYg_EAt%_smj(pc>OR8NNfTC5;)1+AFg^P+b+px*-}n1 z1%zhbiMkOWI%6~95H=c)j&D*@OhDpq<2@*gq;^|P);V#kYfVfztcw3vp!WOb-dAT# zq9>2aFnCc;2l=rFWB;FbvSUY?S?!AA$@%KbTYJOaB%hY{jIWsttPOq{+Y$108~o

FT2|c!gFs zT$F8ImpS}utBoDW`pod-5w=%$O`*8j+olL&s@n#(7rITBd)3m-Qg3;j15fe(y^A?z z)URRlvJPKNwIch)Z+VMeO=b+fd{?msQ^5()*@0vyQx4)+)>eHO0fp;AE1Sv%(zoW@ zpTiQilvrk4i>F@h?L;YRgg!zvr_N%CV$;@N}rCHSRo(EFNXKSt)ozO&;V&Iih49_Vl@{06Z{M=W2DGkg@Socb&Kqm+ zsNdJsFfhQ_pBL%+#R02gpIkNd})bP(9j_pR&Av zzof8`=bLa$rONA?JGB&hr|0x_9&cC7Eac;mCe)1@HjC`6_Pob94<8*;efqq{ zmcdp1gE?_5_PX2jP#N~9LPR`zS?-L`U-iB*RWv93xUq)id^x}m8z#&(KD<-x^M zHI3yI-A}7E^2C?dH!OA-m=AI72vh44oFs>AainZ+S=MC_TEQFM^}A8 zCu&|Gz;ay+y!7C1OQQzGW8~`Ho}l76rT6S;aMI;;mk(K3E_9w^efaTR*QOpNU+`R* zt=lu{o?rA{t#Cigflx)tS;y`!A@lu{o`|B~^)A{s7M^}~3uE?VzhL!T?NMRwXkrWS zxxIx3`B5)4kJej26R;24QlgiINPXR%{`}@mPlnRDblZ?S>csJL$O(>x^c?&~H8ZpD zLXKOQK6b{dz>{Fk~R^`fU^Sz0@t5f`PW60iU9-%3;1VD?#6jH&oGBi^Gdb?`k7 zU1m>T`h63jgn|ARn~N+_N=iyEgc|U_aVv4@tF0iY&-}RV(T0c@e7lNc+iaSZzCA2P zkBX-x-K-jv$d>02*XamC3gaF2C^JXn%}xwiZg6rfAgu_OG@r{n@;eRDc=k*u<<&P9 z85bd6n!hj3-YK`9^tO{U-eOQNvG3-&+e3*fVfe@%Q3A2cc;oiFw{JY>ShH-)>l~%l z_{CHl6Z#a6BytM#V85a!NB}fvNjN0+4HTH}WskGN%6}pTbjq@b{wJCisL2 z=m~$gTI};@;8J6+Z!R_?FOpfDS#giO5tZ^e^=UzSTT`?b_ZWu~FdMwlyMoUOzLw#C zao#P6&)(_roAgV)SuyqLp~6?lpw(Pbe^ns4pG>gg2ett;MA*b;uth#u&kH+5Tm z{O#P6he2ix`{%A=b*4b73?p`d@OC61Of$g(8#g<)uj6a$1)O*T`Cy1yc~Hjk7zzhl zp4TBpyy-?V$|l!j)#!G!&pVM_R-7D(ltH6G>GVgPU!`<{mWsZxCh@u-OCi7h{c(%* z)aA_2T6z^F18mQ{ff3x#4#^*|I zwcn7Q4(xpj2gT@HgrGjZ@c?jPFaXTz(-j(KCKei5}Fs@qI8xE{4(u3C-;gHB@+b2ncf;_ z{8KDMGQ2%mg4H~^iAfih>@|){3xqg(p83ZJafn|v;d*{;nF~jmo_Eb#DKnNG^k5r? 
z)MIqdH}dLtOth~t=QroXeWAAnBC))0KdWMBvUQK))c`Rg2%>3Fl9w859wJEN1t9^F z%B{8mAQPHn;idjjpRVRV*#ps%jwtFe#Oxx^6rFA}=;G1)exd*-H?Gqqe|RAUKK3u_ zo}c;pIc0P3pHMrJv8dS^(fIV>x7J1X56@*Yr2`qBVfgEvX&%yFE4Nuc+ixC4-)6qA z7A|l-A17!z3&JTHfvxDoXa;~~H7>VFJN6S%g^aPr3Rkla?z=YY`DR>SJmq}$j8otj z;QIXiuQyG|Xx-ocs9F(sc-_gBF1&h`Nq*g*c(P}*d*S>hc?^BOId&wvl?ow$SHs%c zT7x;UJf#Sg83(;mIUC5(5=cVX77_7kepO5O{q&QTgpswu zUl)D38y{VMb@#uIr(~CnS<;N@e9a{Bu%M#Cqqk6`Ka5D0l35~cpM0R3Q9wXI)As6M zk2y9^-R6NGkD)Urf&bm`%HpE)RNddDfg<1sh2vtLyZo1(t~zHZ1rB?oZY|jpnhl%& zU#%k!u?(an+ace3!M6}*u{kTn1bQ{?8JxKtIfU{#X+@-!S}OZELJ;eOp6{kYKOffXV4RJmphUAm8@1e+w7FC1)Z3)DpMldCjzksM&`SbmX-u3|%3HOZ#t8ItYj-=3p zyr@Skq$6lA9cq39$IGWS^N;4(OkNor&L%!gkIKJ@7)A%%Hr`z)J-V<_Qq9SBJAWi- z`$?SB^uv2G%hMAZrO$M7zaNV6o;lXRAPL7Y)2VAe*VZE`tc&rq@y`8LIYwwiQmwsH zJ~!jKk0*?0NaOErVru(4;V<@6{sXUzon@e3r$>~z7f@97>q9Q0c?T(p4}5FmITP<; z^uBgc=x6DmpEeF;PUvF+`+uJ*)P&?VxyrA9;=9(C8x{28W6g|HdhmYX%^U53(vJ0i z;?G~cvKGY@Odq9j@)H5PE86Q!XqPOg&u1I_4s@Y8ji6?IH*(+Dw1&vQ<=GhB^h4Zx5G;n{z}ulcTJ?{<>jShkrt4XGu^EeGFd@Im8zK+^!kB-J)1Gbeic&;e0T8gMeTDBWA-4@Ebx82)TgK0H+l92iZLi^?{$&4O}! 
zU4AK^0m+;Rj4FWJpjbhDa!+(w8_#r&<;^I#rMa#XWYQjK6!^fwo0cI=Fi**eRhxZlR4vU}^}AO8*X6X)Yk67wSJl!nnf z7vI;Y>FGJmcRvHg?Qbv;Ag92)6}iPILX{^@9O#@`T2;3A5IBy9zUVZN4doEn(ivOVKHPWuP}{m z34!7E-1ul3#yY8c+vpgS*y6JXjxqPNX9&ug2QqHU%|)Z?>g$OIuKDMpG!gRaX0=}1 zNDev7c8g4CbcyETMpr|N+cBKbnM*(YmpYJ=NZ7oYPPc4f4v(-oZ8vKMf%;o+tX14{ zv??~wP!??q9nI(8CMU?pKBgd&*w?H$9A}@buPaxF2a@pFB_W_GCOVT-w@#Bpy*=zXrr32~=k5mb6nMOo}Uv_cZ; z^$l0W?yOLi=i(CL;$^vREudSy7HDIAH{5l&_)gJLinH@)@%r&UsUp%2ZAjVmZ1End<$B=8AiisF0y6ZC{6Gwsc8P4{-v zrefeFevzVKSDOz55Wd-QTzKfUJm|=4&U@7NI64#Uim)aHy#{J(!%LS43`x6*&LYW8 zc^ZQy&kQ^}8EBNa{{wh>J7}(mAkfAwt^xb0kbS<42YXA6QrWIM1D*Qj>Xv@m4l?Kp zrFBqGfvxbPA3L9Mn_>h^x|=u2ExbV_82mZV6m@huU>#wW2B8i?hmZ{uH-a-_e}5l( z@j$#8nwi)W_@^g=7hBk!IFucIc z2gKWc)o74~OmN=veylms3l!-q-Q1Ih40_j#*%h4kt5h8R79J=?TsK`sFUi+Mz_Git*H;B%MRd zAibC&iu>aZZRSQF8j{3u`kS^lu3yBkr4#WJCGsgIml;s3AIqkD*YKcrV z=HXr=XmDs9t2O7BYC<|~44z9z-=3h!HFx~W>$cK!p>WFo5hfTPtcSAaPCPb+x}qCN z5W#7rb(2$=^ApL^dsn={kr5vBHYu>xVq;=Dnqf@}fEuWu)jciexg2Lmog?>~PL1b_ zsWd^z>!jTS1O@~&;9%07Ig^O`gcN%E5VRVjFgK_1>Xi(2eW~6phZO!K@gOANn!SGI z(*)j?l8+yE0lp(zk%avR;0fgJxGF3PsRw3C^?zyevb`XY6cH19MCY>jYX{kC;h)Xg zfh5&zoJSaAFSq%2hBh1r5t{9Gt8&tGAdh2FZYt-92tp=;|NBzqME~!L<^t|%R9*gk zadwctDTN;r30)G|6HzpzV~+<%;5QQkbI$nr>y`WQ(8x^%PjG@6fDgm%9AVqP?+9w~ z+fld+{{0&~Macj8i!|nl|NVJRngR^&I^31&XKNaOq{M~hq>1vrN#jBI?{{#w%7nWy zntwaX>iX~Iaaa7GPs4p8L0{qj@9*R{y0ySUoxv?E4QZ>BTG2q`{!fazh@d8H=VXsg1WGP>YR)cgDV zY>jTd^NZ-?>ynb=gOyUPhGE@Zui)Eh6ay0GZk*aQN8h;Wd^jQ&lVtpjtC761vr1-ZEkOe650--t^T zv&{Y~3}you2J&DrVSqk6+NHN^!C*& zSx`n*?#`*MMoX}{!{{7B1m)-lr}V06Ioq#i|M!u{pA4Gla%J!T0O1v2(3^=cq<1}u zi2+e+zYT1Nh#TRY{hzr`u)sj*1p^OQQ)t}Z{e4zNZ~aTx;ux55D_(GC z?LM)_BlV(zXfGHcLN9zB%$PGwKX{+GEPKGXqSl$ z;2cNM@?0;usrAzM%XCg=CPkymL``{FnUS2ip&_-0GK3Ol_pxqrnxB%A`araYi!!tw zR#5#lS5|#uV&XZyG}x=6t!f^0Vg?E`riR<9Kn;*p)NrR_R}zp%2UAdc?}FF}xIN2j zK<4MHaR!}#%U3Xq;GUJ0gx{YzkPJ@RUjhl44F=BS-rio2je>TH5Jz^Id#UB+?jZFD zl3a|UHwRr1oc?Z#G(eWk;FRFvvVzkALk+fEG(ew&7ZeJp*_D_)2*$$n8(^N3qW3C| 
z4fT|a>y%PQSy7Swf~&R*rvw`O5$wEWb^{m2Si_yDrN?~DB$U*0tim?5YwF=jiFN`z z?dke>HMPE!I~708dGO4eiNvH~a@AE;kF$&fQoZkZfzk;nF#^frwZAcQbaWJdkctfn zJ;DoqiJ)YnL80_)TssoEprP#P+#1eE;7;0#0`YHDOksXLS2EQ_x`5O88hoZ-+e-tV zeSSw^ReL$kc|3ddYQ7_hXNFf^p4kFlxuHWiYTI$(m8c7$@AgLNyLV37W-pE3KwrDt zw+JNa$FGRg)U*5aQ^Gp#-rf;sL27(+^}&g^LUud@b35*}J#futpr@}t-W`V)1A5&^ zQq|r*Uh8Xnt7B4N&gu9Kf`6HpTYqt?%`j z8NBf)K?;6~{|u~R=;H4OmkRr){D0Sjq!_5~&HwtPb_t|F%d|dEB&MYy+}ogArRmG? z&=4DZ-eWR4(v=A^H0CL16=*$5f!=|HL|=DzoX*I=0MEmaU=Nsjb$ooxCB;2P*cAk` zG6n~)hCX8uvp8xZ;V2t(-roaV8#l%M!_Zi+WW0wU2*Q50b$EDpHbKSW!@X@BkUOJrZh8wKFvsM zA8Zgzo}$zwDcwKX`c9l~-WU`J+V>k(TpAJj&!q)nkOi-+Wd)a>8xIZBQMQaz8kPn7 z0tuI}EBV3;qe&jwA`dTbqAVtsvlw4;B?<*Ah4V2#Xo-fG?%+)`XKiCJ80a7ADeeCD zjehhtU&`MfD`a320pBLKV)qJBUCY@wj z#!YySu}bIuGsKEG3pIi}zThSjQQ|KD*{^I6G_pv++1bZ#fZ+ zv-Su{Ny%RBSzma*RwuADy;rj5=jPnxSkES9<(HS!ib~=h{-=zaySrJ14OzH4?RyjU zgrd^Y(!4x8egSUz|NOLzY8|`fF=$;YLx%+<;%cdQ4_KI(bON;gEDyaC;HX`d)r^h9 zMH;>rKyl?{AOcJx<_?x@U(svPXw^vFxvKW2g8zPYu`ZALwn44mmKEQjV3@SMRo(McoDKmV;>2QunB-hMD= z#FfH}Pt(#0ROjd+%7Ded?<2+ z7FmMnfq7;GnG>lWHsX}?N7(jra6v;#B|BrqV*)ms)7HyXj_uOYs$5AZw;ba`ne0T9 z@W>6W3ogOlHEx@y4JW&Y?Ned}1F=^IeYb~byM%nG;w&nI1}6NrKH)-+W7|MN4-Hq3 zmQItKs+0m&N;V6459NlRE)o70|7hLqQ zOWg^Tm_H|PB!1|5d0tTIzwj!N9G&O8$@!uApAm5u>OlJDHN#GO5VHUKmIqEj-UoFT z3?cO#BCF`}FnR4vE)QSj;4ic8fsCsC)k|tWwE|}=N7i?)r5$lEasO>1gdJHyoyf=E zGxtUobao?~xJe`ZLE%ZIuk4_m8JmuYIeE;`e=Scn7RbQo?l9Dyh+OEDoezCu% zNi1fw=X0TQjxybDd3>FlD6bW(jIdxOL}# zvF(VzXum5`w52Im3o)Wf1zbO(~BVUVOO#0Pa04-RZ@v13EHbNRQC5@EYm&R z+q``D7PtHN{8_k=lzpdt)AOeoQuk{RKs_H+K4|tE-manAYz2J7K0v|$(3K=gn@#=! 
zZK@4qpqOVg#p3)GUTY!eS~c1~lTY4w2xg+Q}CScM*n?nHtiAS<4bOXEodm`oxpp=Jj&L6ZuHV?;@3P)GM3pz-?NAt zL)9Wl4m5kCa)W0{8g_=uD{m-S@UmRkb@O8&jE086$K*2qFBn?Qp%B%$7s=t%?3r5q zo4nBTu_uMam!jP7ge0N9gC{D6gtw0T<|PFXHiRC;BQ$2urEQld#29Zk+$Aqf+j#kN z0y9!hmV$K*O^#8z&dDLZXgSuDK&yE-X631(Uuj2S(9whG{-;h#clgX|FRWxY(L>(k zl1T63_K<~aicaqB{f|L>qp=1xA)IGsl+QKoV3=>LE1z`>^R$%Scc9tVh)otk$Dz;$ zmry)=`DEgr+u@W<<9^z`bI{rS$+zql7Hh0SUfLPF9q!kN~jyqlH*O&w>< z2y7`XSw2{Lx#rOVm?%*+cLIGHb*qArn>ycyf~APu@40&JgiwG4UvY4CV9Fgd8)p#? zEhhE>2RPg;VD~ShlydguITxBZv}!76GxBjJV(H+OcmFJcDzdP!P%7}i%P$6%MNq&= zID#zEnkRk3F8AX+I=F>OjgZNqMo-*<*8{mhL`38EXU+#shBTeQHRt|D&f#KxnEuRg9#yw zp0dJ5L>dG=?vl4#jclIa#0HOev~;D`?X?G}R;j*`rDsoAfX-V`sN|SM(!ROffq}z9 z!M`r=#VVxU`Z$D#I)b|6u)~opN{V*|@tUPZ-^Pz9iPV+INryFCQ`C)!-!5dHw760n zEq9=eDI_8%Z2$UOw)@y4#5avM;Ef!brVnHFjUw4CY(0=7>Wu0O)ibS{r3O>NCjz9! z=>dhL`Tb3F3rQ#%oWH6f-^h7v!f^h0zR0|B>+Y@?V^*CnPn<O0Fmg#|>qy+qiVcDH7nNoW1Gr*1yBKK94r1wFs;UsqaoFrGyGM zeK&3N`~1I$2o|^)$10f7IwX%)4GKm{_pCI?IM7J2_ILU#KF9lbn1>)O`>vRdi?tTd zIemb0`kZm+u?Z$s0~Orn`8R$nD(QEyD=F10x7_5WLVn9i&WDMKsQm#8M$t7H7fzV; zde7P#grh;BMc>$^Y>$HQ76OsZbUS=DJuvc1Z4-Ye;{4jPcRmCH-P2S@WMjjFwPW(= z4UV0|BraF#`r}@%cyoLi;v8k(=C$APEJzF4L~Q}lJ4ajei7%a~MbywyWGa>x#6)=| z_WQfNRs?tJmx}J3xGsB9jNp}ZX&ab7zHRoI_=A(*okQbL)V!D}CGmFbi%dCR*F#$| zo%y)+NdwAHI^>V9Yedu`g${n~#Y~X?`t#%k{<)*Ng*?j{E{G4d)BP@G7eBb?Bg491 zL^GIDa3~`uSL0tuRZ({SM-O_t@_S=&7M@1q*81xio{J(D4VNY!-#=Z$V^S{L@u$um z{p3kK>QE5*g3bjmtj4z8)zWGPk0dMOFK9w8lCTq7$SP@yQ|P#5jQWRs#;cAyc2lB( z21_9#!wp5h*YY$Pc(-V_T$+BoA1h-nh|F(^Xz?eed*AwG-|3D}-FX&vQkSo2r{VF3 z;a=;ju`ed7JwnC}&HEp%`D?il8HD}`-3Yls)Zz+JTiy4I8R&;eknuUh{3KH z!7Y*fGqn37XOx*;He;=$lw{McQI$to_S=8X6bx~sGUz^YDkv^%z8|K8y%nXRk{?z& z>~&%+$d+8kc!@V-^vMbeP^S*0zVS=)Xwb`%#MQjr2?jvXK&z@_Wn8H%3YHZD$fmbsJ2%ITi)N_&ZyR@?6+Ne!7 z%ca=284~PLLat- zpXtxThzb}4z*}WhTHl7j;IZ%jpsw+dV}TyY11X<%%X z)-zE`o7w{>zf&}oH+>EC3qW*PiT(G78w3)VrvO!mFi@;e)#4C5nhVt8+bk?BCa?5s z{rBGUde_3tVvui)q+(l64o}{=WNT|nmP=_?A9Q>Gx%o^f@7P5b)sC$L1W^GAJ`7^S 
z;F;praPjkNH-Cn*!K)W92G4nl$m7s^@jaNwnM{`nl_iMvp%{Bz0Eah{wr1j<6Yj{; z^z)!7p$I`l%{%`bpEt9xtX?=M=Ek2}fSHC|*TKqAJSX=_QIP^256=kuycyJv+b~=NEgz+j&Mudc6 z?+;E?c~vQsZB!g52LjFYI;DLI;B*cr)yuZ2lnxEWRSZU##=+)}q&|U)S7Ix`v zFTZ@w;5_La99)ew*Y7~WD;Kigo<|7D6U>aN%a^%oVuCB~WbSp=S-BOzd^z+4_bGhX zj1yV7uaMRW%X{~JJkbpy)%@({;{)=vp9%_oI-xZmib5WkCU$3PXi2{xi@-qi{uzCK z(uI*OYnv{xqpM3JR{y$7FGy2fzkabW!dHVvg90ia>q+#@t>j@jKS~zA)#4Ly3Oo-B zYss{a$oN8}b`3r6lth8``dC%<4-*C15D;;oT+Bb+myF8MN}v=aB^$C zsUxml)r(J9hIk6UV#qlqEJ%Id(q}Y;In1@GIi*EKBVm8fp0cRz`iy+|y2?O=|)dVHv#62FKLV#kRj8GW+ z1i<7(6ex*K2W)+VkuafQVg4KCSi5E^7(g<)b-6&>z6*~HUWZ*QAq6=(HY7K2m$_H; z9wTJ@YRbWXubY@;N;@XMd};grpcb@SF=$Kfmov(HB52iKJa5*c@X=5H^A07z>lS(B*<6`!S6<$Z9$O1z~5DG@>#p zg+Z}~4{mYcE_GC6(V%F?YcQ+xEl-V7W0}Bn^YkR-u`n{~oi#jv{ygH<1wK79oKu{pcV{z{=T$z z@6pd6OZx=q85l$?UixvD5M~=;3PD@?%XvO<1_d|NC`Cmq>S`DxR9BXlv08h-r+>fX zhkdW>dTzN7P|iM9MI(sgRoZW`M8d=3!3ifx!%6X z^ySMJ&I_cq|9f6;=y8NmmZc>nlTM$f@q__o&PQLEp86YuclOf5*?w-^vOWwR(N#26 zgGBj&4FFOG5z~$1y=gt%CxD*Zg&K||eRsmdcUA2#!|^4)!Wn}yf|H$H#Md}DRoi&D z;z1{|2H<@|nZG|&SLaIE$6rmC+~z`8ecb5@Jlbq0BO|N7E-jl`?>r`%ck8x5BwSaQ zYz9ExDw}weB7ce&JlYLnYcXy;!UBtt0g{EEqTpwE8i7G5F z?}MUScN*wy{Pr;hnOzX$Z77W}^#A+r3vkeH*7>@N?Pby`-j8Fy03%|@<^J1uQp;3$ zF;2gsr-PrYr&!v;ILvz4yC1&S=6@LBwiI?heQ>DCSQL3NJvY9N@H4Kt4)oZ#^8`6wDHlNTChvnc+Ac)!l}DFGxJui4-d>ccLMcbP(C!s5XWcf2dV{d{Ge2G zlKY5oz*frq&K-_r7occJCUZb#nwIBcz~Ndo{GiG)oDTjlt|Mf>VM?1L@bQ|??m*L7 z5R(MajS%;BJ(pn2BQTB79SoAmUr3GBZrorT)VX%;>BYiM^&l`$(~nL;n&Nj?d{o}c zo%@KqdwhmDtV~P-6#cye1BV+e)P`AdNl%}Kf77;uVIV00F!Ns+r5j~)!HXn_!e0ry z4PN3-nPn49jj?TAq`645+5`J#bIB;M2Z{HZC(n&}#P!X4L8V>fSQs!(8kW79T7e~n{B(6}Aa&Bw}fdK)1Rb@Xvzlzyg z-B6-|`PkS3>&)b_Mf}?_%JHZ&7M+mdViB;Gmc;uMA5Fa_5dnDD5e0^0<$)D#$S?QD zZ_?=X+Y}9BM2R1SHdeGPl7{T=o1L7q+ny=TlKp0ltr%oE$$tNwZPZ?kj`rOUvi^pcn(h8{Mz506B81kWx}+!PxS$W=g)Cl4d~`msMB$aLnUR8FcHgg7|+hKB4#858`N5%0vlHGG^wp zOF}RrVDySs^(_lrMcjjjkIxQ@SiKHFd9sySj*L@lN%^%YWM<;JVZn8>B23r)%VTB@ zDrk8(^R7Vs3~D@_L7`;{i{+~==lM7|a@W7vE&>)V=vqA;4n|W?RioZ*hM>>LcK94f 
zGKa8!?a>r8cGGYl@16y{qMhjDmqr4di3SHBFcI%>ez-SWY!-NCBJ00G+mf2g;)}Y; zpc}3QB-MjjE{P-)4S}dEwlT90y-BppGWQ0-aNACAB{$tx`vt>&w@>_m$)p{}RPH|8 z235?Y+;~>`3daFHH969BvhSavhHFu7t>x>x4?)Cq;NGNI!#=^$`p5~9xTNHBP)~%K zoPBpj{+l;~-BGPiK(}2lO$4+h<8yMRz{x>bMeZGvm5Stjt@|uug7g`FTY>Q#?z4(8 z2NO)#DV*8Fzuv;13jcgOd~e_(oP15k{lNoRP2tw<|639amqJw2F%wc#=ZGCGd+Vj< zS^4 zl{S4n^=PLlj6{v^PT0|b6jPj>Q)b}J5Gr}~3UMkVAaEO|*#eX|@ai%l z;a5v>>{o-J{*e(lcjC7N zx0jijD5i%x{O{`??ks~M3ihC*Lnm*PT6tep?k?NYMTqHh1O9YEohigR{8-d7wqY&; z`gx_lR#@I2DY->*J_3Tv)jc0kuy`=w^94%$~j55#|1I_g6 zE+ka$5JgPI^rK=O{EyFK`^gb$80An)G4oHFqu=6#2&Qk3QYb^E4c-82n9YG;Ts?&1 zx#IFd$avE^{vJqJX3Ec!hwtNR_cX8SJ{rDB`4h7z_#pY#W-<9)5N&IHf|0-oG*i=d znrff>QD#L*L!J4JVZ;>AVHre4OT49qb)y3U0-$hrfmD){f!527e%93M5km77N68%P8L&(xf zMZTM!V?^V@bi`oxgL~%+VJB+Z9(PTXq49za*U(sm8l#DU0Tm4fbVXa&USW&&_kC#^ z@+yh3zkU%5g5E+hl-J46pX`j>x&K(zYeid2tH}`fbWgKTtzE*+$n1sdO7B#Ia6T)D zfZVLynz#wss?n2kVcexnL68>DAkSUR>(7{dZn>$*T*L_lBQXEB7Q{@;Fq4znB7PAa zh>W<~ip3ob55u(Z7V-?$FVfM`B__Zb%W{Rp0`eA31-DZuiHFyQh{wZlH&9|AcnL{M zj^#;0)a3TY24@gtw*(3yG$tDZpofJm0)>3w`C&g&vnix@{W^0>Ku;@sgNWvNa5RY| z-3u>ihDlvNt*L)U`gvj^p2}pMU&_AEY}*q}3+2!2w>`^D6+8QEwb_NlJfJxyq)*Ce>U~HX7#jP1qu>9iT*uK)+wH}Zr zx*6=xTe^klz}OGx`}b8&_V1QG<=0c?%pB28B=v*J@@1F`fyiZz!85@DI^MBMgw0nV2V2>>aiL?t3 zw*@u?bO>$E)sz>W!PzxVPP`l}7K_ISV=cMS+hW0r2sHuIQ4r2ba!RprW6GE4IElLk zmBGGo)nd2Q-B=7$$O^fn-|zZ8tQeD+xNfTwaaCSS zd}uHhbRr>pLQYUE0Vt99Xa+Sd+IO(Ck{=j?r&eak8s%s5rKTZ27w`$VUq@?TBgII3E;OL!&}1!-^C zycIV$iPO%W^~YjDDn6p!?lg^z+_-Z`MM;U(ft?%ANCgr|7Zv0Px z4LQGp-yfR}l^`+Febus#b>xTYgm%;=+6yG-(_zeP7#SU30Gw6uGjpOH;AER3f4RxM zcu&>;1|8P44;~kXGzr}AWgrDfEtZ%0`D)(Zl4#e~?z+5I+ol0BEtHzy=jCC4|2A9> zy$wZv2#be3)K&>EDNEIJ7#@GhtpT1q~1;D8D=52p6|jT;>D z9=xr6FI>8bIDbMqPw6+y&S^*5ruY+a<{HiG6z~LZ9*_zBgwPdh4_v~;NvmZ#yb^A7 z;t=JtjEvyrcduXfE_wa!)e?xWt(AYHhd(s4zJ9(338)ism|j6>@$K3(6I2z#^wP*E zc1}uiDyoZxedoZdfkL>0)A^t$6_5GLiu@2&IO!hGTJAHu7~KL^Cf zkZasE_{IG&Bq~}oN%txZL#zz&r~ubXx=Hs&&KNU3j;eBkef=||wL_J+3sH0M^D?SW zMAnUWI1v@pa$s{Wi~oRV5T?cgSSHM|$+}sd4!)uJj=&(!+1qp`pL-#!2SceeJpdSN 
z?xWTvRD8{7O&tvssJo0Qyo~d99wFUXvI3BGT6yKtPWHYZ)1t zQyxU}^Q#d4L$?NnH8E+h&gB`5cb=mW_o{XB0~gFDg3O#9OSo}yXmD`u?i6wPERU5f zCOPy=QW*0)0$4-t>ojMVfA+cB$adC2@C~<@*gbWhkr{Mpm zuD6bg>U+O{kr+x4X$0v;Kyv7Aq)R%aW9V*BLb|1qZjo+O5E!~8hAv470SVzfe!jo; zuJ@mp#e!MPT<*E&-gD0L?6dcNq9|YOE;c#9g&QYek@(u8jcTHzU0~`<{*2w>E)j_7 zj&ed(VN3u*Z)&%~IV86=4+4Ta36zVAi_ic5fhE|>U0V#Mz%mQ%|4Va0JAW_JITZX1T+f&l{c2>)2PtKh_)WjV;3n61G{f_2NfkH zXkR;w;QHpe~j|P}B18 z?V&Q23@Fkr;lTwry}Mxq3CYMog>wWjdOQwxFg4*tB z2qo%crpHllw_XW`5&=3^2+fw6O=uo6kQ>kdDmaKBL&_L4mS3R0evv*Xz>f4d5hycr zniT8u)?B406^uD7C{f?m(ds}X^X^!fxSAuew=n?gME~=Lu#afhUWUMxBAfK3(AfYM zGEmb3TG+6pWYk}#C?ayyEi<@>d?6(OjdvlS*o6Yg_w@XP!?nVtLdVvj;fXZ~9}C-VDgjwMi~+;&fK- zCl48@`l`V6*^BBJxyyF+t((}ohJ-?GUac=3n1}A@y2<@f>vQzH{Db)J<0}l)mgvD3sdocjk@bL2^TFgCfP`>FXM7{ zT-N`VQa`OIlYJ)kPJT(oqfW&pzIFF*FE4P4$8ebKAldZtXt#~=qp#N_#e*eGq=T2Q zCug*VnYF4qH9m1Got(AOUXZAX<(=nv{r(Zz#gmPL-z-f(itIh;Dmfx`Oae4MJ~s;D zFV|+PlR2*_-;k;~bRS#X{G!`5eD|Gyb60+kRo713pcv+>uxhEaWRX>W`*br*FT%kw z2)P55w>9Mwp}{NxpfS_#&he}HJrzkb2qgK()0C%i$xo3w{I73z(|<4$&)hcbJK0@a zZ5+V*f5%D$1t{Dur@M~kZ>PM*5YbRKSkOa$WGo7JtH4hg%tTAIDCllKE$`XzY$RoI zipn|YJH`Z?Rs=rZ6PH}dWk=P%)!Fadiuq}j9i`e7t?-sS2>q#GeQ?9@U6YE;(vnQ$ z1SYV!iGRR*8*aNM;3H>Xe$eT?DPs%klykLXa9{U)}_0#9~qa`rJ3KO!#JldXAx z(7VX>%nxOM5k1=Bn9-3}Vdi}N)i7t3y_zp51oIu5fP#Y}nl0#FYSuC)_BIwn=`u77 zAF@>)rAMB&YL>ESsgPDE(cO_@dikF&E30!Wbgm_-!kIaCb!ecS5V-887t;nR znpq{4trN+5tMR54R8^aqy4ofTD^`N88!}+=G^g4EF%EZ=y(BTJELK6m{Wh-N4>|W| zl-z!b)Cb4k#eLEj8X`WCQ}h2TgFtwHLg*qbf3PyGwEvvPKte`thOxp8CCWA}=n&He zLf2A_KFk(*K>p?Xxh2%FtVyi6MPX^!&|BQKH~ekyXtXM^-szV*hc&R- zxD)x?dUH#zw>ZN8^g?u2xD(vqD!U>6hOL@XI65VdYFo*#`a-IJR|I1y%y=KUAP~r! z69IywNecydZUo`Qu!Eo4lfq@{81RSa6! 
z0)1QbM5bYCJGhxv)2ZwMxsOH_R~)3p*R#5RM0~WKPN7Ziy}s30Gj#$@RL#T*bk<)p`$tKZ&;V zY2T&@Nd6nCJojk#1s~I8ko;WGz{p?R%-h51t9h;j0h^q23*#q)-KQ_Es!0r&SpxIf zH<9|J6LPkG>iRvbr z<9%U;h9Pt7yO;UJLM8av6)y@Zzs&Pc=YLGof(Tc0yLW+$t&I>r#h?gox3={^M=K=% z;Jz5{IiGZ$&37dKX2Adtt9AuMG60UDd3{dPIWF=u*6NHcdxki1%2cS=vC*9O1O4o3 z!Y7oc%kOI;xu8mP$6fuM)JszvRdmAK-)u7{EJ(S68%YBsNlT`*i@*A4Wqx-^Lq76E z=5UYs)YsnFEc>LpuP_~aaOnS)pymC60X_~2!)-)}J8E(K*(KGie4+J>Vmf>IH9Mt# zjo3m>kYG*s!sxwzoJr;Pj?o*K-p$w#Gj@!-eSU z-y!R=j7hRPli&GW8R8X7YG=5;4c?J=eM>TcDY9^3?XCpmjejD_h)sd*t**uq{UQ9) z^6gdv@aGpC*8|y@@gJcvgwf%~Du-V8pYrK8LUv<`pZSOzvoTfo01D-B3crZSe(GK8@TIUzT4lV3Q5ahgYIuk@u7w33Q{Kj^l(Gyv_u)CCuS&`Qbu}+{ z>tK~Lnk)xQLK5>$zUSGH;b)}?MVrnB6_#zUMgzPkdFvumFD-SG;h3&kxt4P=x?nY|&0WgTLCG%s zG~5icOdx7mM#5%pB}S{0Ab5ghQ`I&}MKcYhXobCKGIE*l!xH{CJ4|+498SkaxzQ{e zG0fcrxfQl5tWNQqu=gys7{fKe{goHix`LW)l$uI$wAVuJf-luZx!zO>29^Z9a0{1V z*M}#ze~-m|?>Q;{<7JLRzV>>W!SlgN?6KP5%vI9#RZ^1)gsv#Nt2&gSCYnJVy=LZY zu5U_LA({mi$6+7+!WL_nw|g)U4eH0`K%wOm!9_$8rhNaK9-+-sV|`c-kjz|8fVRsv0ayBdMM{K$VlPaz16<`wb<~W?<5#y z6)M!DZzb^W@fEO}4xK{@9|bqcxJY>5Q1>F~=K9+=Jx}xIth_T)hif>Gg)#~D2-YJ{2x%K^BQgS{NB0GpQXqIyj zo7tWb2MmT{?Rk9t`ByBkXy+7sQ=6eb!J#ih_)&$7|N4gBV;?WqS%eRA6DXm{xaca*^23|F8ifXC z^zc@%ZTQ4TVYq{^h^TRvaPcThC112?O zWvOMu<9(rDF9Y}!T3TABrlzHrr@z=FlXA(`)xRO?E@%QXaI|*qR^oQiT1;Giy(1!* zOUt}Bq#m>T6tEEYR$}c04)c2D{8@>JC@SdHa#Pg4GNSb(w|Yy(P|2h|8_*Vst!jK#9h7Qx z+p1#I$t`J$p7q3yr&x#Iww}vth#kec{KRmfGnOP&usu^6CAAY)p>oJHEYxe76 zo6DvGT*G>l%`~2dbw#-p;i?4`T(1}_R&B)}24@yWNFYaH{_^f?GE23cyp z>L}Pa{&6pTc&hQsJ)EM}8P6FWX*A!C-(vZxn%K}zl$H1T-!Qo5N|tQL;cdj$3QDWGBNf_rcDaSUO*2FZdnFn=7i6 z$S^5M&v5dfX6yU)k!}tysE>7%fc0--*t}DQ?>dW%^vAb|wX6mE%*6}@H3)P149{u@ z$O8J)TN(W89=^~29oYIZsA6?&IQpeTMga5c%zE*$z2VG9YSt@;wZ^fD6z26W}hE^#@~(*#m!v2 zzy9y4YU5Go?>rDO6LL60ssZmXpvFB9n2HxIq@N6nV%h`$^QJ&RWN_Pq%EtPL`??s5PB`Uyq! 
z(*|^j7Y_{s^)XdqNu_Z_yJ38HA7U+Sc|ZC38BAa!t!G^DfMn@+?p#HW{`B8utickV zjKB54+-1((swUgg8J?ykWXP+}+Dx{H9WZN4Vfz8Gh%fS<~tXF_kGoNQhPG zAH{C_vJ7g9!*V^!c)@hugIYg8bFOJ-*rMqZt>Mw8Pk~X)Q}3~x@U@Y2+Td|=)-fXz z*9F9H@!=#oUNf!;CmG6CrGzMlw6KtQUK6cxSRkj{p{;*G1d-yy(_IY}R za1B@k7MXVCT(XrX`wNIM19E+Hro^BjWAiT!LnNa|Vb0&@P`HeO;KC1`hsD83?IZ#O z`fG}g{vXaFpM#*zcsVo_F+oP5#ai=Ih2KfID3gB12$NOaJ%mw3cr7vLEA)1;p_BY8 zMPT=Uo|N{J{H~OcE(b>~d#bXT#mR$1vJ0=@e&MZI4oS0%U%!l(3T|ad&1Kzprbp$t*`UVg^l|Dg}s%mm3h;mX|?wZf4TGuh#qjsD+VG-C! z?^sQ`SlaX;a$h7S*#E`fIDJf3Cc^#TqM+4O=)@hxlrbh!2o#qW7)P(hMVs?g4>r6&Tpcesrj9 zr5X%#Tt?v;*H9F9j+?s1uGhPwJmdbKIc}`?{?v0`TbZ7XcO$&M^q?P-if3vB^OjV8 zV31b89*b#)9vDbmE++yO?eX6Usjh;~nC=C6WA1n%t`_(_V-lF^Qhyg*)BlCZWGYUv z30w2{3{c~FdttYBm8QBilv@nj9*2|o`h^OTbUr*)$mab+CCXUry1i~Y^;mqI^rq9pA)-Qd|3EbO386f(4d$nc10)F1X|IF6<&a?4WcKnMZwmu`k8B+s9tQm$Rp?A8@BLE+6DJQ!Q)+YH3l}_O-y~ys zN3oxY+V?L@w=QF({;mZqhvd+WWzy&Nw$JkFGg1FC7a3=58>6Y2q-%h4l6#b&;h+t^$M>r6ZBD5WwNe8L+J>q@w&zSqH`8D-S#wT` z)4}}AfQQwPhs{u5r{=J7uA228lb$iXS;jrPS3pH_0VlLts3+FoiCl>>{y1~{mZQYF zhJx+AJ&=2)>Ys=^qS}Q_5!TO;ShXync++ z%1~G*NZoSH)c_ZHCoWx(|6)(--%8hq(?_AJ(-9k}dMJ)?i}!VhMW%h|*PUn7Y2&sJ z2EN!Syo#Ek!zqZDWA>ywm|ZxbhW8q^S{Qd zw-UV389*R~rfym8joE*;>4tq0J!qtgO-Vcc4kDJImr%`>88260>Yg4Ghc?ExiyZE&KDBP!!mvNOsXWkm8a_jT+tOj&HAd3aVW zy<&(F{v&W&@&8UtL*L}nU^mo$8%_TBk0r}DoU*&?+`Z(KlHpZLmNA@Lk6j;mO_{m?f10u2?p`PRmv zw9KHRw=g`-lunyDzto3 ziA}S;_KLy*n^)(Er^S#vHLjw^pcoO{pH2l!?DvsIs=C)Os8TVgGIyNSUq$5CkQRIu z&zugbFL-e5ML3NMZKnjcI8`ZV-E)|wId#b5FWJlila~G;Z$KJ`>*Q~|P&y-4pluAG z=si;pJgFdbVjo5Etm#xrdPhDbZErIQck_FD#U%@wZl|z+L+AKw!o?74&1}|W0B4r> z6rJ+-ujJ!?TS6&jCCsfi`zm=H!gG|#*jk@Ur zZC4U*rJ$`tsTJ&5Xz+Sn680AQjNTEdd@~*W@wo49LFAoL7K+fD0Y^wKmty%W=34n; z@u}i(nxBtRDsQ2;Bb#}?H=HpzMN9lX2_X9__e(WGp*L6i`HIz4=H$mm6{SQNp$4<1rB*G?Lji5b=3b;Az`O9uc1;XHrv#>_x}?mHpI( z{hUchVpHG0zF+feGdn1`nzI-mEYly4{)&|^ZHC?!su5FU+_~36w)P=gdysoiiTCl& zm$Ive4SAXN31s4=^j|(Yrv91kwfsYJJbOCYewZIPC#>Ad_vVVO-S)7W@l!c-F=n`HAPVj_z-{ z39^9^-LhEX;o_`p&Q_*n{T|UUg|ME$74t}5_N$Tel|yVNAr#tn+FD>PTfjq&lL*&M 
zDW-aSEp%Q)=9!OUJk9<$?8)uWbk4b${y@IvVZ)JmZqm45bRWfvSvO48C<0`e(aQY}5&7#OZX*(WOvli7DNeKBJAEcN9WfPWAky~0PSrKF+k;QIR^P1r=Yhh-!>3YO?Or&B{qvkqIrF#&f4}Ch~ zg?s`8LMfv^GQV`{_EeWLRd{+S5~-D2!DcjXUvp6CY6N`7ICy;LKwyjny01Qg(~r>b zPT=DVLmMJoV=u?ZoWRr?U%!|tCwPqMHV1lJ?tXLSy_raEqQ%x1+sj_cViV}5@@D8T zxr#dZUF%BrEa`_OisV&%llf%L#c$=dDw|EE=Wj1tDq*9wuu(hVwH%$<@uvP7#sV}d`k*wLpiHNXJn2>sA|^S7%HBswwozB`|{aT}j8 z$#psS6*=qv!{e}3l?hwYbmy`b8S(Bp{B<5M-X_Xjt|BXbvMo*Hm34g<*03Ki?U1A8 z!;cvNdl}Ch_S&+?ow(u$)R9|J>^S{`x^q6=U{B(cyQ=>--2U;P9qyPjCY!AytCLk~ z%TfIgMh1=CiuL@uDg$mdL(SnV3v6^aw)< z`P<%d;&t;58E6haBtt072=+?uOxNota{R6u7v*R6g_F)YFOSM?R+&=7p`Iib2GN(ULqvo)2eIlQMpnTy<*$fcME2!H9r!Lscu;=%n=1t`qxp5 z@6sL9G=u3X#sA-fXq8<|4#lNNCP=qbL)y4;_$3rD4sD*I0P#+L+Vu>8v-d~xn#x&@J;QNAw-S9 z_0-Fvr&7oz13Y2zpzyCCgBME-o@Pl7T&e($gfd&&lJwAy(f&zpMwX9V+ zG}SfW7EuSM_x;=T8blFKAgAxW350edVmMhJTU^KVGwec??U&=CF5Aqd6ONmhA`y6L zCLSkTPT82ADXwn2?AKx?S!RxyhTN!w?GG@xe@?yEQc2YU4^=nyD!=4EsO$>wgHhdp zkZ%l^DRr((@3wlLxlt3qcO-(XlKNIP0bY11P!DqXDq)@)g{FM`pCf`-yhII_R-0R6 zZR=jV!C2v22(xTWZ=U+IFgz&O_g&dcY+KFq6)$J~agGS#h}oflToF#M1jc7A&flhs+6-p|o8Z7YSg zTJihVWs5G1t}L*F{h{uK8nL?+VrQ=uk}5deW}3|n+f}2Gp?#X2<2xs*Osb+6G#Prj z`Z(n@g8yA1lAHcemFfuV`Kzt0ckVFMPziyIFf2*xc21>F;S#hyX5Vv2FF5QzTyb*#ZWsc02iVbvI5BCEr<;K|rk zutC-}$lt!E>T;iEb1gKEReR!D`p3YQ>}PBceQ@83V2z3mcUB1LAEA_sgsA(uOu6w_ z-BAD{O{t>lKh;a{pB%E7-$uAq*v10H^(Tf^=e{;BCue#vUiIH7eJj<-up67kF z->(sfvu+CbHq||!rVud5+vShk;_Di8c)!Ifx5cM^c2I%VwD+Aa=j_>(p}@a+PDuMJ z`zK@v%C9@3o7lzps2ehy*nON@+SHE%LV3Feo&6RV6BY0jOpKMx*{h06>pBg(JsDFB z72!K*W#<88B2Xby857Xj0R2KCg4Crm+8#0wMv)#a?jA129ZfVUP9 ztw=~Bb(Wk`GcuSM8FjU_J@z{1OBbYo;<;vtO%Vv+l4$L~uq zjl*uh3@I@Y&n_b-^?)2G8$`Sg=UJRS2)Z*S{ed&(3kV8g<^#DGC?TsR{`a5gu}Ch? 
z&C~Pf&ca+{X<-3aigyNFbH99v_kDg9_$B_cHhLm{cXIHl2eM5D>~(byPf08Ihn(|3e6HoKs9GZOsQVpB}39Iz;|;N9q~t8-cD^mQp2v;yQiz#VQG zNT7Q11f2md_(jqqP~}qrP~u`EA|TnL_dD6nYs8|WqV+%hUcDLt%J#2UB*oRHJu3qP z=&C@8Vl$d0i$QE~uV)&FrXoe{Zfv~lz2dvub+v+W9rD2Zf~_&zEupco@ev9{#Q*Zu zU0J-r5YPnl+#UfKfu4}rMM|{-EPL=Eg}c`w z52v#e5c3V%fZr}$gJlH;60dbJ+9H7mluI~u7@?dzJKEi2baeDqS3qI}0y8FjCY3CY zUn`<7o7M~ytqvuNyD+erP13z&Y1#t1fejVV-#pLJ10+(wsw9X$(LAvNJ5vn}4PR#- zQ;7jrdWxy9Y-nZaz3YE&Z&9T2NUlg=|Ln;M`CD6CJ^FCW>VSOG+t(XifKt&XXe43zm?MI_w6rw2yaHG; zBjGqMHe$YiGDRIlPz)4V!2vgWWiGfHPnjDra2|j+Qz8c~n@lj$3?Fl+j~^4e>>8X> z*vkm#6@hyi<{&qe)GCwNYj#BL9HqEHHqfDh-| zOh`4b6GTNuMwWonm=G#v6qkGetWKe5dO)U2j3R%#6yx(Ky1tPO3%8g0@YK&HfMV)+zCt+yoC zi3ucqKxNH}k%g96ToN*NxYCt3!R#RnuWf$CS~)v=ijh53 zc>?b(o<1%NqF760&5qO6)3Uq$v+JtyY;@}UHnTSJTp<5AZ}l9pylLkG<>GSny1dP8 zjIWZVO&T3l85nJ6Bp(|e=ljMNIRiL@FNpmEx}JOzQwGMF0bRS*RtLVtx(jBrvq*~z z3-E2Xql{(wT-JmEcMlKC`)b`PFo>MS2y0dy`H5Juv9T8%o7Eb=o(8zyam4GKR@`Lf zdAad(Dr+y%ectly7&wS%yN*+#q9HtEmxra#vft$N@%W_zmeTXJVk zPHJ3`1cJ_qsf~|} zlsj?9=~eMqIXS6RRfrH`Wu||o`qy~73uB6^jm?XOjt*gN?m}(#^w*ma13%EEXDIEd z`UeJ5;KeN3KXl(jfu#YMvc+?r;PyEm&D!%FoF$PVL!aR+2~FDa=Bh5q4K>daF3u4k z9$<6NXEFdo)yLbqx5Lr%ounCVo?q9>Ju0tC*~Zq+j`hSC$pAotE{Z&>ca5BCYilX; zNqV}wDJdz%L`50Mg|sWQS4=FBCq(buBwG%cC=;9=LA_lkJFWc-ujZ%q>uUUjSZKIP zYvdG^ltEr|ycg#z85tREMjW}S*%cL*C`fA#C-)GfkmtXgk&x!wPX6K`g*?n;oRcFV z!NwrRP$Z-U3I6}jj}YvaJ&%yg2JZeH^Y2q$BjCW_ulaqtRx~U6zbk2eT1krD~}PZl?xI`+j~M0E8&ln$ny|NQpQ|NcE49{xxY30-A+{L=D;iE+fS8 e|L@K4Z4!p;EOgR>Jt`#dr6{W^Q!8Z__WuCOZG|KN diff --git a/doc/source/images/glance_db.png b/doc/source/images/glance_db.png deleted file mode 100644 index 3f32549314de356234631357f8976636992f5bce..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 108006 zcmaI7Wmr^Q*fu;gLxXem>V5D0`XkC0IXflyxnKlIqBz!lC{xjrC} zA4pzCQr&HCzYR-UU8kYL`zV8oM%uM3l{p~7FXx~^L*0RZgeA}q<@eY6K_QYv+DKX) z4u_$5#~~q3NUF&1jK_@6o*lLsb9RF2WURSNyGz3!Ui?K$8KpH-!nPIlmk$SzSC4kz z@dXy_zFU*jJ$61~G_How*fhR_{Pzp^z$CHx?-C{Af4*4_@ecw9{6F9Pe}n(;H#Yxw 
z@c+*={Bf`wiPyJA3wAVZDh3#GbikZ{8U2H>&yRc`Z;c*rrX@Zf_yT5~hor9?{SLVb z`alSn>1hPHf4DuGt~AhNX1QZ%T;<6` z`G&}d0ii3lp7jP+GKsE`g9>t^ksazp9=1Ftme#-h3G5yBe`$=tBy7J~{(0dJ*6eeC zLm)5Y723-$KF?U;wK5V{q@306aj^?nCi0$yKnfUlkl~LmVHO-L3lZoR(EPOalA77E ze{7?oqN1&>t&^oM;o+jnca;SUm`p>6d{~osK-1c-Zc{!W&@V16#YshXA3N%Je2{oN zllaeB^CDI1`re^w9RMr!%@uR36BU&pvSzP(DV%)VZC4U}D`U~~fB*g+A0NNDxd}Y=!^uyE|FJIx zE&s@DsC|p_+_cp{(p7&Oypgc(=&cTU_3D-PAnZNy!i|7Uqqk(V?92ToQ0u*5~F=GTw0vk(3>HbyeHzS8)XA&lbDac_)Jkx=~KmQioEi+2va z5s?!k^BvN!uYX8qT?Htup_zY^=R6egh!ef8L^FWv&=7Q6n(&Y$8y1;5fij)9Q@(83 ziulKEF^GB_yn$=HmPD(((~OWY|F+Nnh*6nWY_mciFDZsq$;?=U>9T*qu6#WYDCpZk1Pck(&Sl&pz-)6Ut3N^qAMHWVySu(zI=zFvK zU9Z7M;aW4Z?Omu+2thh%>kGZ`H@6)er?)2GhaF0VLEuhIeUqYwr@{nUe)GcVUcCYQ zk*<|(%A%J7$K#q{Hyr(~DRQKZ z)ghBjam|55Ljl($JY7ES*>KKYL(al7dp7VpD;S2@ZJ|gl%OmliN;G%ER@#AD?Ildl z{+Rt(7t`IXV5irz1C(SpeyaP=L;cVD=j*Me8{~~Qo_#WkRZoq$?)na#p67G)6VRE;BN$3)^Vvx9F+`A9S`-#g0L5=EnBYp ztrDy=m$YzM!XS){@5)<)53CY!tSl|vPuKegQl2v^-)u=dhWkVDSP&r~G?TGfVD%tf z)-51GgrTCYB?T&pDAac~{%Z$t*RDUN>g(Bcc$E_7OH#Ynw9A+jkZ6~UQNN&?%@zVD z$v6hQzZspdG(3E8#>#8DhAKtQ^5*8|`uh5+s-wX#0!S8nEwk%pKlE5s@Y^xarPY<7 zU6nHTehB*ZA36%bZD23QFm_qm-9=zTdofmp>GRlvKrh$kkDlpMS|p=G&?m=NglpZl z44b!JD`l@-@sH$rp<4BE(*C!}a&Pli@jq#R^@bC0j zSWEra^GXw+!q<|7Q2T$qiZb7=Q=HGAdgQ=)zDkuG?GL5$J0JFacmf14E$sY-g@uR+ z97O!h?JeU{lh#2yI;5>kGM+eNk#8=~jzMmgM1vP4lkZ2Qbi3@JO7RawMNT`<<+l?h zt<5Vc_N`V-AX?6}b3#nb%~O(-rCKj9F4{C!9<9(xTIfW*7tcq85cv=>-JuR@iW^?! z3nbeYs(lg3DYR;D!?`4)qv+^pU|ar66PRQ3gD<&n7ZgK*K=FLhaYKsP%>vkHR_-J6X9W_+T_vkuowF_WO5=;_)JQlosd6axGk;&(E2X;yX zC1U2qzx&}L#Ty_4EGZ&gz%1QBhV_T)_(4$uxD#{^(SdouKpMsz+d|1FWbNa#+)#Rf z{4Wb1&AvcZ10lGr+k!Z3xc_2&#Wg?-_4XWzs_gHc45M` z1uX9ar(Rk3BB!ns1D*XY;{JMyOnQK=KGaUpn{>oY1?=}@Td^IS(3mJPSA6|4N!{6O zSgS2&nGvWd@Tm%W&rM^%?3MAscTF*i=U^+EI!pv93&|zx=+%fh7`9m^q>FfCREC-a@?K!n`D_XP< z16Op2-^4jTbQ?IIF&8kblt8+pIXqKzdx9I_CJr} z))+&)`5+fc#$RpJsTI|~9Qc0A4suxJTh7g{rLDPB*qW^zk5QQ8ri;( zlUG{U{$nU*A69C4fVC!-cN((P{FmZLCU3W4YeVsVjZ&Di-1|;4!nsLfkrn@*!E+n? 
z-@`5ERC@>Gu0}>i(oj=-cz7(ex}UB3pZ5~d4WI40+#{^uuvqm8$VZPTUYJO^Xh%#)c2lBS%GMC|k>bq=~G zTUd}JFBSeUkbNbj`KFon~M|mJbMEdz->k+awe$H9}OD2!z@>9{9Yxo;gO}*hD z`Zyezu>Iz*!?w`kB;jmgR1$>(0*fB>v9KIwKb<$a;FKI@?||6?0+5+sKWo_q`1hk&x@*t~c&OZu2eR`ZewEOQIFgO_ zjlNXZG0;-g)LCAKy;piB&uqZ_QrEaO^8}?gV=g(yN9#x^9#&&tVAO0H=^x|=6tzY< z>N7vl$F!<;rauy>; z-P9~I_NSo*cX||JB$+JBdZ)b~GSbxo{tgXfIlyo4dFclfl_Mi@@o9gX$}XEo_f|Hp zG|TIp@#|a(bz7Z;-jjeV=gz56%civFJcUN2QdwK<&{+g|I9i9MqJ1uNZYN5{*EOJ3 zi;mTfgocdOHz5TtLEwjadwi!;0tv6vKVutrEXbAcJ1&f)IghzU3=FvG;Fpf#>`I0L zxw7CX9ii(F5Jrf-`qF!Y*Ik-HE2Q?!Y(&xI-nNl4O0u{OGwW|VDXB`CeZ^xqEbYKc z-+E6uUUPlwC5+)6YNPF5P2rgD(H(M3gY(?22zG?u>KA?c*x2fv)W`9}Ne&TXpHAQF zZRate!TPDzT_{#BRyF^c1?J%zp6##9L)>FMNo>~}3eMkN7p14q{86dZcjkiVgFt!< z$bRIn*S1iQA^e!umDz4%w~v-Wy)34<4P!ew#{3JFzHBPSmYAZEv{d1th5va{xcPIk z4zCf9u4}&WtP`IVe)-A5Drh>15T$=^lwz>_i;xvR8G^qOrC1v)Jg(##tX^_^vFmM$ zFQ#73ReChcd4#_9MEhV60q+%d%METL{7hD{c$`pQD>2f?4kcR&QgC5GzKDJi-A+%f z$bl4p7#YoaBI?vbz+$c}3=2CKMMU-Svc%BCF;rm|Ob#V_$pmvAZ0UaIAAcp<-cf?N zVW`K|EqxFq{HA9IS3qOJx#^#+$2bvkun2js`;gF3^mukLdPNgslz2H~+5n<_Ie}*x zDhwpd?xu>_qf3EBOGn z=Vb0R#}l)+i>4GbNThEy2<+a;WAFhH-&iipMDptB1v-n?a@gluH#yWDArYj-Cm1Ic zYP_!-^$15tAy(2(GSAjG=QX_iySa9q+O43TEfmxbFH2t_mvH{GPVW>&|*RS6)-|ruV0ePOn0jKugCgL2;s5VWoQI+$IwgW zqm@|EsIt4)qLrlKuo!$G&zh+-ZK<#VUHNO-wX%yPduIx?;@2BsVk3~{7Bq*lR>ty> zBo+UO9ux#CDP$D&0}2Ae*&D`;HT>2N6Fr}aA3`=sNO>Ugt3%bUC5Sc&2>q$>RJajg zG#fU;=>6( z=@;2}EBlFW3BAtVeP|XaytEj5L`ZSKulTk)n4Smi;~#UJ&3Tr; z!~USMAfn99PWhPr==jySK1x=D2cpe>rxMdA2p=wNuf!!J^AB;D4I4CnwYmRs)RA>P zKCDMS)1tyGfFy3Q4n-HObKmp={;Dj$=zbj;DxSrfdZ%O4B4W({QjGfY*mr__F=oGWdoE{gm<;BLUJ{#yI(>pL_A?(3bpH$IXBHNyt;541M0)V8A2{?k(I%o|3*pjpnx|zlQ)x zOGU?kqHxqCZ?p#lwtW*Lz~Trfr?fRD`kOxU@ZM_w6b3USNZ6GacT7QoflRVv!N4a3-BFHCB7qdS$ zB?oQxnO9X*(^Hhb%}k<0jc;)s5kF)8Rl2L$Fl6!x^Tqo*Cw#KR$(5Y3U4GIc&%X>f zv5KM0X>&J3Mdr`-1I2%)CaI}^t#OvU$EWeFaoheaZc}Hx`bW+^X~EK@=E&H>sWdM9 zb8f^9nkRLHm0(|((&7gfZ36|uhXpGiC~Fy>Y;*roAfw9D!dt@cB1PAHY0ZY2qMkZR z_FJ<9X0J{1xo<1f!)lODIv7^ms9EcxQdx&Zg+d`72o>fPmy$PRkqpLz1}> 
zLy;EA_IOD0>Y4JFW<&3}Mb_85o~84dwIjvlCr@XBtlub2p3!4PgJO)abi-lR=n%%B)+B0g4l#Siqy4^KJPHP=k$?#&g>OA9E-=$iiAVoRB=7>*L zJ1306G3n87b-+cD*EY#Wk@!BI#p}A{wL*lWtjA7xYW!ZB-;umHk)uj<26?5p5iLx5V+Ok4Px^s*?p1bg4(qpeG880j; zwp#SXJ`#9ioRpoEA<`pqw|Dsc7(YVD`;c=P2j!FirtesfY%5!?7a;`lM^N(jKyYNa z>j};eC^c3z8#*E@R%QhG#phQ}UQVzavsYi|4BkGP^_z#k=V8se7PQiGEtd>!UO&lE zcKS*NtYrATZWn($)A$zROrb>6ihk#}o<$+Nua=(xa-?6KwCd6`NX+ZJ=adL4Dk|UW z0i&r(gZZf`pxEVh+$BPe)kL_t75){yePSvbKUVVwLY&krCseRA+siG9RUICt5UgY< zKY01lT%7(Y5U=y_K_X2Ut5dU*JW6p$3YL3~p0mL5vg19FQK>~Riy6V}py;>iX>_7y ztv{opMuys#Y3BY7Cy7$}Edz5n%S{*+ww|k72}A!_Y}jsZdp338dq2NSKqnF%qYicY zjzyuh`HQTjJOASuHV>|EJT&c<=MY*-=kHUV%g)B)ItK{Rh{g9=3a8Gi;&~y4TdZeM7F0e*X{fmcZsPvgOzhNs5(`hGzo|%H*n9lZuBD8TqZ(xuP{ts zEeSb#Zxdr9a(JJb)S)g(<*Ch}k)N_ah&sU+tAQ;Zqbhg0v(1$T9mE&U4cqc^3VwPo zU>ouZ@=;-2v{hyOIRP7W$>QL2O{20oq zIqm(zf^0=)epPqAL8_tnm+sdyE2hQD#zL#AYhHKOdIbS)dO^1UMu`@?gUo=4kkV!s zV>iAO_a&gp>?xoKB;3$)Nll{*rntX2r9)@2OG};jQ=BM8POT&2vFyF_S~#@Ub^GG> z?+Sip`>M#Ri~J1@JVnAnGAOb#+g1wU`d!l0nQk53oM%zx?I?PS+ZhUKe0?5a&T2)r z$3-QbJhNQYTQYnxc_A|I1T^_3=P-D4%b@7?iJgqRB8{KZ_PVTC0vc^-Oiuw>)k3{o zlJKfCO-cL`OVH+^5h;lgY>cC$;Sm>;#4GvnZupT!D@xLLAotBic5x24)G`boQe-EJ#vfjiQ+M0 ze1VEs4TOE%cIz4t5#X&23Jqm%$tNDS@PejcL%iWM2?;h4zkVII*{@d-&Rz;d{NG3`gMb%=KxOYfSxR7AN>5zdRM^z_> zr@*6-;}-9G`e^>)FC4QNGX8l$iKaEp?{panf^<2LG*26N`~cQZ$AP(Hq(@dEId16d z>d(RCznLRVXPuM&>Nu*(1JiKkl3{o;!t5wwNEl$E%c^DNNg%67#&E} zeA#xSDD+3S6|a9!x=g4a^w?bP=u6YtW{U@UHnQCMaEBfPVHX5a9i|Bz@k3HJQd2{7 zQNi76dc)f{)?B2e2W=f2NmB`?!i;gwDV2+oy)MjZFJF*${}Ori+57#1#Q;CAz!8LW zh*70qk~niW(`+!C(=m(HNGH_P^iLYcfyjV zJ#ONBd%fukjX?N6Zs#SxNX%H?`W>5H)TGqj0@|9sS04E@9ReaL7~h~yyLu~uBQ_cJ**VSfS*nqBYUyjnFNf7op_8PRi|Q*Hhc+gr=k7Mpk#NJL zh<6!C@CkE#<`br2BG|$Rzd`_V8SbmQ=(i&6*tl4fu$ilQ{nlcGvrF-d?7&_NBze_H zuxaCji*!iW1Mc~op!V@skk-Tmzjn(*D89rU3rlNde&GjTb=NRiUTH!k<#d(6KK3?G z5)$LIW}6MXh!C;MNSM@w{9uXSf$$n}TzyQaWw=A>>o#-GQ<<}S-$q zwINbL?O$U7rx6zZJ79`W$83jD`@IPN&F?tneF-m*u3Xmmn%85eoVD&-f>PoW!mT%C z-uHwCcfNNziKdgpIjdqUq)r!=86iV9GI@+UHmpEUE_8CW13Q{C>l$+l_0t@0(i07c 
zR}cWXZ6w@L00lu+K(|79dC;cv_3+fXD=q;P8&>)4STn#XzTy@!#eOHdMOm1 zMTN6UXp@Ck-WAmqWf z@|3$Q1`gU8f*|jh$G`;>bB0hS8H8&K)>kykld+9_W4ZxRTknLx2$A~HY~t4W&`Kxt z98Z@eT#Dzi_ZhnI5YO0n=*>FpX6E@1NuiTB_-WdN5EYDYN|7UwiV{551-Q+ezXIVy~S7i&voV5x>MwArqRg#dxZC1%>31OVf`RYcES3zAO zGR$!98yIW!eXHY^&YUMBwP|8Q3`lkSj7u7pi}3>m8L^v{8{H4yIb?)1CBQzq6_%Df z4ivOtOJO+QkM58aXT>kqy?%>M0zuC=xbbw^IgliZ*L*AOjfmoZXgz3edoG{X`kWeJ zZEdYi`b@X3xVSjXIhjsV(Od2rCiYmsuVnC4Ny~?g?RCkYP!DBlpwkjBl^F@0O9hUp zG8~1(kqHfzBsH;|NkwOg@hNjvtMeB@R1!*N;SApb$ig{m{>Dbq#9um5ly)-ml%@dR zBxIk5;MKvm<;6yGz2W}P2#DlQe>fJKeE-ubV#DV=tjZY^o%WfR)?DUS%i@kZ{5wxb z#v%YzW0sBaY1w&gF{K>D|2l}}nQrT+?>`159=JIa`VN9ra>UyU=qo6+OPQy zWu@0oDQxFkoqtNRKObv*z2}KjCj66!Sbg?}ySDM#n zi%yKwaRZ>!%iMlT;8fVXvEr*{_a3a=^t@ zb#eK-KU)h36`HtSd38ZcZTHyCd0V-nb(@VQT}5yGprs#oZ&#q4w%Zj}zLEz~@VuIv z%(;dwZfL-~tm6DxRRlZ>2a$DoNV4FIW6`+`K51QxnPCfLnaGNM85{(hheP1ExMf$scz8aC#z-&T5vdJ3DHlCp;s3YnM;# zP<~qIGkOqDB-PizXZ`5+%F zIEIZD9t^+zP4JV=ER{~Mw7is8s!nhCtx7P_L1nQOyQR#o%lCe9|J4ldjtnth4Hcb( zKa+Ya(Ur>$>MG+CGX*qMDM~*+zztJ?D&v(m$EdKD{578`0b=ZBE+l(>0nV zjCuN6>g0!B$uPw)<1Pns%ZQM9xn$AYEEu14=6opm!4%+ zREKmOY27d0t|v|$&Nt@JV2~l~S_FMBG6QNtBAQ)b+NUY6nH%X8Um%YUNfPW`!_$9N z{s}4ipd2esAFb`L9_SBc5|Da5zl4%w?6@_EMT+9HpCRVGv)Z+Kvc-cR>B3B$UBAZ< zib!AZDpdSZSy*y5vNZUdg30;uT|ekb*79%;Fk=7TOL;K#SU z%_4-BCpxcWkCgKCrVe?jCMglj{?J@itSA)HoSweB5&ML;&EiM5mDtTnlozUXG${S^ z5~LT5yuuE56zU}{&X0`8Y%i-&ZI%zjs@}3J)z$=xf@R--bdh&3Blaw?!RUZlyy5II z(|k7w1Ob-I*+3<4PhW4n$6p!&D&Wu9h)P2z(o&!xu^R}H)7hNp67pl3Saw}0jw_*x zSf0uhsIO4&TKPzPLdAkaG#vs+6-G#JZ79Dne7Z9+%0`f(@<=-NABZ9<_N38CX^T2NdsX!PKn4Vw!Yz zd^|yTBy%>mr{G5I_m{shd@%6r{o{l;cz#d-c$@HvQG`Nw37%W*Hz(gK`? 
ze18dLD(##8GOj$-{x6{ThOVXDo1y|~)zWVa%?UfBJCWZ^Nhe9w>n+aRfU3vK2cge{ z8_l!eb_`@~nO+khaf@z)v1N;fmXkH#CTPtDH;CXU^NjNH4RHHLS=4QW^jwbjVaHHg)zj|TRLXrM-i13{t3s>{!WflT7$U2qJF`)P-C@e-EJpc@`Y)LNt zPWB0lMuoB=0NV@lyu#{!@_<2IFX|JHnhAvODvnbf$9HKZU>D$uOZ<=uzDPk*J2ot7#st&$Unw>oZK2NlBJ=U2Ar z=@_J7ARdTg3l+2Mk9&GUE6-=5IJ4u@YeMqVOWWjT#6)b}Dzy+eFXVNk89LFR!Vi1V z7ZNu9G0NJ)qN8_L7*-S>>K?3^B)CWqpfXuo^wVxlSkE)&lqeR5y(mtR`dxP5l z86U9=Vvz=o)s6(+0g>m_%H!>Or-$ZO>++!d?9}wmV(!1u{i`1dq*+^#SFN_~tNoSx zlBUf@RXUQH=;5PzHI=4bT=<@?T<8$+V$In{fk2{TZNMMXJlG*{LZ&tATdMy z(KCW3^!<~{WTvc<$!d3hTaFGT?6wP)KCVE&-{u6;w3cxR9L7m)$Eeys4V-E zRTGAyJKrK5CE(H)R0IP1*&(}VA2*nK+&-}HN%E~nS_!YZN&mzpy=`&A1MpR9W@Ng> zIS}K+6@FC^>vFM{A@&8ogApT=r#!hLECm~h-#$HIKv!i)VYx84VcPT2{2CxekEe%F2(Z2v{BEi%({JF<=n6ug9=y=LReROv1(TJt@ z2gmaZ>8z2*WR>`M=JSpN%PtEh04RA4Ld*riYFtil_lzF-&6iM3>}vAM`BFYRt3z~z zuWCfKq~KKebhFK&exB(qHi`t)_^9Dh%y{{{l>)h8E*)ncc_A_W)SGa*HA7qV$#Zm` z)2Y;FRY#S!3~qZKc@ZH$ovqOplKj!9!}3j%+B70_n(I5VYsg5LKjv$FBf_F-F;epl z%xrTzTmP(JQed;O`w9@Ae9*!ywepQqn}d>@X6Nn>Mf?zt`32K>hK4PhS0TVz;u&u= zds46oH+_PZuiVaiZ7$$+F3HcP{HrpguCTb>7Le%p$HlJbh5Gu#Wu4o$BaIr;vzU>m zWu8RHi8?%Iyh{}1r^P^W9z)^3?n~+rF{hk_*Gm&kcukYuOnbg_H3qMZlir&3CcJ}; zzCgl3;Jpd^y5mMXJ@zBQVl{K0D|JOH394HkMtkidn?IK4U_a;yO@i{wh|2dOdg1pc z;aT4zo|f&E>pzPU?MzVG_RYJ1D;Wmq3!t!=Ime)nosw}?(0cp9ErSO^Bxn*RdodxQI2fbRyElh-($%8l?uV#mdj;EV=zdC}D$cc-QEz$hg2!T@<~%8=H3TH?tD&Wz_F38a)S-WK>4xBW#2R&9 zWXEX6w*yBkr2NlI_}P53a`J`^nToI5&Rf}4nE+8-=BD}wNE@p_n%EZs4$+6eb$HK& zU?CCOs~4*`{yAUuyB*2XJHr4$C6>T)?k~-c>j0kb{+RQG#MF3t7t`v{$JFPFC4Us4 z!ba|-`m@=fO?j#k#y04-OY9uf7~+iOM5Kej?Bd>VV{MX_08ZWe2+Q*aXqq6zo3};b z%|lLJ?6;PlpSi?a=i5&)Nkot{Wsb1Cv`uz~d{qGwM4$@z-$QMxmY`8qgRsY zI>Q*5zadK|@i_u~)TSf7H|4&67~c!2Yi8Kcs^@i0nc-p6n4F+VNy9qg6mJ8`LPxN- zFHpFgu@Q>;{xUzr1t9OJy|JmiaUhCUha^UmBPzXeOMBaaM?75x+E{>!;(d;awiB@L zyiGOmnG?KwlLqa2?nX|2-~C#Tk^(9}M2q{+v>>9AEjr?qnD=(tzdhF*iPfI-7+=!A zWYut{k)y`>eUB9@_KSK+bXo9xK@ek=-*G?$TbTYR9f{47M7G00NPT9A+!5DmmHc56`D| z4weZ2DnXlVx8mZB9+h_DVn%)HFkPV3#HdTN^$V6xw4*|hs&U74N)EdK{iBwC!dGkq0 
zlXQY7K`(dHFAGW2B@l2)L>d>wbYEGY7vsP40y&ozje)?5&CqGI2#*rY z=^q>rnO+n~G{plSV)oC1Jn8@O^>G0UweG7;sqmxMd0_Oht=5*FV5>_;2CP5^@s$9$ zCjoj0JI#?uothejJ7wq~t%%|19n*x%&HzX8;gTmuUNLd{&YXc>SOgtOP0M){TDKY! z?cQUfvbCnUCar;inb`e}l6y=4LtagU$gqBMv>$YCgt%nM+QI|b7olsY-){ZV^jG4+ zpBif67H=t%)d2!!PHe@#4@IMS@l%VQv50}POkrPKiy*)CZK~4pO0K76<>n3$#uoC? zFs7BZd1ek`KbcYhR{z0Boxwgl%9lI(r}7iN(wh3*ycu zZp!F6B|}w*xMRxXz6Zm?M%rmm(m!Tt zHB`w^zd4-&pYB@DtP^b4^Wzx@Ppz(!j`VA#$*!0C?P57gfbtXtavl7HX&FmN-)mbc zw!b({zIZG+=Nvnxid+|qqxs#=ub0kgOlFrg+fZglX@wO6wWPcd--5I+EN7_1iec=G%`(wI8|__K zPwT8!M|Xw3-Z1tBBg&lhM?k;od1MRVA_7L+MP@18{XguU`8)Zd#BlS|Y2gfh94fJImY zN#CKvSp92M3?wW&8N_yJ0SlnT_6wS#LGfAP%0`t}`$`#%zmc@@6!49Q z%j<0X&zZy++iK)>_kx4(syWM!ax`~4pLd;&3};H#D6C z|21?J$*A<5+_S+6MncTu?1KDXHuQ?*Oi5n5;-gtGeV?LJ&F7GQ=k2TMZmy}r&hYrL z3c0>2cA3g50{5a9&2bOOH$We z%pXJ!D^wfGBcewywYUMwAP)NL2P_x#lv~(+u5L$_?=*M5SMwOxghEpc4#SD8t#QVb z3l`@CFj1N8s|kId(#&u5XfjM`6P<>lp?X5JM&EqaLZT(W;pJRadnP(&q)Esoha^Zk z+q1Il;#{AVzWyAY#jkk>94K@&?8<<2=QEo6(gA6OO?4W%X2l`aUnur^Km@tJM!=r$ zmF3~WH6Z6pus2Xuuy6o6K$+1tg`e2;-Xh4xeS8#ZA19$g`bmjW*p5 zYi`R!D;lJYBmB(3=n|S3ua^AokN~VhOomv2HQ1mWFEpKzLjpqqh0P}eG>}w&mBlVK zAxijn(W84ro1V+twt0W@ZuGHFoJc_eIvtT~l%PIK=Qr<;ykU7_Sjx||8)fu$Ra7-!Xee&{=+lb$hvVwhgi#%Xvcv~gttqpv+nNKaoEj25oY zUs+j@B2=|XH${WL>mI8}RlISc5X1yfv3lRag&MD8l&Qi-c87R8%lO_}TC+P3i-m31 zrq}@b$Io~N8fC%s8(OgjRK@LUvNk&7+KK)VR?VDWG9A*V)E9;Q-uBqPZK`W8s&0O= z1z!V{qG&rW6R-ib)38xvSnpOb%z>~eM9tu3tH?S^oEDjS0U?nQ%i$D9r_O{b za$L|o1Xd|Mbf!5u@c}g)EqWW#*3wSc)LpZ2_XZ+njzLmnyErA5s%-oU%wpS>vGdc^G`AvYVf({!W5~z~X z)P<)66@yGH2Ibw9i@XsPO3gWDHd;4Mp8O>;NN|OzQNXdiYu%E)tMkN0! 
z-HuB00Nx5f#A}`ZN{Y+f{iOou=M2)`%R~NsQi4JGrBmi3u4l|Hp)xRCR*p|cN^;`r zqkN$7xaeAj9jJZmuYc_F)nAdv5fi(Zs6S>PN-zug)-ev#M2iCTZFpOMTlWep5fYFG z*4X1iIy_@L1uy+5lP#UE>)Vbw4ZFf&d5L#$V=$;+>;<`>`rCOdXw4;Ly;C0YuN^hR zaod_C+Fb;we0l0%HgZPbDoKsgxi2n~6b#u=^-I*4MGGILqj?@L-DQu#5?xaUyek&> z@gv-)prW=2;AGfcAMMAn%L?C9N5k{NL1SC(Ei!LSIM-n`zH_aXXZ(zD7wQmv>&?{u zpXGG5pEfz3<~61(8}GS*pXzK3=KjG(Tv{&x$ZRl|_RMIfK|o%3G-MjPisFO#A}gukSW{BFM;X)Cw1Q$=gx z_>N}o>qZ8qlFeVO804M}mwyU-53sF`jdgztpS@>6+7N)q*Qa4M6%o;|ly$N=TQ)UIiAeVkdezDs-EKnd zjSBHDY*8RpZ%E1Wrk5i&U1|LM!O&Dr-rz`&H2~)${Z`^PwZ(#ROGsOF`0XJ{AD_bf zDWKU^W-6|(d@fX#X*#Lmp`xHED{%W`9Z(3+CpqPlLVaQ3*@@i^K?`gjzY?1sLBbpti~{vvB;N(WlBcx$HXgQo)eS3EjK zKxo~T1RsKS)THqrLLD3~xk_xlq&JP8Q@spej{liK^O;WAeR__#_v-mpRb=_?YRi$i z&7Eara5NXy(FNV}zYM~>whyT9;x25&8Mm_mR$Pn;=$lkWi%q2bpzfhDf(q!0D?iph zEEwb#H@FV)gUPwd5AIQ2QdX1zJn7peyw?0|?E4=;&HMWq%O5uL0vl|^T8@gL`&mG6 zBz22X-U~u(g)n8V@{c~R&+u8vuS3|fDY2*l2vuk5n1%BxMa<9q_3x7Zh)dgEXqq}S z?JcCu{A*?V8kMftWe!U~sorgw?v2FmAxQzga+mo2Aqnsf8jzui77YS8v(gO5;?x!g zg5j332eV0H+|bw0J`R+-b^F6fA*Syj(fKMCq^h@?0?;Kb^;we>zmkGy%vH8^?3}B_ z*&Kevc?DL*+2swxcZjDixth(R&l^^0$NR42h{9VgQ**>FAs}m7OGKRr3!l8Sf|0!J zqVeTYz$u(G6JUd$&K68SoE(a3@A~gQjm~vLTlzm(W3m;q>t7|Z1EBo66 zqqfxrb1Mr%QLj(&aW$N+tMx9eiIM4IsBwaqVnCmp+qR=ZJ%{NKA+nltcWS2E58MtT zrub+quur>U-g{blhpL z?N!;NL87S6qAB+0CGk#F*GYD!ghp@(^VH|fEM%s#c(cM!Bz68fUP}PNWp?~OOuczL z)ZhEZJsA7G?+ganBO;=)jj``rp@eK%vu_Q_mSOCrtTWa`_MPmaWS3pYzC}XR=RSRZ z_v3Nj{n^8`%z3}BbFSC*e7z2iSKhR4HJV7CM<}m@@WO$ z`GWR7XH2w{Jh3AV)5$$(3Q*iFDEwR-ln{; zvwX)=6L^Zdc&_T=wJ3S8r(F^g?Bw9(X7IcyXTG~n7fcyopH9T?u5?xx>@p{WxQAvp z;kgn5&HZx#OM4w7^gq`Ouj3XOLA-9Y=k#Mc?`7cltA@CWH;+|VxA8<8J1pB062uRW z(vS8nN*%vb*YeJO>jgaqdmB=FSZQDKhjdKRS2WWQI1^B#pb=K+-V~&@dHDTXSl|L& z!M-1C=}#iq5s#r4RCF>XnuUy_LGu6H`T$*gSP0VN|$VQ(RiH4HXQG83^ zlTkCO{4P>INFWSJFcLqMyW{3klXn$A`~UIXR(oK76ut{%UowuKEAbU zane-VtW`gTDv_-I&_fAcxzM(}booyMkyQ*hp83CFtmw73 zs^e?w$q!~@-UwigNNq2UF`;w?HvO29H=X4;FDj^_?`j38QgF=57x0lm(Na*pqO*`9 z^wJtkuM*MgJIp_UnLu1HaD&Z+VMm~dR|BU$@P*DY%`(LlUJ2gJetyd6pC<8lW;Z6y 
zWq(n;sJQd=RPtw9?a<%jtWNA5bzX%a&!rY3%|BIHUT;cNop%2AcF$geG4bGfW z>63>ei=ix?A4EaVg}TU++zfqSsE%M*lq@K6U*}|^MnpY9!K~+7Ctx|WJO-N%sqx_- zRtyKfKhfdrvNoTrg9(L46>={UZG6h%WyvDoX$C~smC!L&92BOXr{Vy>@Q~d9;}V=7 zOvc}^%se4S_dFL~|6sae$a*-M*d5|6Bcbe2Z~I$3sovVm#&&r0vpgXhx_Ki~<9u|H zv@)pzcI5gv_|i*I2a1 zPzkU^hI|{efFz>h%xWBCxdcUv$a3$EkH&I+AOB2>6$Zj_mk;jSep0RgETxdj z#8HS_1N^ zE#LCX@r3!V^K;9wt@vhju*}Qs%)6D=O%-=*GodCA{0`)okMLQ_$oMg-ED5KK0DG7Z z)I`&kw0VB%)rn=C?S0a}HUjX~?EwU6thI?@z|}|qn5^Cp6ZF^>F4{XJ)^E|gPXXMr z1q?z`)-Aw&d3VBKL$BnEE#Cl!%~%9M`RkTH7;4Ja{e(W(7~Ba#Im*EB&6nn!rYg*K z+1x07+p2%UG@!B6nkqV8L;q)iAY=Dxy6dp1y9FF^D{efWR0UXIU6*ZP6NF=AAJUAi zUaYp4wm4vp<^ZRJyTc4hMmFy=l$6&NaZGbaY`zi%uVWyqSUk2~FouViC^4e=K9$P` z1t24sy~za6AEwHz|1`2Cnph0|lp@B9+-~cR^UI}xJJTu-M_2vZ;?Z zpadU1$WS*WEAGDiz-9rHqa`X!B!|<6a2-QXF-8_p67KVfxsP7!#Ll`yqEFZrquw?X z_ZAl5bQ>Q})nQODC(9&4_(l^wIh^q%?Sxg5CC!8>i?7c5-J5MV_N4>r*$@H&V?YHy zw(c|U!)Ks4oXj7>sEI^m6O1l1(^HmPFh-Xn;-!SJS*1`u2@g_B-W6;`o6PMxbsHT6 zW2bopxb_tCI0MnB4BX1R1rfW9#MJkfSy8b zaD6tA?Du@NfJn2|w;dEUzVKnBC8q;$=` zAlPQwHUy>?GMKbr)Jl{S1}I^@JjR8D1fciwtp?!W0%WRd^kl*At}S+}U7H~1vZ=g?Mu;ToVVaH}M#Wt5Ig!?Tq% zV51wu=qw3#Nzs49Rz;)c4{0=Zc6Ns8lqi_%HV5CAX*pRr8EGP6&%KZz088#N6g^s7 zbz9w}A{>99xZIaLQe`Kwy2G9LroLc}7>-OjAi&=*-Y`QebrV0snRIcVh|R4;L9q%K zd4P|4GBdcj@m2TKPHUUBz=O0P#hTvc9x&{--Z^D6tT=;d6_n+LIVCQDLO>l!e{8?1Nxy z-iDx&H$C}|*vk+mCNt^?lSLe4zTX2)ZSLh^M zjBNbHOl?j-nTw?YHs>YMO73*GU`!aoV70q8qnROL_McEcmOqV8kl$D%`9=g~64-A;Ho*q4`WKdnvW0v?LlkobhB{!gCSf_rq z`A-?!nFm_9-XnHyWScL?r3^o)9a5Tjaj&#b@oz>Hy-1P6egE`(i=0{3rq@ZjTJUWf zb*TnP464aPJOWHUHvZ{mqbykyD2Zu%5bI&j0fyj${|50s%P-$3GCTiMDYlaGY~Clm zej4Ty4|C2NBxkH}n5kI1M&B#rmmVM43uU-ms@hlfQX#?Y+`ask#UF~DZcy9IFv9dQMrLEl-O(kM)Cv<)jQW@4;s5fM)%(|AJ9%dMSgl94BxPI1$48i(U=}mny-X`>!h-o{z-=kh|rl;2u%x3OB1C6 zX7U2{WYkjo=0DlSgq;6rVt2}iBS$@)wkDA17?x}a0oi5I)iMV3C4gW6+(1s{FI$Ep z1s2J49R(x64DGfo$;n5RToMmpjj3cs%l5-&bht-Ln_8afdP1vKxta9)Lk(OffRZzs z(DP#EfLAHp9?0Jw;f!vsP?w);U+(?y4O=0R<3n%|VNlYCV=iV>3$ZLi?f>IFDd%K{^1MaB 
zVqzc+H^gL=g2V-?2w}Z(I@6QWmi&&LXDk-7zW9{wzv}kYuC4l?)6FJJ&J%oxubYspG?z>jO0C_gA%re@TN7fb9Eh)id;_0em90P{meY=#STtn+-(S zSyMB>m^K>4_pp!IV5FZ5l!Jd!G%v;3_$iRVDMB}-Y|>v^C%DQ6yU2pCW&VKS@xvt$ z?o=KiI`EKN&V!c{OQrv5RJ^r%VsNfY3|H#2*QeONc-hsoC#gCZ#5&noZx#4uTHsa{ z%>{w<@QBit16arD0-U{nO!nVH=aB~7b6<8vewO~<8nqx-rIx4VM$-g1&^xgZ&CLSO zctSJJ8+ta5-Mw#lFLS?0%c~{W@N`f~{rBE!_yQ&5W1`D``xSC1kXkyvVzPiH!NR0& zUOtX)-_xOTdUW#U1M;P>>8h$ywEw+dsOe{^aU8H&Z9xXTdmH#(%yfPJH4ibVgDGX{ zB`0~6?comd!Y4g+*h_M{kjxjB*T24LJ^_TrH`v^FGT8b7XSVqQnLW(~UFw4e#N&gG zWGa0{yDa}xs~D-^o}6i&pfMVGxRm~o`&C17wN)(o=k!DFsm8eH)da>1F0wUN1BCE^ z5Qloc&M067zScG~+38A=ENA}HKoMC&BkXue1am){b4$d}G+ z+o@X`euzz9EOO?e%O(8Fly;XZ!(>nmw{RSg_9uGo?3Hd~Y~DU*uLdeiKuWua~BQ<53zO?aHB+sM=HI^XLg>lXl+T#kcVOqo{b4*&MW zN4cuxzlBJ_B+A2P`?-HOso|TfbaRyupldkgb$H)HMjBlf6ZbzcOa`)s%TMUy;zp`( zIjh%fDq?(cKFxIMnTy|P?i#i3j6G~B5i+jcNY|IsvMQr9huknuE-yv2xaynE$Yo&7 zo5KcDxKRTmOX=zJA~^i^79(&|5TbBx=i7k2kV_>69%u z8s;}Zk?Rx{5du6>Dh~j;eK?5-ML+|25d7XK~KRWpRSwW4t%8Fv*6#{wx2e~X9uL*J+o&G00 zki|j%2@lVRQD%*;UTZtxv~U5O{#jW-cL;sn=iCEy2WKf{h-x^sX}M=fu?i3p!AYPc zGxDvM0)%`-7~Siar8Hgns}3gLIfyHf0&Fg6psLK0)+?E%kInTfEEJ;IAP>wojK6{# zNDiM)Xbp*J$r47QIe({^X6?Cl#m~xNC2rAX>|y5`F`R#X5f#rh)@)3*{%SZ-4I_N( zWu@4YB~el#B3QMv#q4QsSQ;K0TMR01*OUcCf|cU$XY_8%vLr(ovhDl?VKS6bePRIn z9ygQ$!;Auh`0ghj3+#^uB*m@!4T%9K5E=Tb^98ovYV;;GLy-Z6stZlr#Qa_J{`OaKQ{H}=uue~s3eIcNN- zl;4^`Akp9r$))!(x62am);L5ggYcad26RUd3RIdTT-DWH#^umXOX7K0(*sX`+TX=r zq^COmg=bAcgs(}D%iki4r!SvOK@6=y@e4p7Mumr%Qp|tIJgM@f)I%_`uMAlp+`{#4 z&4sJ&b1DFWhBxEnv;1vW!NoX4iPSyB|0zFCG4gY$!it9W(~r8*MfZVJjw8^%_a$!O zY;bUrO4S9}Z8MZ#K|&w%O8}>;9VNh zyRiQ?_`Q($lTV7}FXg4L>BI#yw(2$o@fRFCAAG5{sHDAbw)ssf)~T(`?#1o#`2O-= zV8p+JqFmsURL@N8Qq>}0_~yN#MON=6a?9zJYP}bdniKmqHtGMS=Vw>{?X?s8F>7~x z_%jk(OV#sfgzA8Rfs3U7h^%)}8BU-UMJw}67e*JD^_nyA-!HyMIx@x3N6vxEXZ!Of z;r|`zh~~JHr|O~)Ok+(OCmOxfET<0t>P8e*dXOR1p7*c!UI;SH%WG(|p-LSULY}`L zY^Fttyf^$8YIfgBG~_ECPeCaegCp!_%@g6r%akkxd`FMktr{g?VrBhC$U+7 z3&>l4CGc}>b8`QY8jAF=XU6>TzSnK3J>k+n8pn%Rz4?W6xXpMl*=Z>e`}MmR@mEVx 
z80><00OVc(L#({q<1;~zWzbrO5jw!8yKMVKcUl1J-zO>b9Q#DxFFBt9IoDwwIr+); zQBLerCXG8y5RtNN7)%q+0>K>FX%W6928KQGGq)t4eJ8_be*`E4W3}JKP}!G?H3+=s zH4PYapFs$;RrQl0W6^; zpvQhy+Ks|Z#P17-}w28_is4%lmd2n}oL3)^2d1k!d33P_ibI89eBuWvCA z5u-%5Qgn?Y9n~RaZKu*uM0V9lqB9)xHf89Z`IEptRK-#El| zbKFU6%KsLj$eh4+a*sO%vc7PSo!;6xW)lAR2`&Uz4pTRQ} zO~CvRJ4K1*uYQ$>84%uu+;31eE#gW^!(ZPc^Wb`it-i^d?)@dwP92v=564c z>(~lgfGS8kH@;V>34cPw;O&z475eXi1Q9SFuerVx_E=Wil^14i;D_Bh*C2`v6R5E; zdA!W(`E?BOkca^U)>5l13x}WuyR<_s^U8~tXos`iTsIH8$V%Ro1hsK5ARm=9DZQ!l5R-kDl}I5=WlJpU*`#Z+{O|;>JW=OdRJF} zcuODm%21#K;>y6$zPTv4(~3m(G_v6r9m*uy*9*=*?im z_L3xSlsNMnCwB1xseCfEYqYP9`y;ZEH}a69CO?Q!d4O8bXfE^QtaRht7_+vVJI^m( z`=|t13cL^5_+$_6D*ICKb5GU0b(ZPW{YizNcXRwNPHCT%lLn1mYp=+9j+DiIb@iL* zMrxZgjw|0|t@x?(IK;$Uopo%hpPMsSP|iti|LeMq9&Tk2E+?VvdaDQ|d6}hN%H5kw z5%u&D;SOJGO255#lxZ4$=B6q9J^#kwp`bvO?ITQ0@ zy$?5I2)mZxcQwrs@d|yZ(nlr6`nxOkwG-pVLs?#TAHt-27*>)~(~V^neYs*a0xPt& zzy39Dl%G5kYpif0Q-3M6w7k{8Y=6NpD@8t|0bgf(^+s>FN>u_KS#53gIP=@LH51<< zyHBMX9h)UmHfdPn+fQe?@`8n6p(b7t(qal*5O`flv?4o_Lo|>>H4T8ci52xRI z4D_8{PB!WvqZhipS_o!?5q_1`g`(FHUDbm1IYwELdJu_UUtP}ZjO8T4LDm>#DGHz! 
zt83hJ#9gDa%bvjd6TPFrM}Fx$xo>N=7*AzSN7Vc~8UzPi6S~D)9;SvQZ|N7lUYuV} zR4Dl0dkwR*Z&$?<^AOEnm4EMldL>YTkC++h5kp%Qxa_YJy(4%-P%NXynA863TTF`f zg)HVAiPQN@2Cxds9bP_I&hPz^g0;@S?@g^#^?A=G2{0X( z5zYkvp7K)JA+dL{Qh&u?*pqwQyusCnY!*DNb$%MzR~G1D15OXh{_V+sr{^xA<8%I; zs}mGf0qKL1mCw!sXgUy55|7_}Yacu$uBSD(8b5Rz!j)9+b{*>5Ci8*ZQM%Wu%j(we zB2DhvIq3x4qQs!3Yn{Iwu!TLh17k55OcmqF8)UvLO-1g>mXofvU-N7z7s=6`9hz4- zltg8}w}uo)96!*=4}cH~q^)flnR@Rh<);h~4y^HG%o-wwwPAjHgcKOK@ z`BR#fn|bDxJ^M{M*0{xcDvAQ0B;&;bvuz471R2a0#yxlVxpg2G;cs1d9FdRmNuU1R zu_d=QS>eu&$qlqKPyYKYjTF!P#jfQB1^k#@t0+SgpKjvnH@omns`k-7eTvca^&?8d z*bk_IEh`N-cD0n^j(9_>Y!;EL4&MQCwBtTQ(4SmRWJ-?n`S*DP2;4)x_C4Mtnx9&k z@epgKPh={s{akk=34vSooa-ssV_>l~nmMBa1HxBtS}F>jrWcjafSgl?e6oDzH&zL* z8bt9!Jj)*jb6U@SSpC^yq`R2C|F)qUuKG_nyli*uot+d0-J6OteV-K5BM}R z|1f$WmOhcOII-Gnf9EBj>t@dU`LE!N{kYM|z{~dzLQm3cYx)>Cn-!8c!BL4o?Yt;6 zmmWgy_Op7TQ_{9UP3^9o3DW29)$_ru1avPva{Yj{-;=BdaXf&?Rw6+0Nwb#{qaVa2 zCLo=Lj>e~i{ko|)M@U`9WLG)ChKM(GbU4ll)kvQ@y-fu2aE%CnA<$__b?x6mXp5D` z>x!w}cHA@=Y<}kOq??g^>MXu?=1sW3#Ea|{{y4fI(%#u7`Wa}yV~@ap!C7%p*_C1f zBxvZ20IX}md|DQ3I#x_5q9DmDdHwQxInFZ3M3oUj?g5WPM@KK;^z6cJNLa0}Gct2o zLa^67LlV(-o@;^Ao~jT&w=C|EtPFQ-hAv&(aFa=BlYPGF3`^lNMz|~_WDxGvvH{%^!fZ=xm(RZRc808) z=wn`(l&*(UG+l(0&Asa77dc3B_P!(f{j)q=))9Tg9%5DI1p$hPfG(n}yx^^uBS)46 zRJJ=}-P}DI_*aHLWLD9wC`^TNp@=FF6dMq{B^k<&U~JfeY;+BI_yQS5JRYmyH+lS9 zUCUS``lz+?l3ny#s75b1vN|r&pf)g(Oj*_htY&pII0oDW?)4vj&^ifmV>alAF+MQ&^r zL`{`mJGw!tZwf9fKLwfdGikm zCg&Zyb;NK?@Gbrb#6u4L!n}PFWb?>HPf-`+x&M?Tk0IkF(FJh9o-;vAtR4$N%HI7) zoLbaP{UeDX8lFf`;NJ!gIQC>Gpz{4VDLU;_sH`D9p} zL&RLQ`{qITy#`Nb0tS3~1%qmV{}GNTRp#lR-|o6fmP~LUxT{Xq7(xxi_GX1Q3<*Du zE%KH%z9CDFvK&}grvlu}tIhopSoCsJ)3x#)7|bUvc&$CjCQKkp^8QDaFRuY|a9?fZbIs1+N8yL{ zDQi72&U{#9ai}-EFjENz!`?wm-I;?Mmd~Y zw>-?igkA%oV4|Q^APF`P=f*L<1e>9PB^+|khUH&w+AjWJ>#!=2_;~V`>>Fq!H8+$g;P4aV zJzd1dQn#mstY4c+R}1O#sFR-?g)0TcW z>sZGO|4}NhBt-W~TjPIq1Kla{|2CaplViCphOx*i$apJ-IoYqX>18ZkZX!coIJ@XsXnftWbnCBChrm3M6kAzBF8Zpk41oz6LJ zhk%7v4e|z1IB=A?@fLf&E-nlQX|`6QB#ocTg0WivtkFwKn7fy`2h6iyQdi*m0@+9} 
zZQL0It`a!?BY28l5>cNc(9E2}#jMKcxby~3)kpp8Lax2dk{d(OCSusq=A_V$_XuH9 zHN4`N0rJE}X18M8t0+d@CXpzZsEqQH{n7XrlSgqR3{PD&ROz`n$7DmmL&^zq`3rDm z7i}f_R`PPs+%7mT+Pth3Jr`GiP18hEV1j@ciHAr&~Mc>itJ_Jq%TV%8E=ip zU|BOrF*a?4VjL|cJe+)M3+`isA8+K>Rl4s~r*x!?VKu9eQok!!m(LzjRV;>D&9ky+~+u zH^W$?3CJ%GUk*Ji1Hq0-%KmJq(>=uRCYRnjuyDA#R(Ki}w`#c?cjaU5jm-mwj6;XO z_=<#%sNZ}VxK9a7SHEL#yQCX0y}oyeB}TbADc`E6y^c8sA@M}y1o1ioL~FQq+NWB< zTJm=+s~{he$5I@mKi)bLuqI|EmA{i%NMbTcEnM~Lo^d1-QwqMgOjzHoS&Rcf4}l)q zCuamA9?A}NR4nB?_4Ct!YtkvdYfL^HWN}7cdjr7Wz^39$GR~HOExo6cN#~C%9qjIR z`gG;N1oj-}TkZKm5>*M|xo7qh6XU)^S)Nve!0yOa@Iw@TF@H4ZQ#Ur&G=6X3+HDn5 z^Q&R`caH1kU_(@Qs5Ls3=WW$fAGm&7a(-(tK7GM$l5mUj_4D3GJh#JabSysh+^H!$ zo83OK6m(?zVk*z0b#}J5rNvJWI;zX(RtCXJ%&3M+J^epD3^F<_0d{K$3NB&>K6sjW zrup~-WbxBc*;OU%qc;A*k2T(jts^q81O$KKX80`AH*lL@_9<@{?%?@vSKZ@F!>Ue!70y^LufSRqAgB(!KnLRrs=`w+LI z>lTcZO>4*{w}T`x@4gC8dGE(+9;jwJJt;@n|K7|S|L0M(kWGsZEyqJDIHxeL z)FY-F;`FaN3pxhUbPn%Fv;(-_)M%EV-p)?jR>y!nijUQ+`-(f`4Xp&iMzLkD#S-DC zs;RV7qmF^dJS9qaY|B1)l-k z*Se`bc`gQRVeNM`N8M?;yBvN|8w9;B!1=&@ z*cA&$P9hcy1cIuRZX8f|3zYM2C1zpj2pe$AFX?-TY0XMM5TOvPqxZ-*}|qqZ&Ah5`3rA;lT@rF|X10C(L6Ytr#( zG4HJi7CB1G&mI|*erZq0P6ESc$lUlnu^V1I%KqN+2zoa$8Xgsb$T0=1Vs8Gl13z|E z8r!A~Q{6S5IjyMJCcw+7?W#YlHBKX(+RS}M`#Z^fd%e;AB+y}gx_7`iRjsDMezL)) z-6J{Lk@p(roB)4TBi$TX&EC7W*L8lOyYUh^`G9L|tGI|UoyT5U+uuVyuA|_3*9@1S z95Hx2StyD=J8I1s*n9}>=!L(AZW@>v9_@06y^Q8^se0hI!u&E z=b6$Cq^h}dXL?j+iZxPOEMQu*D5GX{YiM8ExN$@EA^YRYi6>cy3B%WP#Y!(@iV8`& zvdnQ#-gTva`9fM^lXA{z^xHK!Isz$Qh`F+z#}xT+DvpoJ|De&I;4fOt@=|(8Q|89W zdXtKe!?w|{(`mo2rWKkg^EFgcN6algZrF8;7c=b9tFC2)`{WT0`Tz|jq*L&*z=*XCPS?{xG={&M<+-C6Uz$%+hfTZP{1&@rcwIUcG6e(sZ(!8H+6^m5-b6wdQbMmI4!S5?{w z#tlU#Y^Vk^dt?^iu+fA;hd*j9bHeydD6{eF=3wI0axrkXl>|}BLzjceEsZL#tcf9T z9IwVae6j&3MAu{9Fq)=dsW7cpr&s{UKw8wvJd-hr%`eiR5y6 zxX|m2)O*b#Up^1XeeDvSTc7;SOz3{3LDF;{HGZuYz=6efDV|!z0{O5xy@W za6=*czq!A~Nk$Mmf@m;8W|QMC&nAIm`v))_bl9lX9>(KH(UW#!)_I(5j^dbCW!}M0v)prXH8;OY(D0 zTKi@0Tfrcy;eFc$AHBxF_xBUmWL`~%1}duyl5Dpd8`^qoPNep$qK=mGUsfm%#e`1E 
z5YSeYM!YYLcu+t38@(kS^ow3Z@SC>g-i=-FEL*?3>|rk269PZ|q!w$QDUd6VMYX6# zPPtRl;oB*2rWTbycm`X1*3SWg1{8?%3!Jav1T-tSv3#|H686(r(O>of+ zUAY&pO|=Px2;`SR2*~;Qh;TSW{)npDB)9H?zTJdx9+N*%b*D&P>aN8auSfc zHq&DjN$u1gtbb#T*?sKk>TvmIGdk_{LVR0I{Pm}dh*jAVNpZ<1BGt@&ly%g%So|3? zJ^w@5l=VEkpB%6(cfh=d4};1-%C!J1E47D)&PhuVLUZG=}JmQlp1sVzJl#70vxlaoq` z^1mt$*iJF_G<>vKG3tK4+cmyFk-=oiO(bjKTxh?ay)JartO&aOk+_Gv zioWpErJ%bxqT``~oeI8JTy2(m4`m^Nr!O2y8JOaF21X>TDY8BuWTodRJ&mmOf+hzq z;Sg?5-F8`e3;(dBVR@IK(^4FNH>F-Fc{5Fe!X4#%NS_V}nu`X3_s)da?EXkI;8GwF z>J-0Q6Sf?|PVdv~wJGsFfU*za>m&q&J%?uzMbcqmqAhEKS9`Tz01=nS@Ma+pkx9ZJ z45ME|su0jW+4w}ym^D{-p+qinixAZV#lj$SE&G@I%l|<)3GQ4IJ*H0NVDx7s^n$`s zR>S()ouI3S@R6PwOEjjDGVXiT#aw^t#VPl6zDX_Tlm=mNAblbau3o8Ui*GqI(O5J} z*|+(#kw)T4D*to`PvYj)2F8ObYBm(Eokc!bi+{mSU%AZK8AIXsHjRrKbKc&W{QWY$ zdkTMX&n(Pr64Q}7r*K&~3{Fqt2nsdC&L#vts3F9-afo@{7!NA@>r8{v45P8p8w!4j zU~vV27tg@1;C`4iI4-XL#@y@cbpnC0P2f&HM~yxw*!8)3ODc{x+^w1bx;2}ct#i7} z^tjV~7SlH5xufD%KptG7RzsVKydq&(-UtTBd-`pIuJdTNHx&3dJmIR+u+@K3>C8}g zX7G|Heu(YI%Pv;dKTzrzQgM|vK5bmEg0bMKt&Vf3@zQo z#7X0$#mVBYCwT8f9F>=sB`@|F&HT+T3UnMlMFL6QeV)h2-E-i>@NxeVkSE;klD=-q zYl~zrtv?KQwUrNbs(Q>*dKh$@nN?kL;!oh#!_vp$R&%3<9)i)+9(qz0c`fXvpf|lm zsNBq6TG~)m7#xRVFD>7fj^`iQ#q>XSa=o6gCJ&CSs5m~l2}6&rWce4s2y^%<&b|T(V-d-z0qwN&+{!w*hu5{OX(xKENR#~ z+s=<;Voc1Go$t*cCQkDX*GLck_BxTFItoOVy0rHM5iWm(LX+u&DxD;H=-8}VNmiV> z(u|+W^cOpyCFpeFGAxW{CgoKTgbLqs&OpwKU?^C|8X^%a5f*Aex#;gmh%tGg1mR&s zYt)bue9U-7hKRDF82K8E>|RzQW~kohX(q9-Grw;Q7*CPb4FV_|XbC4hzu0+OCG}eAgSx*{ zFG$M~$5l}Bc-Z;hS(wdSAC=%Ve0(1B0cEGDHT)snDOXiWv#AN*#2*c z&_4V?|XIse#FIN_+REHNzYHyE0kK z5*m3$G|nj+RUGW#%g6`kQTNpB00BWhgIDZ$j!04d`W0 z7X_E1k&H!DvK0)591sW^qOGB7h7}@Z5WKl6bktu>-zt@%Bfwhq;W*}?YygQnZkZ`a z`QHG2#=Q?Ycz@s-lF47*l4G`2nzCHcM*!_S-M;*l>`QSEyJt0hHN(x@80DMYH+EGl zIDW9Ac#r$n4}AmUM_$zf;bwG;`{18;1EiZrS^(VZj{Nrl{@|T|VvK1r=Bs75!JH|n z>^QhRkBJjg)*9T0oNQji!tUQ)K3fXiX(A|#V1QxHCVe2|1DXOVgxd~_wuP4h3%2}k zYpU|?w=hb(zV5t^=8IB6;T%Qjezl{ZUQuFzn9B1Ve$){$k2EmWx7HUEm5vp_Ryz0x zKD;?fw%x9M*5Ko3BDp#$cQWSA&sbwz-xqQyCa-n6w4f0Eb179OdWln06U@Kc&mu(h 
zZE+}*!yF5vv~0^hnu79;Na>z4Wo0USIx)35L(C00^^zz3F@Q42ZO_W6W4b?57Z3L8>h*TZnvM(=nh`H`!#slhj5(O@!VqaC>iyii%YK;ESz6m%k)R^J;e*B z^t$7Cq$y3Z(PQXomojV{6Px6hXm}McOfWXM{xbu-fqzbSy&4|>cQMY*s)yU#A)P}C zgN?o|pl7;u1TeU*nniE^FuKCP!dN}%HC}mFT1Xy8k)-p>5&P)iC%>#`bR<4*Y7xiE zB;<_QZhz*W>#h`uq!{Xzi=_W}5Md9}i<5N4_IRd3cZnWDP^iPt)TjnYl_dgWI$AzX zd6}Y!i|>|44BPEW{TAIdGY7Vznp3i;>;*ptkMtq&6&us|Z}WG=e`MYFs<aW_IZD12g`SVj7FaT@t>Evj<5#?Z#_p{KJkea)VsFnK)mv)?=u=O{dS>7lsP>3rbc32@Z10(TeP( zfznf>iJOxuE>UyT0Weh}HM0INfwrhs!_*kB$=pxuQ=zfl9@_1V!jxD%^CYq^sPE5U(*)w`UOCu-vZvk%t~N;wTa3VcebWyb%Z(}O zg0F)grAF=u-NChe8JF-Ic~i9_NXRg{HIT#qu`fr3e%5?_q$FV!?XRM`-$PBQCfbiHNxB|(8I5CETlvOF$Eo@vGIA1)cKpDO zYGNPW&wNWG~3{ax~yfPCYR zy7y<1iGfCi6{#BpBAVtBJ@_kR(73Uy)3=XB;t1ZlVc8I>K-ivTZ@@HL)EpOhY_!qKjj!Xb0Y@jVC=+5oowXLT&$7v zA_y@eTGhRdsd0rF_hS?&+P=YGyt;SSA|B8OJD$My+SZ?@+SKUteQNy1Dha%_EqW=G zz2Gb}9XyT>upd0~QD{9YGs3fvz${=7v+rqyx}L|Bx)5RyI(-GXABe|z{rbn)6KfXX zZW1&{8P%dQh1L3JmHY~Czs4r~V3X(n<%`i%mxuvnXoZxB{IsIf)rTZ?h7Ftao~`3g zAszOr$F%x&(gQd#7*$rpc)8pI_p4_7phoe6czW_#bE8C9Q%uyuwV_0m=$nKuO0qta z^ZR?8DHdO;zOR50WX8=qYKFGfC;9&mQ{Np<^&kFy?2#SW93v#MP957Z;;8JEmCcbY zvbTgBPDVtwtVFVR_Dc44jFg>~mHE7Vf4}E?p1-;-xsLNW=kvMW_kF)s{^i@0gJ@uu zHRTgt3w95ZA__nME?L}lVCcEyP$6x;Gc@+GU;pRpQ|z(S_S9KhZj=3Q=T)|ipY&F~ zD}AIm^2Kj0&_fYYM}aTdED&6aLu}Nler3c&nT1w%Wn9<&UgyYd;*myzH(O0y0N%El zN0sG{D=>pS)UfCj`^&8gS8HwiM5CFTA9Mq!+dPNke`nvJL*%A>;NfGCxKBxqDV_6N zs5^#mVrok^dd&o{M_VmB5m#F8_Flk}6)%M=AUMacfG=z3trPN#{>fUCp{)p5D^vOI zl#_lc&hDp1YWggXL?i#Li|Ho6ooB|0iVE%sxD}=WwK(Y-H+g7Ek8_9Tm9YQv1zP{} ztyI8;HS4$=>>6BN10hCw-@?;?Hn9+CFFdBa;;UU6vlDrO9}n_a^k#*h%jm+$y&-pE zVEzVGa?0qaZ~XE!$ib-F8{Zl5IZ#g_5@VmoOaiVk&VM3kX00<%uxX@pv)b<}B4?F% zGi-5tthx;Kq`MvK!weYayCo z4O~f)T7+U={vlcXefCYFPkW~q%lY2-`f0Q8ZU^X(KmN)F&il{CHOQT9i2-P9sOwkt zGp%rT7sq>Hb*xrPIUk7?j3RjR=&D4nKYN1~cUo^R^5)Q#t_11``$U6@NC3IKpOZiq zH!$8D!|v`B&ij4WB$`Bg0Ykmc$k4=9%#a`N3gXpg)Rv;JODvs)z=NBM!xLAy`)bk0LA#q9Zt{h|x5UUt8})^%TH~P;i^6nBy+64TE^}?% zzsCGo{v*(}b^RKZ^uWJO*nIQPl%V_NSAxW(JEx6rYnbT?UP7t&F!?wi0J&vXD0w@v 
zitI0gq^Q)3hgxcsYz1%E4b=i}9-z>%8K#XMb-Or*S|mQ4Q~P9g2Ka`OM9x?CZ~$7@ zeiMX}9FCd708?O81@@EgFWW5nOOFirdE343_($(OsdztQztAlv8pByqH1`*-F66)( zNI<6gsuGMj2V3MM5{GWk@gYR2@W(X%U0GqCUG?vL@6A`LJwM`-;Bvk6guC{%I_d8G zS!>}*AM77JjQzb8q z^?L&I=Yq=LUl8KB5i*<+06v_^*I7Sx#<7~eb9IP`7%Wa?g`b~9Butzz(Gvm%LmeK8 zwl&QsU9wbS8||F|*&TZ_q>wNs3+sA7%5gBC6Cdoa<;KX5Y2?8zVqzsweDsI?%8vb*me!~*lBMED8;bMfQi+>KaXDoe$65ec^tU^%ue>q3@S zECK~o+w8{pdRbhW_`-VO9C*-id$!f8&-aqY-;eo5@Qk`jK7dw zL2DQkFB&9%cp>8JarDz#?>qg)Ab~YX0P&OOZ@#l2j{W++p1yN7uupSXOMBGgKxOst zJ|zzFX^!vinva{I9zHiP4Rlt&n3=`j$6Lh{DODQtGBDLTS-kh_F1LNd%7lMztP7F*>AFQ zep{#p$T|Nvq~HB-NUwguUCPV1R<1S&s?{PBfOH&Co}q5-|%rwSg7W(`m#Yzacu6 zC{CWIuGVkeJ)}V$^tDv{?`O~A0Prvp1o#Q>7mKXIZ;xh3$__;?AbjPbv+Nlp+iW_8 zV2o{ln{sI6{>%{#YxwKar461j5@mXIS4h2Fo_F@%t=?{R&6MG!0nC2tc_DI;UG+%G zE^w}#ex5B!A4h{;?fD|Ou2UH}$P^3Mdyui(r169aH#gJk4JkY3!mU6ULLwhY${mSr zRL}+xLY?PX>>s&5&uZK@@uBzC*S;=u|79bf$C-=#h3g?dGCwcRMdbDwKCOjxRwUi`B%jDTzlMh<=Biq6FDTY=aXyWnb_i^oC{h8 z%y1iQ-F>|4vG|skwE;HMV|-ZYJ>2f$P-+!3-trqa4O3qqGhO8X^#Q5|PGaIjEse)Lxqbhw;aEmtGrdQg{|PC0jK%jGemJ}_#l&o3g~oT8K8ZnXy`#q9CZ z*YVYRy<_`<*Xo!pM9xUz=PST20Ne)OIf>nU)-L5SB#Ivf@(IRfj?X>qbw@9W)k@Pi z(W$4)N{BIt&%nf_}gkvuf zJ#|>5OCjM$@NV2oL*~HfNENE2bH!w+gZk zQ+>^{HiFh8en(X#du>zeO^W1Q|5o6M4_qymQJpIgVt-02FSr9397eugQ8n|Xmbe(6 zmlMN?HbfgA_?%GgF+(n9O*T;>_*C@~HuEIYEFm5Pb4Kqh_p86OR=aRj9?*^G)dG}I zOYbfx$BpzNdsb;jW$#*{g)e!ycm9Wg@JhL+TX!zCUYVLi!nJGam$gwKxiuNZdtkoA zz$O8_y&Nr;Blv66TvC$4=u20g;fU;V^#IN|a^O?Msk6D`ty{?fySr|83_nUea4wwC zv6;_yo3;yVa&}!4?+5G+YVLP#T#NdToXLSs;K?=V23!lHhzE>3-E&kP+zJBap!Vl% z%n*iLWAGzKPNXnxz-p7zUP;&Q0pj8WFKyd9jTdvB%z^SEo%hqfvDGRYJ(GZP_G00* zrBP~v!3hn=(Z%-bXRmo6m}?3O8Fytz(nm>N>qKZ%jfVLX;B`WglJ%_pA9-4}uP`SR z7_mlD(@=U!i&T(7hexhx^!}LEfVDU}@ax+H(*S9l>_}MQ{b;MX^cd zz5gM!X>R+u*CMEK?wFGy*Qr)c8m>O{?dR54cfDwJD}l1Z`ai)Lkj+JhZ-=178(}wiN{cK6mBIB}D`LVMUcu36GBZ(vLndWQBw)=ER1q23>4Uiu8hH z>f)6^g_eMx$^=?ZzmxL5#_pBj+i_`)-I}0Dh4g@KyncxuuP-l@wcPBb3u1{dL()77D>BuC z8PO54{bVjEqOK4nd#b?ss=;Zg#QfnW`NNfX_MC?jbzq~)QSoUXK1@zet9X$@)Pa6Lzcd2BzCOA%B 
zT?yj36`r=T7VG)@z}7t{$BKHx;5=aSf7Lf*Z?f{|CM}S7@#ViQ&F&4-qQHjn6w33J zZoaB5Za%y7Us+fupn^7ftFn$yE-lDeZQ@m*nmkjPgpi_4O-(g4 z21g4t$%+4) zTt?;1EJlU@L{(zEaR6Z_G%xA1&3|2EbM5hIScY7sgD%g{c7lS1oJH;s=tdFCcz|bm zk3kZA;lWeHs7h^fz30!L+uPe)Sy=@I$%AQ?-)SyuJLlWHe+{%2TsahZ#8JW;cRrdba2g4=ND+XWV z&E_czGBQM(wz07>Fdx#^#{K++I$((C81E5rV#3$2qFGumUc4|fTj=TOVGlZQZSV*z zLFBN`!_cw>Oi3^eUW{fLE553$4oWRpb(c|4P>`IQ-1C>#WA=mAbwnp#pQ?_Ij-Q{@ z5b@8gg`lhS%nMI3BIFzjoX)f~Ofo<-*^5?2r?>w(Hhc0)8Pu!|sfgKAGk3)A1F6ZdH z_ybW{-_v&=ADd2!RgBWNT-hH_F8DWC)kK;-CC%4T>y8{A9tGjU(_j7pbiy@;9n2QC z)~8Z2=;m~(K^Z6oPE?QBa&4#bi{|q_Uf3&{2cced|GIxXVN#RA?0QH=CII|=u;t>9 zi;!6}zNpMc%F~oBZ;-e8hF%ErFz9@b$iEq5 zId@)3zLoQO$Lp!cjg1v!4XilkS*EhY??1rR3)sZe$UPAE=MZqI zXzAj^0Ek(VcNwUp&4Gc6eq^8~Mjc#eG|XFQu)?8wdxSI}3u2iuwz${T?K^j&xvK-p z@bcea(4{K-fygut2Q@9-2|wO#qCUz@dssyllveN8nKkTQekxsVEJP`Ptgu zed7~YIO`gCJvDjoh4*%iqgk9a-Hi_Rqn}|d=;x5yUdw|vM0Xput zekB(Z2oNAt`q<9sLnjCsDf&AJ9h}#xv6+nnyhnU))CFFysHR4G{=9_~-c1%dL8Gq% z91YP#Xb%cD^2olS`=Vhjn_V*WPq@gN6qB6dsH2@{edj%Q-$U|Z-Jo@%mcR5Wk01AK zGNgW_tFZ(LU}s=`C<*ci$Z)$H7l@Iy{r4IQoq-N#{(H~2NN)O_td8&6 zWO?Z^FnB!y(h&KxoW zwtoPw)lffZhH#>{OfOne#8jTCUSy=d_Cmxg&Kypt%L04)J!@r=HkzP+ho{Bk3Qosn zo#?vyCfpogS%xEr;0eC<*pZecq0aFYgmC0oZoBe@l>#B(?;nu|M!1d;kZyRd?16C6>q6pWi3`tp}rgzQN8&q}?r!36@!TkU{ax0wWiAv;pvW z2IQxSlB#3k5OzVc@{I|G1YryyRE6kaYTQYJ2W^yXTD-0WA6u?D;MqPnxtiY;;R;sD z)^fjPaklNdE0wB4G?fC$dmV&F;WXD+RsJGnMu-UEzEAcPfEO}SUTiNmf)VO$I2cvv z?FBTJi%&RSOH;;ui(Uz8kV!SCqeADcXl@PoA$?{IRz$(ASP#JqA%~gQP@F#{0;lMLgWxW@w&s73o7A_zjUubs5xbsIUXo_ zmM*l5jUxD@aT{@@g+sRH@HqI}gwRja6?F~zZz4h1s!oB6KMOAKk<_bzMD>z#Bk7#D zK#9lb-zc?Hw&8$c%MTS~JZ~MzZo&gp2()#L7$H@8>(n_vI*nga@iuueHL*j7i30qo zx}V_Dd<|G;oDcs4luo&REr9$d|Awq}=y%#XlshYggfm5F_1@o%C3NuE-lEHP;1Za9 zm$h5KUN7psD<`%w(6ilnN_RM7(^xaXR{B=Oe4v|W(l7Zk}OSDKuk$Le5+I*?~)C&Mtb zmIHu&vG+#!pEPnLsw9%}-IqUSnVLkbkoD@M(VWi5(hN(|x0rE&-2yNP>?)ysioWJe z;*LykK&xI5Y0oM_JM(C9rJ4Mchsbt7YJ@lB+Jsi!@NQ-Hp(z(Qd)_>`2VrV$ac!?J zP8xOX^3*ipJbv2r-pNL=HT@o>PkcCYSL}XafA~{Nn64-5B>rOu?#!Ifzsn6 
zwpI+z#z_QE#b6OL-iX*O=x3vfi7r7ZRHDu_`7YqAMdNa34$(qrjNE_qMTWGr^-8e- z0}efUd8FYe!t|Jm4*7B5ng&ycZ;sn(tqY#LVFV~(C+627g%pU>KW?*d&z;Gklmj8a z0FwbXM%-@keFejZ0Phd}08%3X5mVrgj0A8Bfr@D9b`Yaq+Xlq#2A&*$S-1Ca|4*J6 z58OX2BaQ<}42ffaH0;{r!6A+hHy-^r<jJMg zsyzxUigTLU01Ki9uqgX`pt(8?BF()AJ1gTv?y*ebg?jNqMbQPa^SU-%>oLyF?@<3B_n zbqUU1?}6*djYS_z31dtc%Mt0pDJ+0>Iq7ohTvj+{U=|PnWWY_Mg(-wI0YlwHINWnO zlOnrc8_SblGZyB1ezEx37y%c?tHK5u8cmW9j~Lf8^fB*DR#UdL1NYIxi}BBKJooYF zCXe*1q*MERb8}{zqgOcQZ9zggyxUG+{W)P@*>V^pVlho1ozEn z{bahsM?k9N$#0uASDcX(K?ICa;`3D)937qFIVC*h5C7ijAz0%Zwy?*lWVsIEuFz0wxtUJ5{>x=ilx27wworAgW z6Y~`TRwwg7Iy{r{oj5Cu^iBWe%WeN*yb)RXz0pTp%=A}t(G;@X%BZ6w=5A#oHFeXL zRg-C1OZWYnKXTi^jb-7o@o|TgI7vl8< z*f%D@k2F*?(sPQB&<(Nvk!XmYY%7WXHt6W{RP7U=jnt!h!B}S4&n2_Gd`ZpNNltlh zzVf2K#L&%fJW>Xc%j&1}*Wpx=hIFx_jphwhI8+1Qw`n$`zBgn{l!BXb8v#BM&~QW| zke^Pk;=8Z@n~V^z@D25;Gi3p+jPe+tGmoNv1M9$r)c^bPt{rwn>O16lZxW397d+JDEmAY$6GXMK41+9p_D?G8c&r` zJq5hA!unVXBY9J`*JnnCi|{7AigD zKgH7Oc~Sto97fjudk!kF5fSOWm?x8BqG4CI9oaV~w}F^kzTS71jX4uOQIup+{)@sq9|j=sNIG*e27=K(9^Bqu z_#=HoV_k)3{oh~`EsS`*Z6Vpk0mvuXdl4U{*x)#)~!5euLzrT^udIvmp z?1B-aezD!Y@`6A8hX_EZeUkd$}u@?|@)o%m9W zH)XZ_Yq>Kf6ho|D3d}0H(9r08%Euj_%;1hQ8?BMSmF7#FEtr!B?u-JvQPl6&ezKDB z=Em=9IV!Dvn{@IxAXd6z4uEl`kF@8NFLvHq*~z>gwjn=0j*jl|h&Q z;tCUuHGSRf&1?^;($`@2U^TTH+jH}+<`gh%8ycdbee{iHCTmA$R^;8I+?Y0|lWh&t zgH&W{U7DW?m|IIii+jt>gfWnZd@lY6d8OH^L>pgkJhnE37*Adj$AjVSt!fc} zNTsAMNfej2GLE=AJ@uFNa12|&2@-gwB?-ChcFeCw7CsbxRLYoTt*TS^%`qKb!7}%r z12WwB1OwsiEUxpPfGJc!VVMP3?3~1=A~mlHiGh!ZSYH~u0;dPoO@fN%t?A3X+JbvL zpJ9?!DNO|wkcg5Z)w(LPpubK|^zk2?3;+1*10SABH!5ca+JslZ_`{UZ=%1o@Z3j~I zMueYsuOJC%vl}n7ms5RT12t)f$iu_1hlhBdUQG}w*#VnjL8?c=#%15|f#u{`M_%H6 z%A7_iF^ZFr)v@VXLWFtvH!Ig)wEFmNn?+%9c;J;vheaaMV5_Qt0s<0#d*2VnfrOVysc z4(8BTowhmWrcz#Nrd-k%A^hqOA2_Rybp2svP=wfT<3nF{SZ6s)-e884vvL_Md>NEI z>j%BOX6PHB@D#xSmV?jt-7bXt9tpNdgDj32Tj>!Byj(b3;Dj{Q`E3NtT5rSPjY_b# zdgotP5rxEf_9Je&39DsAC?>>8&)(nqi2O)p46idEhZiEcmV-1Za{KG9E=skl1VHaollQ+O&&5{t6;fB@hZH7U3O&hV?O|+zU`Zqw7Y;reEqk 
zR7@s7vYG_brWqmoG%|Dk-S{*?OpRbNC(zzig8C422wc14-2y4@PbZiypgy9x1|=qF zI}sKz9&)n9B1jiW@*+EMgH_bJZ^DvsIN8X!fdnF>f)><;+()DYX6T^xa3 z6Fn!)(DscV8ys8MGC&DE32>7Q)4Ear?$3kM(nl6Ag#hbx`NRP zY-1avCuR=Qw7h?@XG9PaE$*4qtT^gJJ!Q(AgG7T`fliLcQX^1J=mzFF6$VDx_83Al z6ETO)tYu~r0kkVoer?^+Cf5O)auvhBi?K2}x6z6Mk7K#56AvlVA-`zH=FOTdlfn!b zIH|%PRXfY`QI72a!UAEQJ3q!q1+f1OCB!%qv}Wo&rXP7aBc3yY|0RUd-F*B;IT*3~ z8f=(~*0S8VfILpm$%-FC${Ya|u3zowfJGxeFRJCP6N~_?fW1}N~hP@C1hGI-A zk2{VY5+lP}te_w^jAOLADNU|%PUy~!5b8z)lShF-Cv)pR;|z`tWdLG)BA9fZ`Koph zd|(iCF*7&vo)yVrO2&YsV)%zot+0nZ>sU0)%`GSl@bHd++d-+%V*UCth)`~xdv83k zQpb@H%=KlgiGkob?}nrVb}FQrgTFRj|G~CYHQR2c|7A2m#**4eivh{@++7|X`I8o2 zh`nQk7ecpd;ytsHW*SlaX}VofRxuW!+zmtG_9^@6=kYBemp(% z|1;}5nISg@x`P!m?iL1EwsU3*3||?%KAkM|Y+2ocjN9<`XY#ZfrMoRd@$vBy5fRPJ&31zs7t8Ed!S08> zxFjEoSI*S18p3`vM9-4Ok1G#k`~=0QHi|x7OJV%a2{+Njvk_p9Ye6#>g2kJ>Cw=+S z{A?XV^yVqQam4(RZ*J(!{MY=~38^9b#r2QV*wpLi`C94=^7dNuY@vWjlQY`Kdmj%c zm)NU5wwxLco8Ao=e36oFJgfjNy#kM1SQ! z8+p{ggZ{5B-e5v{`0^q`agB_PXGcfHL`C;kGlNDDzJ3~}Qh$olEHWf*b>D%yyy?3^ zruKstF5R)XM~qDA&8SzkhczKik6W%}MuQVI$CC6#cFU`=?7z*39u|K2LPd)5@;Xl7 z)wu3RMiHq?u;M@7U;Q!}9)~6XNXf=wLli?hDzvT9hT;$MO|wftdl)V#-j7gJwf z|G!gWT#E8TbS48PSb11N5QafP1F{g7yqW;DGroEQOfQ_yxGYPtMC!R;^ND~Zhs%>| zY8IVf9MFM0BoEv9@E>X<*sk|0ToPsx!i?T`*iL>xS>b|fEV+Vz`sMl!sHzQ)3Dr}H z0?sY#1&yxflW@wOg_mScJ~0Tx&YX4vr@0DBf4&jKw%z`9e~H`VR>re9^{$mFdxn(O zhFG)O7yn9$xcx86|E_S-L=#!-F8!Db3@P%z*KMr!8x6^G(4Qw|if|opsWHv>R&b z#w6Tn`topc!THSMbsbttv!H%?eORX@;A-@qcpWHtE-2Ahv=T*+aNFpDljFgkIiKyz z@?e&p+PBwo4bRTI>I*cZzbxgHnCKn^7Ms2tdU?ZXid0aF%I1jv1?YGN7d$FmDW28& z3Oe_3;L(3O?CKU0G^-;_pZgl{Ix*+g7t&Pk+Nk(^5lKp~;0cy{%@)gWuSJD>5WE=C&zot{oS33bgbMXLD}%KbOqRNe;~J9WJ^JG&-jKTv7j~vwP`2x1YX_ zB~r3Ud?3^4-~V!=#E?YdK4=?J_*uR+GM*`lbHN>2t-+&kU1)$N>@+U~|BsL)iCe_G zVbw&ry;;-9=-orcBubj-dZ>KE4h`n#tq(ZD-{i1$8n`$lIi-e~gX)3}aX&Jf9SO>b z;n2NH6X%-LJ|VJ6lltX|{cnO0h8lZZ zIKAWWF-)cJLX~H(!djo;T}A@Ehjaq~yn2!g+&F$Ejd5T>oyW}sRMKwst1jZl__Tm| z&0qZdF~A%)7+2rb;Cg$I#lj9{6{&y}wA#}K3T%4Q2)eDASR~cO>lojnWd_yLTlhIX 
zdvC`}sh`exPjin;t8;VU{h&4%kQ5}i;kRuF@R659}~6eWDiW3Uvc3i1*9YA z47*J3^zs$=@)epkECV&*A~ilevWE}L*@WdQpKc%lcorLZ)>H!+jR<{!af&64drisQ z)B5#l$Ft#?S5bZ&5L>tRNz$l1C)Y7KPQ~t_t>DU^RSiE?5s+I|AC&? z@iMfNueIQtF%Rp?(n;N%^Zg{V6kl?&W?FGrCv#5AZ$L43y7I|fpEeeIUd%?$%@3wu z%b%iwt!}^vx#d|MI=IqeR9{0mocGQz+wd+fCLDK&*7q+;080^PPyeu>o{_)TG(&u!vtG`hzXE7N%#e~w+_$MpOLNL4&p&MPh>;L;tG!N^wq7TT&F$WpTP6(|2=%MSo~(-2ggMF&TZb8e8{48e3al)hOfTNv z>KPDfgNrX6q%<+#=EkjgVZ}+0mxZ)$Nqijy+g(XsaOQx1Ua0MymLeMg*3pX~dd6bs z=;KU7)Kp~Wd=sbyVRphv%D|0?3J(A%CSN@sYCE$gDFbh!%)ty+n#|YT*MaMSO}4#U z4>*+;nXARUBc}XlzB(BwlZ8@^iEj~UPe@04C7Kh_eCukxOSW&@Vq#kN2tx{7gB_f( zOFu07^-JpQ|AS0=R+yu9k(Fism{y?!e;jP1m#z*})^J*38EYQJgztr;i~R8U~w z`3y1DyeSn?^kRef({T!U&(wb$CX8x-8HeCvZbrTFJ9XfgZ|~DF>oCg>ROVckH@sHr z$n zJc-;As~>cAzfX$px^Mdha%O^@xT0N{FXDDi1 zvR}{C!=U}U?5fV$R#Qt_uL85jBEx%jxww}b7#_GQ-<<|&j*7&39im2g1+sO5!-}BO zy7FVzP#vqmhCb>ItRk4c$Q7Hl+?|txgAqP|fq;Y6*g=}~&ln@SY>|}*gIB!x1{Rpb zE$vB=bB#AxJjs*=CX=t$m`!V6UQL>E2L0Kp0{L(Waw>wicrN^fb>Hy7;p_Lx!itG27xN*?$| zU+B~IFWV_cF`@jyt9^nhpcg885AYH1(w$u$bIN@>^Y;;Q`1(gB#deF@tur(!tkkqD3Z;oLUt@Z9ZHt-UJV`X)}i2E<`C zBOs5=uZ)<+XnL{dz;bR3KxTN}55Ru!b{85Vfyk2K{MlkW zos}-hiK(Q*oW2gGy_FXA2Cuw{lMGqyNTMt2mMfNA0`ovG{aWjN6Jgs;g^qRC+UfeK z{4|n}2d@)ta>4m>i-zO3P!d9My!PI`&jLvYs=KRJOM)6 zr$2K#L&to%YTKQd;fwEh#o|*sAuN^=yiVBW<&*b!iIIn8 zsF8)UY-DlY@DUW{M0Ic29ikTZ%jBF7E^ya%RNC5)h;BDE29ox}_uoYtk?g+Ri#KJM z{rqjRRqH?%9-bNa%7+|=*&UvbD+*{hd&L1bF49W`c@xcE4Cd!tu1q%c@Nb;}yRZKj z!AYU_K+c{$mcz!DZn9m)q%jmnqO(e-R?K7NC;5FrJaNO@l@saI6aX;4J)Q<0y>KhM_wJ0EXwEiH8bjj)o>U&#IT zvl{GKd0?3%g-GrQ12rX5!m}~B(*4kgXb|(fS^*pXvEK)W#2*TwDZX}-Fq6jz(*_Q1 zd~vnNgijnKZ)63>HO3Je=0@CUs~<1`>{1STmrpvSh%`(HTJYy`lN2`BSD0WJ9x|k` zrTFs&DX*Q`N*`teC`TEeY!j+#NYXnlJaG>2fp%u<@ZLRDo$3zy=LA0$r$#O^(CNQ~ z*q5X?Dq4*c45o1K->GmYTkF$v5GCD!Ai<7!?Pr9VF?SwaA>#QG4!5ApOKrRwG8YG4 zvlC_^VmF`XBYvJJTDVPzJ~nV%DEI5W_ZHn|%nYc`gxWjv?RrgV37v~KL2{)6 zRk5vCGK2=gfsc<5^H5uSSTTJZzq~Gt$MchO^?L3z6K#)W?paJ5~>L? 
zAu~z1e=+MlIE23>kIkP4>cXtzeuFeRHGpyE_^QchRYAO@f6Rr@$HE z_Th!zXS`f3LjK}EpB;Vnz%Ox@ZeY2@8YfE^5;1yFJ7Y=rcph~J-uKysQ&E?J`Sw9eD3#80A{Zle$d>M~zoGk&=hol!Oi2mb7o zOh_FPWTiSbd9>&Ty1(4bCRV=S$JW~P*A;s2xgH_d`-viEv1|M-jBsQOLZilIbpJ4| zfSR1!7f5w*5jplsqicRyS{7XY{p7Sh0Dae45&y2L+;q=o)t;BF`&hLN0{4gwr?0w!#(ov5@Tg(i+h1UiQ2A zy@h%qtohZmbm#>h-nJRW#OuhzBqjDaDY&0BNlCu&58wj-ikGLB`NEmNOKO#3;TqSO zX8=&NV_T5IG#{o*(qNP|a_6FBAHE^Lc?ikdIFeshpEZ7rmCJ1R7PFLY@7~Pb^FW>R%4RhF6lH5? z=;#hQxY_;K@j$C97YBA3688-DEQUem#QV|+8pp z+f#Mms1{2W(D_^3`XGkV=u7r?vKi|bBFQSBAaIWW||`6eBp7I%3Na&)AkGC z`z^XaM{Zyj7|=YpNUN;j{^8LzK}x2ke*#Q0m-tK<-6;DGoDr2Y?HAaOOOJC~(DBNj z0hM(BJ#6E*d@kFz)FC(1`2I(53&%4?MnAq=^qfGM#}NA>)Kj{q^3J^~1nQ6Pm)GDh z0F9T==!z5TmNE@4uH3hl!fns?e8Kq;0fxWy0%XJ*xb2#{PpAiGd`Bri9a(?#&J{lQ z%l3;4I6SuK7#bYPQb3-35?EKHK)tN@65mo~87|BAi1c&N{qsmB|ZK84j5dubRlso`QyhS&biam~X{|LUL) zGIg2IT{0WL3G+mT*0{EZZ?bI%0y~Q*VckDkyA%FCZ>3$zVP3MNd0yK+U&K}H^#WU@xPk34@%iLc z5z>*{Lan`Xuf&fmEZO(C37q2$7;p~?2d);+%Ou(BPMx^Nha9-$!Z*D~P&p4an_18^ zD%5xkml+ppay9s1IQ#1Qkj7rP@9@WX>RmTUr9JCIEOh`FGS`reanUgz-edSh_~Y|lHS z0ygDw2gp3N3T1`6|9vx8?!Z3&EmG1G0FN+FwWVJ@QP~aTMIJ<5KM`%*p;HmOP$*SL zAM|}@Xz>FGiObcanWCH2GOh;)bn!OZHf9~@n+7{8qYIIY@AcbNn$D~)&=}tuaTn0D z?SW_Trt-&mnNEAyFd{W_XgF<$-{YXBOvGbMj3S$>5fw&ABYUp643NfGvtgaj z&5csK8=l?LeE7zpK_Tp(rezqxkQ)ziX`UtmrA%X{k&Dt;ks`vfT(90ilJIR)D^?j2 zbssDpMrZso-hfs|OU*!MFvj}*8mIFIWK&vD5El>oq4QDQM)z35 z<20XQuiU>Jht3L8Bzf}}e>%~wOlo|XE-J~Ngn2^rl1$^a*yJ0m=nfY>zbLUU%h%$mrge!x8_7uN<^tc!LQx4 zn<3#bK(#Q?NN^*)Q32zbGg>(rD8Jn| zfx~%S97zn5D(tSdRtDRILJhm@^nQfCA-kp3G&{}P_^xnXrv6gd@o-yHy!Ut6JZq~) zld)lYR(#>S8iYoBr+T?~{z7dC^)ndO^P*zAr7NH!mV$LJPwOg?gmsDF4-6Cej*8(u zET~oC?M3|VPPUbu>!;x4=SGA*!qr#xI@(4!8jX3lBiwr$8_vUR{v6+$ z_nmxa%o4u9|FDTUJx0H&p(LSjo}4TElkahjLT)Gt!AK3WUuQZJ&N9WE~ z{%)e5&zL2`W!SoXb4MaBH>yM8_yM2lP`|4A1l6RaWokyC^*t@^Ry%-|Xe#c?6?bY* zoJ-f_+${21wl$nNmo8{bz+QTuXsv($ZQQN8f%WmsH6ScT1-x%HZb~a8Q_|b%s$vj6 zbVg9`_~wGLvp{v7F}vWuhWat*V=-JbHuQF{)k^pT#27NP`F?w?d{4Hu^FFr2bM;H7 zL`mweUiP;n)5=7Gi-sqsPJW!2w&e=+cLC6Nmtw1VaY@+B+n2w}$_<{aO8h-nNa)&P 
z3!u4f_9Q*eJvZ8ZzHl^szg(yTS7>V3{A~7A^s^4!0tRx5&<1O3os*1`mUtv6-PzOd zdc$ryWQUVvmX_XMD_KOUnB6l?UDD=lzVF0uwpvCgDO!*e36w z*{mRE=|`7GWUN4lh^;&RtEhs=ZA+aOVwKDZ2@h5pjnr4W2t>h15U1{Gp378{Oe|Jw zYrEy65)VBi6+Qn9ei!`Fcelv2#pVs(olk`ia+uE$n^t9C+pK{oZ?C-}PFZ$L)dYfm$3P*sP`+B72K!|5X%AJ!6&gGS$}q z2fjkoiXTxY=y6p0_!v_Rt&^(BC+nHnuUY%>DoxFCw*!h6n^m-iBb=~*zgdc$TNHz0 z8it)t*y|1n;~-8cierb~vEq)*U;H$xS8mnnopheQ6o0{k8(*6J9@q}we~Tl_z>*Zo zq~$VI*$EeB>N+R5fyb3#-l^Ir@M;sn0SGj0#p+Mpj<#+dTPp{OV#MD>=A`}@H$L)z zu(`OY|M}VGkH~#%Wg*jM8WDRXAAf{^U;jO7K^)hBuw8wxpm(3|zBoN#bni>8`O^qI z&pu6-bo)wswS)y>8MI_MqId_b=+-+%6tb(B!Q?o1o|~&3O@EK3#4!lXa|EU$-~@^S zalIpj_PVo|IlqTzMMeut|LQrd-c`yiu3kkqxp)s3f`h!T=!_4|ZW`YQc$eN~=VSJ)U>V-pl1KIwLuaJR;`d<%pJJIOFr|93`-*1r9SgY(1%@ifaLCVFft5ZCd`w= z#}E9b`~0wT@~F^Ye&H zM;Uj&jQN!av&dkV4Yb&cx1kv!jTe3NVr9U~PFd%soyA<`AuV-6MA4^g2j8u&`#+yt z&WHRTrrtU(>aLCU9)>P~5s+r2L%P$U2PC9RS{f-qT89v61`t6~Qd%0M8$ppyX;8XB zKtbNk{hafD&VSx|D>J{?dtcYJ)_0voN&$~#b$FY=!9-QC7ZzaY>gn7>Ol#6pBU^m7 zQOdpEt$~5e-!|?%S}j>2UaKw`0(4lPJqqR$472Lmst!Kbi|vpZlLg_}LXKFrJIE#F z%}uiS5@tFihSibtlxxSyJ>gmq#^9?@pMVPcZND$&jt#4y2Ta^SK9;z~9tUmkyQWBl zd};W*EKYINz>+wAl~|r<_@QL)u(*4X@YXyiCkmQxatF->I|){%D93cj;#MV6Llz`AeGyMTUY_}TaD&fFIfp@+9} z#7910SZ?iR@VIqjxi?z*XKXwPgA7mJ2mjtN9EUK>fz!hQ+|QGi(w(D@hjf^3BckG| zx>IUU9fjd^n~%Ig-ATHhrAgJ?#V>3%Z)in2zqO)#VTW|J2*TqKn z7$Tpn5#&yf!n%l5`w|!NHGirD7EB(DI(;M+luvW~xBNa<;dS>fHF$yG)B7Ez8 zVwRK7{_Mf`2rL>fAi}B|EAm@BbS`qFW`cJ@b8NhPpNcTm6M!aGI*>WwBM8701+HK1 zOw<~(+DS!BfDr}YC56MOIYlM5d+Wr* z9P-Z7KRyf2DZ9?*bmSXC2yd6GPTiGlsy#|F!)Z~V|7)^#BxaM(+3NLqrY{RK!C z%~Ge#kP3LyQ*iHYFvuS8YKWkM;1j9p4(loDW~cQ+&aJ}O@8`*PE$+u~P$6feBzmHWNhjXFZGOkAJhkO;d!aRXkC`e8EIPd=30eZj1GplyPq+Ww77{)A2B*S-0s6 z@N#zwgs^G>v&RO507OTG0=YYzjy5B1FITaoa}*p$FzruN6*T9KN{9pQTZW31_d6f@ z$y%Z!1e81{Q&RUN$-nQH9!>AZ6qF%>D5@nlXjO*_{U0KaaKwCNd<6kXGHd|HSjPZuZf;!x21E3^a>RP!mJ+$U>FxMGiD0!f8?e zZRx5^4o+clzN%Q>(l0GWXa2s%+Ofn(1PEqx8TtG>|M8J`9+B?td<)%l`JjfCL#5;0 zzf2m{r@*vnrQxPJUK6i|^}6RLuW?g`MpPv4P3@$1>0=$ 
zdR$ywru~+yb;1I^35qg>o!$$dl+7z7_3pEPY=EmXudmJvR>ankx(B}w#w?EILwA7* z0ieFP^s!U@*ze|1(bvt!VtFdvabMYJ_1&ISTxMGg=D>E_*O1rI3s+2+)3hGgI+zs5 zj1Oesfg@tkq{K9YcT%_~I?!k+(P&ViH~ZFg^O}2PRI;B)W1K${Wn+&NG+OlWIVD(= z%x-@UtPU7DY}EvC^nwFw0xVBo?6#XNhB8E?m;`Vid~B@8ok2i#L zHfcO|;2|$rkFzXT@u2zrjJUAk&t&+>9(F1%Ho{adgtAB8p=cGRC(EtN1msjJ=*in{ zWLJ1A&pEk|1}ZtYe!T=2ecOAoa{{(0fJziOYtDmA4q6zDO(fu@3Hg}&s&B7{$BDmIVVeX`e#*VVc#U}G1FJc?ImAF={~ z&hYb1@9l4KncQv+9~_019EIf&XQ%90U62^M>8jHOnOb z=$&)a)II}CcS+o|!*`)B#!8(>(X`p0FIBpqt*~n5Nj%?ZzEGkNk+xMX__!|C(Sl9$ zY^mxE4onOliy6BuX|cD!Cr499A6n}KjN_lelu35lDd9mBu>&NGQkkYta5)5}0;NS+ z&zBRDj@6vLjWkxUGk&+I0-dN<&ozcS`5&QxX;TiHv7wo@N&S}YbL2b~iq9nVsv zFrZacd0QJW%MlNmjwT>QhWJw}_|N_Y>y&rZ((#>P*~VseX2^^w1Ax{^Do$GOd3i$5}AHyodcnf^UUlduWm+W(3`mTUbcVsJfoSH`SWbr z98fwxd|9V$*n4&5QJ63*&=H?lLJ8**=jt7QX#FqbwMQ{_%W=!mhf@YJ53qmQ`NANP zW)EMHyK!n9$AT&fE2X^S-rGH?7gGK>osqGG-Q4iGhtij)N~IK~VTW~VDo>iE_$es~ zm(fM}Wj>EH+>l+f+zmi?=r+IG7ZE1l_D?>0*|(Ny{`sl*iT!0bOIC~JXE50^fuda- zS_4Q6b*Il>Y?uoG_Wo)ZU&*V)RNd$Ooj%~C2IR~9-H+07JNTbamtj_OA&QsWq*m@4 zX&UdW5~mZ;AiEKNguzIw)nPm6ul^_=q3Vm)f1u^*)mGv{PX%D(v{_)$ZxSBN_Js!x zEwDx)tiyUf$VeV)KhQJ-YkWglrid5OQD)QkkFSJZw1#1PhC;t`&`AyJi6VLbV zASLA^zM|~w@!{CkgAqxnZY!7){MXQ9`L!D2%`0o=P=)+n3?n5)v4%FP6FK_Uq~PQ! 
zuzL}atR=M44jWg_k4Y5_nMyf(ShBj3fTAENy~n7s{m5#Bu&+Cm63X3rEQdMinu)xDR-PQ4?Qrbi8y#p|D03 z1@qD9R~`5*iU`S5lvp*eXHNlC1PaAP07Z!?IyO{ zzcR|#zL#^mO1`yt$p0Dfr->z`QZ^tDQ%A=REbiquJafgSpZ$rUItbrYSf{NxT2JH* zg8Y|r^eCmAsz8wxz|X$+q|oYXU^1bLB8fiRmM?2hdbgU?8u9;;yZ?Y`l=C$rZ|=LH z12)FJ!DuLp5xgaIb(uN8B##VH4bG5?V+!h=b$hIHES>qy???6EJfO8HbsB?|v zjePu!rO(~(^70E=7^7zkpR~*yUm`Rw=aYXlHe{yi%f-?Hna2CKy08D-dDCY{n`Y%> z*&x+L^zFEfTe@fWZVHYNVdPK<_eWkM&|;W~C%5=O*%3kpjh*mTM*^C%vGt4pcTcq@ zM;yONL3}9cae?UcubsOy&?OVSVuR}RCq3-I^*q7Hpefietg%6={P?rjsibv-m!Uk{ zOZ=YGe!$j%D zs}{F`;RI@lR4E2Id0c1qe+SqOdPe6CSm>sug>?ugoQobrTO1LHu=VFROCYRQ5!->K zxsL7wW0XUCgTJRq7({bV=A-t#X+=XZ!wPHPb>8Q>R&anm8YsY6D6f>Gm;%mGTjTa$N*w?q0h(vhYA3V(psd)rBsWKE$hspr zSiEYWXl3T4`YT1}UBlI6STFaN-0-n=I_Fr>Ijv1t$R3iRYjp63= zaghm|#}2$ford6|XCjI4koFthVp8fVEetg zkZ_NIjYS&}mWLK#@qSMe`CE}0fZGtFVqMT`2x)$4Zr}(`V@H^tJ^47G)}PXBJGA&) zT_Dod&vd^4LO7Te1WOc*YF6xNgyQ2!*AnoWRb5XnNWF-3AzhRt$_FEJ`8{!gt^FVv zY%EIZJjw$*(a`ehyNtDO3bT>V%PgULbRXIY@QSvmQX1OIR>KnN|5ZDm%@LOZn*LH1 zdvgmd+j9fIbFJsU-ieZs+Vc^!z|d%v7?FF-H?0hn-OnEVWVX%A2q!B{{Kvu*^AY90 zhf2>&OWnjh00Usd?5F#t010m5n^)v>#hCq@=w6AZn$xaQ@H@TsZ+i<;9zWlnkvT{L zSDd3(_l1W4y0A3G=zlc{93DgnlbbJzbNZ1bdfu|i zOjaB4si;(ozfIS0THx{I)zed$oJRa2<>YB#oZ@5EAPfDS=uspV#nkCrv4=MfliG57 zd1HkeSJ*pKWlQ8K+QGvotKlCVUk_0{`&4vSs-YU!&>kfbc)U84XJ3(cd%A$3AAhxU z&+L?uHzy+!*M2}p#$bnf~n2FvwUAY_}6L?uOlDtFrAV|5r!Sx@xT8w!EkCl zX+;A|hm^ZDm^3|KR5Z)MFW>!8g9dMOw{jb`30N2>thh2TO{Ipd6 z@YT=4(Da9mD!|q2vhBAZVHM&XG*6p!l88s+(D23b*Q;d|5$snjggR=_c9N#iy8CGIv;>Y9injh*dBuf$GTc!94_9 z0LOK!GiHT=MW+>10AI*g2*d$@s*ve+kgD4ta5=*g0t@A+&pU3bpigW(f zeR-*xa1C?#P<^S5KXvx%ouwJG7>!5_V{G4t4{DhMKY`E-6aRpt6VTklgC|sQbcFce zGJi+~>HM!CXp{wt5bggNlaoV|8O{~^r?DFYma;qKALp5wZo%T}&6S3ee`{%RI^!lo zHj3=ev}POOtBgAkj5_F`PR#%WZ*s@My7`f?C!glk>H@3s8pBaqhcLPkQk*S?HVH?) 
zl_tuky8ZGPk>C~($?Q{-F=G~|NTF%Kv!*Y6T8z_&qr=1qHAgTZ>a|J!gmC~a0W>qj^*HGcvKKI8H6wC{%5 z+YBolB`%B)oc#sNh5e1jE#%%efI`Kbx+uS`y~NuRmK@w+ulA*cE>v)2>ubY<@8)^` zRSv3%wEPjWNtr)m3#`jIf=sf}THG^W!sJ9_wi$6TA{jm;dG}=8U}|4~?^Aq6Q*}^( zmT}Fi#m_24x{rUVpRH}U?xvuX6m$%*vj-@_f80$`!mP-{Z?VvcBTG=m1VKA(+?h=y zjdbfgK_!}*v!gYe_>qO9dikSRm|M-x>Y7kkcHepnWhUsa(_rLlxv0nC2k^nLXp-xQ zs2HTls515EFLuvO<6cpaBEMBA>zc?b51f86HpC$eS3%IpzGux(R*%c@KCRRyE^huOVXvqD7rrjXm6bjlm6hjaOla5n zD#OQ9AS=a|^(P$fJ)Pv84ZC0L>*SMHAvR!-%4~hg2_osY=IEupun?Xn#3K9xDf)Uf zJ}2zZtvL--307}bk<5rNmlnK-()WK{jNFP9jBL@BVgoX_Wr~N@GDlZf>z=gsm9eeS zOfzOV9H+3mWYM9D%HXVRCnQ=>{Jxc&M6#(Ug98P;#NR%-Zp}G3YhV;&5H4#v(~N^3 zkZ$tWzWH#<;@JY!18&@m_4uP0WUK8FLDYE-`8ZN58&pUiFo}LA(o&z1+B#MGdRu%zjh;+42N!MAXSn|!Ecd^I4i9U+ zu3Ys)kloXF`wl~atAhr&<40yd4_#}wYK=^L$;KPU@yFdVSo!7cyL0TkV8`xJ<6(wI z2AL@E6p!0FZ*QHvx1RI@6+vNTNdqm2*g~@y0qe76cQVX{I|87iLaq zB$OM<%sRE4Y%vFh_~s$6n&bl5&|^=?immp%%pa=j?Uy7La^B7XN!O z!0AP%Z1+l!20(6p>3hX2ij4`$LPHU zX*`DfB+a;IlOshkapyC$Cb22QI45hLH>uAaW8R4&hP$LzJFBW8doy> zKvX9l(yHn`BQZVM>sI1#aXAH8_kPQER8**N$tgyDxUcUvs7yequ&UR)#w7>dB0=P& z6iqp_n@etY;KmRGe#)Ow0QY*XVTKEy|U<5w?DPy2$x z;FOKV%D+Q^hB74{F0OYlKoTvG8Ozla6W=8F5xEa|C6F`CNj$rmIy1r#c4ZVc6iZ`E1*^NV#W*LF4~5esHNlj?)73iPIznF ze$mpc)ZE$95?dFqO>$xub3ieu!HXZB5)w_(e%w2URTJAYvTG!+QbDU;Q218S z4v)ggE-E>$)P7E%_vcUY&xq5Ls;0_8;|cTB=`5&0Ah(3ruvNLip=eds39%k4yi>K=nyl9%AGqc&IH| z(C88${lDC)H{dnpN8`1|y~XcZo~qU55+s)9J`qbDlm(_n z3Len*J(G$%%mc~1mG2u*t;HWcA01@p7j0-UGgl(KSYgcCU8R+E>+CAZ4^(7EB!Gxp zlIEbp&)f|*I#pVkZ4UU9$+}bFObLf_?UlD0qzg+$Ygvfnw7GyjkHXTjF9b$~e}{#i z-Ih2=XFei1WFR2B#S)y)fTxGmb++@&n^vs(Lbzd9%b{VKStSMVpjJEuw_hH!1pXk@;D@!m(_x zmG46HKM8vE;35~8gRz(e@>26Ac^*1X?FRg2$ec$3h!PUWIJ!(<%{B$~1AVUAP5@aL zka1WU2h&C|qSGaue?K$zp+h#h^=LYcY&4x)??vDTSTtG19@KX5i9XnEH6*eXAi|7n zO@Wehg*-}e`|EwLV%uzK2pd_~Ske&D7*-3_Ir~Mxq7_%^L)N^)aMbwhKsPdYgEbI~ zGEV8yyRN%C3A*ihoW$njPi`WgFI-Jf2nP#2YJU00&lSj5iCJV3)xg<8=y8ZQ!_iXy znx}607j2w6t86P3-DX;x+*7V)OnT`T?;34!MUxUui4QshY>gLuwDl&#lh&U+7%^JB 
zifNR4<2IXvWC^TnvLTiEIp4SgeF@_aU5MTx_H0v9Bw0p)9Mf1;?i@P-bp7tgSCe`X zXcXGn)$1^ar zMT(CxDz=9nF-5fcSUes#ojcvlgUNx3Vc=bR!kuipx3!@SifLmcJ6ZV@4axOHt9LKf zU#F{RTCi%E&>$60-~_%O?@iVGosUQ3dD<(2gITcYR~}li!*}Reh{@F~I8}A$rh-Hr zWz3341Dv7wIS6LXR~Q)h@rsGEz6gWaNdOB8bHnZyB9;hOtP<&*s^AiGO{}vA6Tsr*VBF?zyb%`(S)*&P<~pOdc~bBz^a-6!Sf?VFT3iGSOhB zO%TgQj!c?Ny?Tzdqs&P*AzO7%a5ZXJ+MEbsdk;q>Q5Lci zvshDNR^sl|^ozpWeIWRX@Zi@MSiOKS8#VI?dgb!A7lk<>#c$xgMc_mkN+LA^(h?bQ zk>OrtHb}{!94=@SMKEVV&x0N&|E)oH@>Aaf)tq3Ar9A0T`Ji- z`5&;Zv>msqlPicQ@4+3Dg0T!cRC67LIfc{2;Xy+G)f_<*G#aixkw=MI<;wzZK5K#t z-IF0{84nn~f-oK#-HGQU82HqxX49p!53lK{k_k2nh_4}_^-L+44(6~cbeh}EOQ=_C ztF+EcD|5`ppE%h}y-**^z0j49mP(?$|Bz8Xrg$Lc%B=Uwp0vII`V#HmxaIrJAxfVGm{y+1 zn~<3?LUqm`>tIgKxcj*|8?s>_-TW{(IUB+=OYUk3E4@T)`#v8Yb^HEgmm7_HKt=`V zXB@7%lHtrfU3J7m3FUSvHV7VPKh<#u9S1O($-Rx62f3=b#pioHJ}Df}0!_Lv>>WPUp4l59r{K6VJ)2QA-+y3}}qg)=dp z|9}*qcTxJqFB*-!f!y0O%Ag2acIpV(O-bI-HZR7b@c*@%+jJh6uI}_r>;QEL#ap1- zIyZxY6+5zKZu791_I!S1*H$rqi;8}h@4nB!G-BD{l>!T28pp9Q1d~BK?BrhC-OQ%u zxHekRXRX$@H}56AwPQ%+I>V^J8rB$LZjLP=cP6Px3le%iFz%G(xG;5 zT1FVH40Q~n46RFYle69MDM)j&-P>QM*DNi3NdjUaGeHGZw+|C@{lQ)A*xhKegCk53 zqYjSk=mbLnk%!tkKVlu827U=ierxZ45muHv7lg|vyTB@MGX~1@r@(DbMDi=S0f>4p zU(+&5V%$5J3s^+}ZGfKb&I|AxviZ`#H$#JGt43xXM!y}d12>vM8BCs=kgM0{ zm_Y72|EGSXlb^CtB4M+MtRN>N51oQcKK{P;DP|ehzX|L|T59@NU%D>v^r(id@AVma zwRVW?5$@(!f2v%;W~8w-l;v{p)y&m4 z=*@d`Hr$Ru^K+^#RUEF7+_(PzbsZ4a3wyP(ypEe=d{uNng}Pt+BIsLaAQlUPY_3d5 z%2RErCu7IX%^WHgLAyneSa8Rbr}wzT^CNW|AFaO1~e7_h-&`4vni+>-9^ z(n_;~Ml^rvtFHRp)gw-sXMc!3EwhXXGvKqhY|}7E78F%zWt0k|`^wU`m&x8*%u0wZ zITT$vc(sw)+1!r=3wr`qU@@>@1u|F2X)wh;ewUWOFzh zM?q(mie6Ua)ygWEQr#&wCHqSgpY3)!KKy(k)dR(N6_(UP?K|uMQ-9f3z$~?CB&U)uXjTtaOb_!5CXsnp<46_f@@=#8sDQa3t zOMIo|j=7fwFb`mTUFRLZ56jwQeVC98w^fi$G1}6mX_Gem`OH3Tu*QTyH$&Q zPL>ILv0Wsmk?4j+xkH_b`W~!)Sc%OMa85md9LZeS;OJ^t0;j%AU@dwpM;@84Tj zKkLWK-(~3Hqs-DXiaQr!n9h_0Jji}e=rJvm`$G%$m~@Z zu_{7g4>*cUvR>JE!qAP|;R)NM<>H7gg0+qB@n1U#)≍z;0aA6_nsGg3kOEYVa(aH>d*WrF8^Ua>t5LpJfdJZG=QiX9O_ 
zTA(nMYJi9z5vzi}*c*GA?$G|{+>*W!$1rVd#X_#)EwzDChplv5O0ns?(4?}rOvGk) z1E17aD;W43g~!&0q_sr7fOFrSH4U^=dZJ7%;)+%Obb|r7?Ke>wgJ!rE^S{ycVpAB^ zP4~nx9=1Mp3|N|FyhqR(n#Pe^?B>6VDvM(Mf!h_Dmin)u-UGrtzB*4VlVtP6XFIIn zpVJ;q#P=$Kk3~dJJJSBVsU;#IITz9Vw5-MaERQ)R!i#>*1|~Q-z2C%?G&Le+G;smV zG@bQddU3dh&4Ne}Vg_NghU#kC{TB{n-E4G7pUl+lCfweFWVab#5A3lm?sFDf{Q?S+ zR(YL|l(*Vu-O}x%)|=}q?CAOHF4EW%(=5e4dkpm4pGAyVD09+sd{;2JCI)O$23X@9 zH($oS*w(aY>DIhLyV{^ zjnG-!Uy?OEUC`%LX*Rej2EpJWmPwgY)w%;u}JEm@yqM z-XBe8{&iVUc!ZB(PJ_I_%ll3En~15T5M~ow

-)?peBvIDMFbT|WLI-Wq2y@o+&u z{=Dn1M9r!I;x!U6EO_h(;Mf)s-*=5cR%!v<)#h6!>sP>$rb%K)M3dgIH!>j<8V!WE zI-XpX-hdtl*ug~6!`MlfHo7{~d3FLQp7JNHQ@vtclYCBF$H_k8>Rk=H^#?O~Wsb|# zRIgUjL|TmdM>0;s!g|_)4A=5Q1Y0UA!-UtFO#!9oO})xQr%;R<*!8m*U#3#RQr`T0 zP1(@->XkI#X?^H{xi;si;_EXJwzu5wXR(hhCl`f?`sObQ&8}@aOE99a&xRBHLVr5b z&?zf}KJszwgJ&(ftzBmCJygCwF|D^2iK6X%mh#EOW{1pfgSzeMtXdBX6W~`*2J3&1 zX$;_%190J|igp_JX@06h&_DpxGjrTxqjUU@*Ys$|t=VUPvBB{~v||i|8rwZaQIQoy zasmbO2KKGq_5LUr-+cpJlG!>Huig?7N0J>RHmf+=5{cwoZdzzo>H&igG9psrinaGg`3}Q5n`>6WY-~I>hE`{Q`_n zPA7lS+Bov}|o*K%ARC%6BDl6hZ6O+qHtcNuM+P!HT zNQ237d`aD`RO*s7z$n+L38k_cd z9auutcK(){;9fk!%8@S0ZEE8KWdAPSn2qemU-_KhsT4lAP%oSCG>6IY$7MH2dj4_R z`f_Cm>Qut}zX?T!rId9vSo>(m&{TNH*7;zpKAV>z5mxPAH8$~yKUVw)aE z$NR@4uZjvAB1DuL;A5|V(8n@zCP?ICEe&cB`cHu&YhrFcJl5A)VQ8jA2B`0<(4aPD zsFV;6gR8QITNqCx-83B@mJa}iGQ(dSB^ETwaTRU5$H!_4I(^vyuDd+i2RK>JQLuiz8${k#KWin%5i(LDQwJ ze_4@v6(^JA+q_nedv}I&#ADTHvkzco?8Md>{hQ5?-k$u9{oeKqf$pBV*`)63``*6HcuHzxG!+C#X(dfPb3`g01^`adsy(V24PGM6)r`y6Td z2iE{`&^9n1vS^%{V}*3w%{1ua4hL_g(c^I`D0N9oRcRTO_#g>HKE@HS@b6NTP8l_0cdPoVvZF)sSf&#| z=$k4A+=^92BNPF@un5IemD6CvP&FP$nw`V%uykm>TzKvtv;P@uML0^mJXKye?J<0G zfwgbMEVt>JHHHr)U6*ZP$9U}WCRmJHj7fK7gGeIn=*egR>-~!@Wpy3GM{-8U2Hlbo zpuLL&+qM&X-wWFYhzroNsHYI(?X;?F<3vDc42b z7Vmz5R(U?y93I@W??z6Z)T-t{-$K0q0%6cQb0y6AHT)jHi4bkQTJk;yZH2$j;x`}X zeMx(%)4pcSy@X<)Ly05JJ$r9^tuXKATFVwHgn zOz9+AO9zLmC`fPY;xuXdc3eh2aT=#XT~uZZQ6bAl{E9RCi*69cv4y**0zTWVao>1O zf8r3@uyU9AFU=?On)|$^{=nG(nwT$Su0#N9lQ`s;c<_{_cwvFig_QgT4&tP^K&aN=l0jy-aTJsnB?&^1-~}QW-Iga zINrLx6lu&F?Sii6V}JaXU1k98y~kr0fi9BX!DVI5aWMj$6S2F_SA#+v34jW~N24K% zE=1ZKJ2&jU)vL3dulK+tr)I2;Q2!zz5h$;uKti^;{r?PXGnD^Xsg{87FHbJuqyl9> zG}d*t5g+dg&UHVpz@8=n9r`YHsoZ5&uvjvu%Ge=@v-m(r##7D2{A?`tuOeLg9vSGG z+v-g^*aiEmKc3XPTg|m5I9Pl;gm?bN%P;-vjNuN;aEC9ew~Q(^YtXpO7uFDuz5R$> z`P9XYPsTuEG{EUBz%)Ke)b8I;t*c9stTQS4J05tmq`s?hs|XqQZ$?=7rtuowFIwHC zH=aL*GVu2nkjG~?bg}QfUpja+SUkw#co|ke=BSA22y@h%m8=ZN#&XoFB~q_&a(ZI- 
z(@f>z*T4JquE92iWaVjpyvH~`{`!jPSn?(gr2y0M{8rh6ipYAdHB?}-J15>Y8=&1mmQckdVnWFcjOflLU{S$(nlFwvFE{Ip0PxN2XoHc)m%|GK>`nzElH8_lskQD(^2)QZ)b@Kc(%{u*<4zZU1RlXQ zvhtPd%n=7q)alQrM=?^df(+L3rQY;D)>tS3rnrsgy;84Q&u|9|t}9f!Bq6p*b_SM~kgn7PjZjGSI!_`*H5 zxkHj#H(%PXetrVUP~laUWNhACJRd_GyiUmr`~6YONd)FV#j_uSOjPG;86V;UJ$pV@ zUkv8yk%Y@g!f5OiRV0bNNQj|gqSnu3Kux`ShqTKOON9O|R=xt|JxV@fF5Uw8{XAj& z{Xj#U=vxFY|3&5o-;)>|l#0AbWZIX*@nI_Tpg7_f|B=!-RmS~wiu*i#zK&}QQ48*! z*22r7);du=m#UbNg&>vlAWS(4l3zTTnnC@p(V@yx$_;+`x(A=vR8He*ga!{54Y2yj z`z-=n0kE`nzf)!95ik#~THaYMjRawHVsNWyB>*n~2GYNeYTX}LaQ0v$rD4?b*LlE0 z+|S;g*u((gd~hGceauOt?4;&NF(QrhHn;mts=XwpOY*GXb)MK`+_h3FP(Y=fuPvimT3vouGs_^bjQE)VnD&!VY!LW zgTS^uf00q)g5r=Ynn2rB$UO$IyAMCp_jB65IT!bRy@v2)qz9Z=3zW^%J(`$deFJ}1 zS*7T<0Qh7r$(V4i<7wsyN8a&355|_WgSu?Ev>On~*Zv_?aYzQ*!z#zuK-T9u&F#vlIlXU#VW* zEvU>gtBQCw>kZggZ!!BFQX<5kEPYlX$AwiD@w3Rf^>Te!oC;bM6te%`2G-ydg}zBW zP6SN}DKL!NIU>CFqjGd)-!pok*ius&UMgfqeTK+LIF=0Tz%ljYshKxv?lNA#)Z)|J~pj-hLI`!b@i1Fq~2_?J* zoHv|arvMg!@tB6jm}rmyTHLRl@iJ@n> z;d5pojcIyyZSxA8+GXnox#Hj$l4pzu4u{s?_R0L3;xeL^c+ktkSJUnP&W?Y6{hGo@ zh+lsB5QOG=jRs`6l8C|ak4S|F!16Kq_rj81l~NoJ!rO-?89uH z#{;y5+QryrGd7Iw=5PBA;c)n2*v0-rGeFpSdwYwBi0B&{cAqe-&sA4f>wxSJx1W#z zoKl<Nq5OANE;4NGX;NDLh!b%kARZXkFsMY>XBA;t!e z)y-=7&qePt_4zXV?W9jU0}^-~tO7x!(EihM*LKCI+tgM%U2x~e=CDu|OLN#NVUH$4yQ7h_!6r8k)%l-S~PTYSPwe8L7w9) zM!+O$@I0*{;dkLb2oA>Zy@JwQPrtgJ26xr%-~OmQbgM>QMG@{nOh5EqijU<_=cd>W zDO#4(OW6K zY(O0Gl=^Vvf6Lxrj(+JC>R15l(^I>b())5*RkPZDtlsv+kPPKB`r&W!*~x}F%^pAA z;pJNzM1{xY0QCR+%Sg|9L$9Sy1>mMr%mRza&{Zxa7{Frxbj!&(5-8n=)v9UHpDS}t} zWexL3TXH?X2_wPW8pVZ2MHRNa&8QyjT)RxBN92K7Boqn-Q567wqRslieY3hCHYT?z zq~rxsoec_8?f9`d5*b#e)^k57DNkOk_U>+H+8WWnYcWA>Jnl5X~%u<2p2dz8SV!N~TJ;uDw?iv8zJ|vVS{=$%;I9OK19e&(s zg$(4)9+GVQF-M$E-^y&+kq&Y<&~B%O7{RN3_k3&YdAhq5FZF;5bQA67yJQyS|y}=g>YiMr_|up`Wydj@V^3YgGbI8X|9E{@8K6(C6U|?Qv8OOFjnj zLep%zrd`ClHLS{RFT8Q$O%IRM2W>m6y;{d{lfR2hhFfvv=}}FZo(7yxTwYyaJe%FS zm(qvok@A`tJ8vtz#wNKVLKqtQtqrinpT6 z_w+K+qB|AywT3#=(O-*DwD0PO;CN=Ox6R7z5wdyWEzC}SeyKZgWOK#c!Fu0`k+#hB 
zyB%kK?@cFhZNSm~rbzwm7l==r<3==H-jCle0sp+SSwtoO?Z3nB+N(MH2*kZ|x(+R- zDf1CTZepbD=9;ODtY6|wNwo4)5Da&wjUrEa8!E3t@--c$(299;NN9}_29lV~xEoA};9E4Qs|W&H8N?5-6@lUo5{QnaBGmVxI9k|?i0qcR zg;L(TcGKqV9-MRr%@rCb?1^!c3pQs$=R;wJKziwNO|!2M;Pw^p3#2KWjBTRpH2UmB z3@8Bef;)jB{N=86$da@_c*y@_tCD-W(Q2Ix65GsOj@&A8ff+;ZuK*kbAMqJC1)je5 zeuC;*EX0IUOg&UU=u8QNWM{zyj7*SM1C1rv&dz(*WXl}}L9)`pKl59Yp&{~z|L(&t z`$2n@m-5N?kHVR^`Cedc!+=-ar4E}aJ0XsN;!QR@G^-E9hTzjNLlWO*2|yFGR^*s# zm_d?h{{e_#p+AS;2K>#Fw8$Z>S?w^OS(5{X|jNKI~prGI(f%U0V z0BFs-9p5GzMr#6p3<u2>CPRihosyVv;aaeo&vFnyfE=V({LPW%5RD%^MaqL*Z^u_QEFKZqoZd6uV)ls6L^=}UuIzt*0I2I3? zrDFoHF_K_HQHF%&0QQ+4_Lgpby&_Y}asa?)zJsyMh&KfGsf1b85e#a;BV=5#z_i-d zNrm2BNPRwQv9e5t7e^*lESJqPxZL^FqqeCbQgzUyes}Q^av;>E);=5S$^i%++%VR2@NXDn}d0diN&f~4x$vP%QC4qpNZav;lAAfq=HNZ1Qhz3b-kN7 zAxmujaL()=)_ovz0v(=wXt6*ln2Nhj=hY?Y|vgvXu!)_|U{^GDSFVI%Ht&G$s@C zH)@ojyOG8LhL%#0mZ1j_Bt<|#O6dk^8Ke;yKtehO2?;4_kS^&EX_W3pK=f|E{@(X~ z?)6VRXU^IC?EUN~*0Yu__}h@x%amoVZrP}PO8>uve-D829w!}0kwzZvdY(2=%K2aQ z`oG(!l$LBIe(3`!vTg&H`v(DfY}3cmV)4fLr^7Tj@phlRn2w>1`79QE#L=YQTw(i) zz|y}-9jHQ-+1<`K-KX@Ky6M&sL|7~~-`}a?yG*1{n~+p7Q4hs&&4Tg1Audp-^*fE}C;4XN1#+`)$AqAw!4$BGSXzz9>Zf=hQuUJT z3;)4So6WvqexVPg%#f`O3HJbb%C^nt3pW1w*@0O*J#Q4qJ4Sp$W1j;?13p+NHcWaj z7NF~$UxEC_-vPH$qk85|zURY(#jvYc-n0r|0^uj%d1QdN#?|79Z@_; z_jFNs+LIpV*M%hiWQfOgsV|ebn3_Qddyt#Y3XPf1??iS1?_JCifNlQHlo#6hk+MkR3qVpFtvawtm#W0I!OCFL=Ezl1np;{9$dF#! 
z=uQ1v9j&}q6=esAuAjm0$_zyR|Ff(RkY#N&w_BpTYvlXB=u{L)TT2ustg-+$xpWo@ z4*m+?t)2ij0J^5s6FDX8<7H%#&N7CWj`@AKc2cBwR?JrybFiYGq}Hne^h6XfF15~{ z%!$jX0E)%+{T?5A6v&O^Dq3%*8!@4DU5}zmY#3|A2OnRzdJvH@gJd#4M+IkLY4;no zjU^NL-c9^_yg-KRJmBWd;22 zaiU1L&ig)ZEO1f|{(g17h{aptU_96Q1>0a!QtFy7ygrrKSiD|v|Kq%G^l?FtMu{BtaXmF(rShA8s7H&m-*C&c)W&e; z%as9UgY3iI@5(vx=r?RYWL2nVxce0-tN~$o3^Z2wPr$RVaFdUL2$>VZLiHJ|yiXP& zWnta_E&M`o!L%uhIfyz7p6cG25}TE~HL?@0pZO-3!KB(rR;;2lFRc0zHU_~k7t+dN z-(lz0?k=O})%UX6of!z?#-aJd4yl}{Ll~SluDKY$P8uRnXQn8L!6OtVzBd_a3_LGY z2~jP6a0IhKDV&y!y`D(mF&NhmXUV82>a4^TXV%&u`krfCWQ$|QD8klo6w!M!>{&BD zokYH&Absf>GT%D1djqBAH?`Zl6=lcXAz7411W`58R@XL(n|+^;xpmGS&UJ3K(ApnL zd^lh~4K$~(9(+16uJmQLpp+i$2*pUqp<6k3T%o)g9k-;I)7c1u5yP0oUEc7cHf^X! zzIu2m`l`e3%0pRrF)bdBY zJNBCL*07BFJ`eFBFpKlfbPZ3!e{CCF>W9qLnRy#2{pt6M=LtQW2dZUybPL|kAfA*) zgYZSMU^G@jL87ePsGK?4{X3Jd|K+(QfyC5k7p8Z6^R`%#;mfluiAmKvX`Y96L2P<- z#mq7Up%l7M9%5{mQMy1=l~nCVnSzf23%HsoL5h@!It^xRbC2-#a}CUXu{4)!O+lZ? zG|#pLw)s|72ANKH2NJ8d2rKevdJO)2Q8TNr$PdSMlAlcgTt45Oyv}92_?~>L8#b0x0^y(y z+Ie#4oF6Lhe(gn;%ZOvYwW+dk9-ycg<;1EW3l*h=yOBhwkOv=cw!Wik^z=B0WS>-i zv=^{7-+CDeGn|(UHgG?VR)9AieKu0oCR&ip!zd1?Kr~Q8ZL{k?I$V}?s&x6-IxRhq zg@A9@T$O1+Y^#rl&4#pcSi9*Ylhs;i^}&V-p(_WI=NjcF$;k#-Pyi6hzlT&_EzJizWN#DXcE_gqtpH}^;Y((1gXuHOVpk04m=iNA z6bKTx6Ojf8NwA}iuY=wio)bb;)l4m+&<4ChNX*><(|lu<4_}?2L5i=*o2x?I@i7w$ z_*Tv{^}i9Jxz52(UXaI4P^LqOyDZaLLxoK?7DoH$zcf{z+?y03b%Ma6#!qjddlsuY(e>o5s?WuHmx?Ieir)xtL+Lz8;Q` zBYS=TE>3|BLjWcR?l<^#&a54KfNG=b_(O~syiazUeroS}U4d`%{k{7yO%YN{jVlEy-Try$;}r{36%$YLURID`)?h>Zcy=1mdsg7M2()+bVy`s}M}}bG#a3dBuZooJ{N(Ix95@mwWqhxMdTlWIYxu_Abf$^TYMBm0PZX@iNGYq zbOEx?(k2#qh3X&QYvca+f6B~=AM>RjZuf3cCfzAir=Idsr~+EeYlGjzvYg)u$c5+4 z4}6uNsR-B1f0Gb9zb)3uAE!e!SIyfPrj-At>T7X#CNReYmt}ek=FpYTNXt9Ot}1DU zCSwfcmUAxVAyj^WsDk4PXsDB+`0l{3a}e}SHF%uP`N5c2phS>|( zMu3)jV>nrzp-OoviRxSfk_Dn`6oRW6>O(@NU+hZvk_2~iZ}r*M_b2q z!o?ZTpi;Uq8opKWnBZU=|B!sgINoqE)GmL*fMqkQx!G^YT-8UT$`DUv_V-{Q@BiIk zR|2p5+YZX|cdNV{13C_`zmr%>LQNW#oY!w?5F!jF&9-bX0Y1*MRhGB-B8``$2_LW4 
zX%h6O&+#>tnXdN2B2lTObVHvR<+xjllul-NAQW5f2Df#P)CpST)G3b&#%ix`$quKw zqExh$RR=|dCeMc{%$3^?dhcO;cNgO z2GE+wef99ccT`?jYvQZms&#?~ti>NL%7Wl22dSM+OyYOA3v#wL(=p)VvhF6TC)g&{ zq&A8;lrZ?9n4~Ng76Be9(+?~(k8G;{lAI~M5(!rYKE^mx+KN~nh3-A)?NWAa)$iCp z$G>EWw{uDJLnVn!+c!Hj?7uFD>@w6q9>eu!+wXO}dA_{IoQw;f^$Su_GhLXLAr8Xc zNQ7fX zeo*SuwoQk|P+~J%@6E^cN?Y;JRjfb`5K_Yq4ekTCUgVfai?ySw=p_P!uN zBzd;Lw~ZgG z;K5X~p8)bPvQjin?Q(le_50_wb`q}irl|%%r?|qu~?U&GirsQa}js%`HYX! zL|JJ*o{mr5N@$aOLe#%A*Bz?S>bdCHX}aNfph*{)IJk|r30pe+9NYdS-Dx^L##kuh zbNx;PAW(|;zNZ~eH4@nQgmK0(@eF6=JkGA3`cA00UFH+%ZXdjtvvoC*Tl`L3`%I~jmItT z#GG6Gw16O(L>q-E3Y&6edyh(?4lnt&JCSfig|pm34m@~rRHg-Kc~7juMRQ5=z!ZAs zQ#cMG5btVr9b{)TqeNI#m8=pvyJZ5`&}(lc@Yz=6e-HSKu*hGX0N6eos1{pV#|S$D!as#SEPHiJu7-oyNsedGp!`xfI){X@!9ULpuL>XYSBCgY^{<I^0;N zN-!8ZC)EJ#PTryF10R?v;&o*0@b$HMwpNC?V1=c7^a;e=Iy$oF&HMbpqXGB0EC#iI z;L$G8xzbxhC_t2$2@t7pZ5y_gMGZU{6<=J_BPLY_;MFgA+mJ9-c@fd1=&w;F=#AZt z7%FAeMx%Y^zX`HvvGNv#hToyrv{VK?KOtIwRtm2ss~j+Lchy2aB=W*d3gVqob1^gzi}OE zb>0YLyt7bqx?C=HF&?n2KCO-gDFO0{ofXF)w=z;qu;9Vl$d3;TAgYfIjSY2WrHfrw z^19b!f1hVjyq^XNA@49?hO^2BjmLc)Q1k8qQ5eeWSJgpoC?m$!z9pV}IEL|s%WHSB z#}(FGNNi_dO->YuUb^?fb0}OQ(hX)hBxc4%Pgg_d@DYq+jys+Qrp=Qb9N3Vpb@uCC zufXJT^IlLRYw=R>S}LD8V@f}e_4Kp2jRBu+Irs>UHue~cDn`J)y~dP^DMI+DB8*Jg zcgr3d&B#f5O-2C4B#0UW%9szw|E=wOVsyWFs(-{zGg2YF{?nR%3tGS`$NpOqWvkqQ zJFg_`S9gbFQgwb-W3PZANnfb#rj)m`i_>DV{(10hG0~g;v$)~s&7Rm9?&s03@p4s_ zWQYDpRIKycO|R*<4RP-y1oxlx4sYFh`w(+0zLvHm2GjCnrEDT~8>(tv0;o*QDa>GcC{3n!fIrwZ^8&)c+QAWVKlN9!J9jfr zL-I;%LobK{$wTx!G$YkM{GY?o2GdrJl(TSA5-jK}VauM@*UhvQj3D?L{TpzU2h-7$K|7wH@Dy3cd*5Npm?X<`ha^%N5HR(m@1@${wQ#!dF0lRxqBM(gc*donT1 zg!WJ=dCDj=q+Zm2i&hwGe3I^=W?dC9z`bL9{&T}-aoLT4DVJxcjnw9}`JBXP5Tk&Y z85ITMW`n6vmO*YiHmDqCxPwf`r~R3L-BqPqd^;9*dBK`UGWk~P@r|(Jqeo7>fsN!S z_lGpd^cgL&PRtEIyCp7rgiJPCHZbB+YwqH0+^Q)Z?iV8s zdaHYeYh$&>KzLxkPu+))E*&UWicWb@7a|)uhB7_OQ zt0-UW8@c|9)!eTnSaeGc<=-(rFw3e?>~DG zG|u+PK%LHaCBlD(obSpKPd(DJ6L#tA>aeC@H1}|{*urJ{PDB3Md2eI=2NmU#9}4KA zWrmA69S;^!2hsfA0jcU{D5w(x4Wj#5LC)vxeu_8zZmqpGrAG 
zv+h9`hZL0y*WN{Nwv?&(@8ZNvb!#9@;ix|C zj(Ew;L0e<>){C^8jhnSjxa?Xd z;0P4|^WHq6F&od0XO9{^MZWQap0H_>SB=;WsP=ZjqvYa=kHkA#CEHn=-ivboGh0*` z2=B%&aS*VYFjS@rw5KWCzD@G{KS_=Oc*g_>ZY92*_aEj$_0zG<>xU`NH2(M5I8i;r zY(?jOk5afhCpcPt8Wm-sXU^0AVp9hs@DLtG0H0^Hl#%FvVZj?@h%>XscViXj-i`kg zT;D?coF40tb>5#8DDS3ICUWlZ4M!dD<(=pNq74>K(V>C2qw0D9d*?+tA>ToA0bL zC^rIi)$#FTfa_}eeMR)&*XhKi{IyaaaQ!Lg;|wqn58l_s_LvJ@t~PqUl~-qxQx)|r z%gD$$K0XGb^A`t8mjAuo3_LhUmg%-^;IFlm1(lF4z?{21D+>k(#-ruL%yG+0-Vs~G zsM^a!US2*hFi>A#UquDaKjZi1$SaHg{lZ_JaA37AvjJGH#bb+7N~-r2$RVzRa&~Q0 z3BxG2m{=Vejc#rIeYW$}tkwS#pq+5C`TpBB#AJxSD7EXQKQ~LSZq8_*&Ukrwbu7IF z;IxgA?2WaxcC&X(OyantKSMrmpmTCCZ1i+>Cl(ifgOSVr`E}Cvd#7#LJ@h}*xB?;Y z+{-~+{fLuYt{@O92LC>N`UHL{iHkVd68H!MQD(4(N>%7kr~LJbB}Ip{29kQvSom5{W&mp!t)$A1H-Fp zyV(QCJ^1?zaTnrS0P6Mgbc-ryb42z}h&_YWSp9-b8esBxb=~53QUA%IyN<^7jasR3 z+e(Kst(@7PUmLQ>M~Tepe}t=cq@GbWmcDMga~S=T2rsrBQ;qFs(dPnUf4Ak6d&7V) z9)1O;G;nj&-01;@-x9k1*ZJGlsY7ws*gI52QYOLKM}(@5-x^y!eAtPT{f!ZstKOnI zfTmD;JNr{P6OT#!N$;Q0;9cKW;UiZUxZ^k17e28O%d+|6Dl$Mvkt9a*;`G%2@)$?7 zv-eN%fgh%Aj$gI7%g2a=Uxt6m!2o_wm(Ate?|3ES8tdB_ZVj3v%FD{inwl6>u{|lD zVrurV5eSBK3p+l38|VyeCkvAD6z?s1sX`a?*Z?arN0VWLgyydycz73j1BMZH8{^Mb zmxMAwcKP@3-+_X>L4|3+?_WOBbs2vbv2cITsVsnMf;)Pt5da~Xl$5l$xA%8;du4wM zXQlu8o9j#!fy)!=g54F%gK~3o8yFZcGB)o0rg9tXomf>|n24qW=5N;MVOz~RyIrqF zxvme886wl;<9CM>d;ZIn!Wl9%+MbPBhA-P9a?;~6UMKLr|8lS*72DeNHw?$J^`QJs z7+&yIQvo_gVN>;o<=e8hu~$EtUBCxx5cHY4hP zzf0f5*1d+Zs#=RRm`Td z%rDwjqKzE}6XV+FSeI;bUVBvWk0DRnv)#NY=&en}@gIfDF1Z{t-@6EPP~Y{Z#Ji$j z=K1i|b-wO2jMl8xj^9^MygoKyqO4co5bJY_AL(%?QHOCxqXp*Dab)1|_XeyM(aW?# zTS3e}gx*J=yY6luAh+CQ*xhLFJS9HB>|NQ?gn*;r@0xf9jR81WX8~VgYt#Tx78Dl3 zAY?xkZ`joL(7XrJ8km-1iJiITz-sBEoqzQNKL~rO=gqrnNh08nJBdWkOzDy{7ql7xR9IAb&Ym z2@(uSre7=Jw`Xn%Di9oCJ~D^Ejssfqjv)jdcq>z02OAtCvacqHZ=@e)4}LEz=DLSN z8NJcbw$n?0&udS{!DVF;`)Qn588QT~aXHmP*H{SEN`cTKfW-7|n@^>Y;3GIcL0mHY z<>UyIFev@)zeP|px8QNZlAzdUM)HZlHA_rF!@Wtuw8d4cPoZ%NC zT2Kg{ScZPPt*}N!1dT*|K!rxUO(0z$!=lc(m>79JY;FU=`5fuMfA>L3x3R97y9^%f zCEMzOdCmrIptA0`2rN4tbi;iqTN0KdU~j0${e<)(`hx0A^ISwA`m 
z!T7XPAXs^ar#IoD-q^4)5|*N(#}z-8XPTKwhchoGF4Ldi3BrE$U82DYe$+nPASzOu zwjVBs51e@HMTXxW(AIrGETTp9OteC#25{sEGs)Rw_Ag;gY@{_Qeo2#galneqY;)^$ z{(KL}uy0*s{lD)Mqb84}OrIM7dCSC@5{_{QB*-U76HvZ4){hMYF0zAQ`>i167%#Q;H-Gxz{!vF5^ z1@};D5~GW;V7u9I{~p#6i4;)+eK(EE8M}WMhl>SNgBhv;QCb-jobdT~11RZAUs->q zxbx&b*g&q}3SDTxMLgGp7aapGWda!~lO-)3BJoatJlE^t09E>Dr-N{JGo3J@tF>Pm z_ZlhgboxdUJcz}?vgJPZxmYuk$^N4{RENsGvVQ6OJO7$J-kGjy2g~I58N+cvo4?Y! z6r0)k`cnVyZte&z%DMRR1>U;_#ZULF<{00;%5hNoHYI4>=YJ#C_dLwOcS?|v)$F%s z3JJHAU-fVMz~Wa&1+lF5tWvGdN#e8ps$0Z96n{H!C@~8RE)8+GNu!x`kh`qRNE^C6 zNLNK&MR1HYKzWzZuR2wLagkQo${f5vY}G#VI~>mgArz3j^lM;7J^>?A(*6 z`~|6~k{|fDQ05C2&edMa5wXC9NiqmyM#d|5E1)TG5IA%QT@1wE$upG0Smx(lWI zzDe7YygkJ9~Hb4ee_KhJ%ul8ws~cV(j| zOwCYvCoU>ni)C!A$OQwqoEpB+%KPyT&F^1@Zq27q&wfm1jSbAj8XsL^v+nr&+gD!D z7?kquF!}tp_g2;Afe4YSyOn948u|03o$$0PxP|2Z=T4ww?JQ?>@NmZU4jrNsXBaC7D_WHS37+#wYGWp#Gal|2=!inFm`K`_Wj?bBz+a?ggrR=zsb0| z1;%aj_tIW_dR+VIq_`K=PEhC+SedV_WWc5S>H`t$6~sIIoX7utC9Fua_S^@uilrM> zJT$HIgO|;hUxJl~G7qGdemg!HYCIQf6Kne=9$ZUYTPDNqJbi|zW3BtLGx`yg55K}1 z`Se}l2Ez;2`7}>D#DrLz>;53N4-ql?03TgP4du76B#Kr;^Tow7%M$csBBF0q;;3+b zw3(J@yiA&|(Y4P{EEG&NlwieI>Uy!sB1TD>RR6+4bDWz^K*8V-8intQGGZZ!LWFd` zk5j95T4B3?uspNXbY$Z&eHZ%o^G*EfX%Ni+c?)bYIz+%<=Kg>k#>N;G54eTHG$h8( z$h6vK8d8bbNrixd2lAU$mgHZPVWXRRp0}Aact(VqX{e`Qk7tHIrxjRQ4SSu|I9W>! 
z&u_VIP$>Y#Cf;%%q!>$#n~z>JGK7i?cRV6U@#Rk6;bg;n9xsNDrQb#W#6|&n?SNi3 z7n7&(BQIZD@c#4|L4Nf&oFsfDU#>S633ty(G}RG<@8-lIF*P7@ZcZq zW5p_GF;+BF7>HiY49x*v9DLi`QuNeHOteWT&vVi;`#;|McLV7r4`7)%O7Xjate=uA02P2RLAa1w}8C{UHAc1WMup8neHTA6j#g2G^+PF!x z!qb3`=~9zNLXnSI+K*z=>m5w|7yLT`mPM`-1Kr@h@?vr8JF}hnfrQ^MlPbNEE|H2s zw##`;`j=7@3RKpUCP}|gU)-LIHvbn3zNC7L?NnlJZiW(9;(p;u92QC}?U;jc7`!EkLfs+7TK}4U5*}puAqomJ& z%btGu#mAZpcxZf5{?}J0V8)_39r-hZFL#e{|r0>T#Su+zs$I4DxT`%G@@))9Pi4X5# zs}w-Ncn!dKi56sne<0&|R{x?;FxhoR7udPua(Lz8!6o>uitxMA?zY_nQ^9Nr@)qOK zGs$pOy+6T#SM~`d0gTEWFN?hc18CeX+@p2Yyo^z~GXR4BjKA+kiguzz#79C{$1>9I zC&G0ePhdkUXS@|Lw`d5^FTVivdX3kD@4@FjNYJEnrWlj9HK>_}?F0GWC#BXw+3kGX zxR8D>@mrrKr0GzY7zh!3VTjJg0nn6&0|w>`P?K&vV|s|xn>vR)p-C~l>porjB9vsJ zOEHNj=DpzGumr7Z%y|c8>hMVZ(^DrLrMLHnNpL}#jMU{s(hrxFAylydTbXmLtn_uZoVF5x zgDL$e3LAEngy!OSmLezNkc3y>cQK0J->{GNEyxlEI%A+EaiLSKKi$48GvRU3W(RGa z3Tt=6w+oBp@EVCH;_M@)Q3|3D=+l&rQ1f+RO?CcHq`1O1>P2@sI}Zgm!~AwU>+N;8 zA{;nLQHU4}u#Iwv4uV{sC8UO&o-;RvD0Znr&-m>&xv1X5eN`a|Wrv-#&oT^g*VM5FAIK#Hp3%!{hLy5G>2YESek zCKZ>0*?x~3^PUF9Ng8u(?Gv zR!MijuS_{hgjK(3@skWH(x~a+6;F177mTKaRboK2{k*gx9&An4(1>wT2l`lHjEG=$&YTX*iaYnMogLx9=r zPcLk6l$V&r{5A^fIeI!c`WhKx8#0*x;D|U8)a>-A^ie%|N)D_w`d&-+-^V(X9De7z zdL)cJc#bcTMHRO`oVjs3OWJ~J$`W^jMY>^a(jXCJh4wKaWSm@mv7@x%$CHbpQ+O6<27%8qTY|K{3!Zqy}Pgz(16Iay-r z4SO-r=5JNrDa8yXSCrb_;1eDb#>PDq;4b(3u}M!@u|-4uXxxTk&O+*9Id4_~s-@#a zLrEe1+j^v3wecu|_k%47n#YWVM5JEnqzJh_Ms+8s#)UOqD)NVg!NG&~m|HvD3x`O>sSzMj$JET$8xCgCgF#qGGDY_Z z4%w+@MwF(%*ZkodbWQW4XI2|?Z6>XODo=IJ*O@JKcp&DkY9(oa&hrYlaL~7x`9^=@ zxMz9NhrqVsI_}F}M$1?ayW5u#^qb()U^~U9J#gQ#d*X@7#-wV+HvR#>RwZ0{8qzOV zCW5SjV2NqE$r<|cUrVdqk9IuI-WT_D${xozU~P8u2^pEHnkYWKDr(g>HN2-2b6%N< z)Gn61QU;?zmfbtlTf+%za&7o_#7FyWF{M@S)N7ELs~& znu~igY&O?B^dTotfkx+`dqGTWCrH^glz##nu_Uu85^Us^6YX=d4)jxqv{pB_L2L6< zLmX>$DcYr+qL_?yM`jt+LkA|AW-Rt#Hp=q(`Z{DL7LWB3#K~H;ENJw}=aG4zlh|!~ zws(Dfe4tGNC#1W{8<*d(@y>mT@U0&9`t#%b+TD7x6TV`*OYCk;)Uqc6T5Pj9z-K>N z%n6fpoZ7BkH40nsBYh&}^V%hAwz2Ul#22sIwBGH-0xl_xsUy#iDkUZ{?!zPTS%N`9 
z?(KEjEedkat*Zb^M0%aW%R~IIr_ENyO=i@H^~$8^{1*`_@GlL6bzjRjph#>kcNaH9 z@t@+pp}`UGyY~ekuoyynGcK!S*(wjiRyR-i$2iojBl|H5oMdXy+lZ&i zTicnviLO5=^dC>qA|_V$$114`6RXwZGt}(Nw&^oyl29#Py>GE~ES}rSD+{xuVp(J} z4@;?o&;|B%bnld(9z_mHi&U$?8(-Mnv+Q{YP9-H#X23W9p7tf29UM1-%%c(5l;4gy z1@_0)o{SeW7~$Kxp3q66)U_L~f$91m0mw8tb@SW0W;2>>E15hb7G{2!APGg%M)5cqw>YlLetJ`m>^ z;3QMUWo@}n=Y4~uDM$4YZVh3;kI%_e?jk!jCmYle(4M%m9A37h@oy(DEPMSB82S!w zUh=++84~V@N_@g-R;bKiMXIR1*Wk_wf`{|@CoL}FDI!cAd0$Az((IWSjNUYm6&p6H zZt+}O-j_AsE=Wu+zH2f6w0YoCR?x=(634bM_e5REQViDHGh|*lLXO&fdNU@!LPj)yPd5wF|Ly0D!P}gw%r8%CgwyKH2_6g?d__h6#@7+z^KRSjjkDU z`kgahf#lH{u^<#@?QLzlbC&%MX?@rU{u{H}=gj00lVf94FRZCWIe+x96Qi|3CdXtD z?wbK;k?tzY$pfn>U~iaR3lkIA4DFIf4;qql>#S8y#F2at{4St~`tR;%(U`2ET&o>N z?kP@EzMR#CEy`u*RD4KyBl#wLflO}8=DMcb&+9f04$9nJm zto!BdlgZ3XOEuPbIejx7G#&zMJg=_f%aYtjeZ7YL9x=&D9GbZo!j0qbJKe>mOD(y& zu^!ETbghmo2oKdoIa^I9So67kSYlgqadUiSu@Q|FAx8yY2T{{x$68^o#g`@GjpM&n zsX87&aJ~?HYSLgAQ&eA*ioOFphzD}o`wv)Eu$5FRi)^=maKvTEufrd4z_360^ak-$ zI$E_)2z%|#hpWNDZ3Lki^3;cwOB_l;NfxnUs0xKDdf*E>4_hc{!y=GzhFx>pmVQ44}i{|D@HzT2F_x2=_P9uOi1kLFKDCAr{yP zO=hcAG<@`8EN76&O;1@ggy+r!BGa;}HRf%BQj?Lf+la{3*~uyUX(`{Zy8`$iC&X`d2G-?2v)Hw9xt3j zY*x3`=>plIQ$iSk6bGvetHj|ZHNn_q;p_XkuG*XLXWRXAZgnQBfkVuXPqH&q8-RI6tFgq+{hq9gRoO&k}!T7trZ!;4tT1XhPm#I4 z*q5!bo*PC6VUxw;1Dw0Zp-@}U$JIcm2folV#|8JJnE8<*qT^y4AF>*(NI3|0if>N- z@%l@I;GPydY|Pe4H651)F*1oS`4t3k$KNgysACp?y1^s-oE@7O6nqbNC zxQwb#2|bk%q#_kD4FV38gi2g@Lr}*hOF?d>3-T+SqeN&UOhha-6zL!mo4NS8@?E+? 
zeYQi1Fsij*vG)C%hVDi<@~zt;xuBl9Ugc!nncn>tmNe7f4uO{qMJ0QTVxS+bwbX8( zep2zw$65tvWmTrHCqKA^w%}~xbt5*i)M7j;J~+y52~9Jp(A{a1m!>;y3g2JMkKCM3 z@&mA`pXas2OLc}93rV3!k(|%U-6=?H%7}q}cDhs;HyeS0`>)_U1`#OF2j(PQy%(p} zIariNMQWG?tEV%wp1>pjS^h3Y>g+qIj>fgPXMm7@WC*%s>OkB-(&nxHxV)ZpO|%rf zl4neQ*c(brO~gR4o8w%q<3>fs(>a}NF;;?h*RLyB1YPbwBR11Kcdtp0j|d|UBtCay zB3KDR4=0Qo!YrV|IO8_&iWWuhH~Ig_s(mf>Rwezuv#sx?>+VB6mBAGAdd`y9lIhrr z1+tZ{cntbv?%&6RU!P!n>|ZR!GSsk#YKZ0Mfcu#_QINOkV zU9Ita_wzHfk1EN%F_o?s@`C$6ang@FN zC%z2y_b;^hJYx{`3Kajs2TvgpeDLrI)zs|Dm4ik>q)MkC2ziw85Hp`*o9h|N9W%?@ zFFRVJr3~8W-h<=_!>ijzv``XrO z`CCM=-TW&JID;A0)b%5#gp=)_rK#&HEIwVv>`2aZ0!;LQMV=%vL(HUNZC+(l>yHVc zbkCOAVyA1Z#mRfY&lliqzNRPJ*KbuUo%7V{k|9`;xUh%p($6jMV{jygRv-F7HKrp~ z6tsrs8%{v}MtQn);+}Lkv4-8OEPZC%C%=Ps;6u%f3~exHzy~&V3+9?J^SDF1tx;aS zWGt*Yh=*Z%<2C4ZZkx9-OsWkbdl@K3~f z`I4YX6FVoX`^_g|o@6Pq_|7^t@28SH$*1$jle;>SEi+A5hBFQE2}i_l?$)|s6nNux z17+xlh(RWqz>=Ii993nN11p5bwl>IxDv840rC($t26*Kb#cDZ`fN3!&-xJFP#H%4dKeY3S+4Q$LNO?@sYR+!QCZV4!{ukJ`#EulLA2_yoQ3w(q`ua44wbsi!>bZe zd)1a-jTb>>)npOf^V35^Twl1v_v`Fmesu&~#>l$G`pCiQ?yo~)OfYti4_$)X>-)4@ z^yC4TJy`$ZykmT?JP9wXD7fgOQj^RUc^NFrVYAk1CmFnA<_^q^#KD2F-fvR77-HF( z_89s8guG^7GDP`c0!00IGbe+fuL(zSO}{2jmq-=57yMd9aV@DfPbW)P2_{#4l+|ZX zul&IfuPUtvo+kY=?v!{K{XC@|Yxc~9;ptJke&L5lHlSXzA-F_Ik<#ja0wI8*9v>8B zv}!xq-c!y0o;GZW`$7q3<-90X{qZD3F{&}}qB0eo@wT+;l_DV#bZav^N<$D=Y0ba` zEFR@2PuGw6sVEV`w%yu1p`h!)Zo}GQ{~>07`@P-rV*aAOem;34Qw_MH+K(c0J2@F1 znAKKU;G`A^R#i8@Yfx#c;!voDg3b!WhNLL70z`YunDah55I`7Vt9qBA96-*H(#!MYoZo zFq9DNT$))^5?*;^-|wiiY>wm~+dQ;e7Zn~*)fnl@gCCDn+x#OC?x7*nfRfV`Tp za5YkrAd*9(TKve6HQ#jFc^N-a4N?ijF>ZdH1g{&k<$GaD7uf|8^;f#Od(YnkY<+bY z9*~p_4RJ365w8i89*8A)u4rTLqYw5w`FstHu_M+b&i{pevHfrZw;Bi{5OKG9hRc0sV#5yV}+c0K5h;63X`cSa#pqIavB^cg{0|*!b`s{XFeFTd-8;s-MPck*Rf6 ziVDUPSj;sI$bX7}-ZUQMG*`U`(y-+C4~`gA9tGvWtf;{!m&UYkc=~JELI`%>J*Sqq z#Z+!Dq_Qp}0k@AJ2)jZ7BXWHZT9%`1z~U3mS@yIxnVRw`#)HZu3|9Jdsc-eT$aBp< zUiN#E=WbEVSl7zsT%SzuJF(`l!N<%4DWop6{6-G%r5vz9U7Z%6udI^wfATKQR1r@Y 
zZQ+Vd9!3HTHP^_}U&wVo?xmM()$vCUQp_XJ%6HN=9L}2sI?P79@|xb00#!yrbpgmX zD}9f4yv})Hl-UzPZqLhMT^)%I%BYQscccC5QavG<{d?ZfOVDKM5wsHov7wnrOxTsq zEYX2kU(c+PT7sUA3~x~Yp6%0_>2&Im(r@le&3Q!w5`Js#!0842LUBe{FXG)OPZ(BS z17CQdUvP1e&v$LxbE6_1`psrDD;cGq?pt;F9C~IKLV-YkxKNM!Z~=$QtI2^d6Ux*Y zUYpEXQ9}_`5(eZQ3`*Z<4&(HHeuY#%6MT@M5I1itBgu=B{>8rH-y;CpZ91r`2wqQ4 zUlfhC024}rFx$RvBC9ZMTKp#Uri9tnXG+08cx^}qt4t#)L04brYr&j1jH58GDDW+H zJZ9AqOLr|XmBSBhQrvO!AQ5D;FVMZa3JM8}%ss!*H8z0p7e&FWlTbyY+_c9!q?huu zO?byF6r2YvJjKj7&~FVm9oat$29xuj*8gn=_suC-#76zfb9{=4A+M+I4BE){Suz{E z)in)?^w7>8+9tgi1gcA$0FG_|B24@U1iytbAQTuM8Bc%DMx@fV;McIFrzlF;Xl=p% zp)>}Dwnsd9c|s6%LPRya@OW4bA0ZIviVh6S5bpdY7^^M4)y1$vg?P}&u7wA z=kiqzN2AYArZW)|Kmv&>UWZLIQ8yIX#e}H13hDw51W%|aYS#1>vZL?YJruS2^oboT z5w$tF+-LRPSHe5>QLM}EVi=5yDG3v9xBH8@igd!Q?|?Qb3&Me_0Y~m7OF#AZvy;UTTSKdm=aW?n8pKeMu2+2 zvjZ05Jj8IZ^8Z8AS4Tzlyi>=Q&9#0m*)amDeFvzHHpC=5)M}sfCOe$4iFZz zbA&^f)BI}ioG0wvIwru1vNBI+qHhy9@4e<=G$_m{of(6?0+3{(Kmx@v!NzOTULV4l zq%Wn}oyX1u@F&^3YEot9tH7&l`FaF|&QG?<$4f;>P8KbB060{M{Kxs$4k{4(9q4urb66FX-NHORL&`>)u(BoZnX1 z>qlIrWMM!}Ho60?Oe(16PoESBC6;PBV;9Ltj);CCl&fLf71E3d(qCBCTe`giGKI6| zzE*qe(fV!b=v*a^?G(w0F;W72-VI&ojfbR7ckbp&w+MevbSc9Q{b=F=!j4MJs5K0Z z_%F;P#Bf$0aUM}!VnKhx)pG__D{}*;Wia92o@805g#hG6SGl7^{S`GGOjZ*lNP9>x zhMI7Tp#&VER|G#IO8bz_^Ua>ot_sqqI?R(&difDyv?MTD^;Qo#uN_ir-1<6+;8Z9J z^G78@jhqmfuzj4bM~Pxds-QF6ggW!#-hU)AUtt*Q38;`B?sEcFi$<5psnz>{lWjz| zy^0_MEGaYsV>GI^tK)AFChoy85Y9%P+G55Th9A$-wm(@qs1A^BICv$%d|3EC$ERw1 zTz0`W?|XrJQ7<=(;i(SWo2Td%<(~u()S6SZLCFz{#PG%o`&~vt%UEwQ9m0gMAeRHd z1orA-f*;g#{#r-LD*7gXx<#(61h^cIRkZP5GagBdpwE$URj1$8|1l7V^`Ow7K4+{y z>9s%60Y;HbzS{9I5>4^RongX#gw!O;iVe06zuN0- z+Pw31(UIVy@5gVu(y}Z}Z=w~1eU9Jv6V+iTb6~)ld5N|c`g`8|>;3J+wghlp{wTTJ z9$yn_gZk>V+rdgyI+LV7?a=9%^tRJ};cJ1t0a+VV_^4B1Q;JO}MQjPaf74gmHfe#Ue8>vHkJA|H02>`82>-_Cq&&wZb7cpgnCj zS@`)>-*a#iZ&fO8Iw`Iz0d$=PbxiAWq}C6&MF1a^86(mFcwqb*|8%4$cU99=5x90p z_JV6c$cmYe2z@^bL&*nd8h~i`i?I)JQY6{- z%^u(7MFaaz8a=81eml>T2rM#!u#`ov$MXZEHXtDYNNo{V#oyd`;DH0K1LPUX(T#zt zze#^_Hrx>kT{BYBl~c67HdqRifW`ibM(~ZZDYz+g^(Mr 
zSvT%Mc%5HubD5O;g#cIad_i0G;&5m}tcQ~u*35X_z1Beami?ANWSO2E7cJ)8atMd( zG6GayARZ{f$$hm!ZWhD>*iG19m3^Pyjb9mNi}^Ejz1t>OT)UyStS5kVl*Ii6Bv zR5|21I_E^HcL2VXQ3^ORyr2w!fplO%n;CEe)H=yY%4a4bBa0GUP3lLxstex+9PJBp zJo6u4+A2eANOg&dVesrH6JXpA!n2^Q`>H#V>_doSu{f5zX?rxwLUMM{iBW=(#{YT_R2;19bQJBaZC$*~pHI9rNyhg?<5+hFaVK7PcBNJJ$jEvx9Z2 z&q1S2&}d$g#K?q-gdrX6u`7uRfQO347UcRH9*u8w+Jg#IKEOtdA7A4*!xExLb zK?meyK37ynzE=(&hf?7+Vm-iCk+3_Qo{unBadEdmPsOU0R6y>xXYCQP~6KlmG-L zYh6qJuTrR7$B+$>#CC>$@j#CQ#8kW7Y@ckNe&FwmOS78K1%&at?{e3j*sQg4af;h& zGY?i!MnT5%Lw>R$+-=n>Ge}^u9*5?dBG>f~E{9T;HDY?UClPOY?rB4RWeU4qKMSpH z6nibX0?ZpGn8Jeqo1eT_w`Lu&E_=l>pRhS7ShjPGgQXg8+PrY*W+l*vqsZ{m+-Ci? z6344wOD!e+rzhd6c8eXWs5=->psXRAxsYW)a}Jg*n_nnSQ=V`3eU4Vuc!92)kkCnl zf;+pR+I+`&H^e$KfHAWw_KbgoW)iyeba68ED3=v+Q2TErw`P^dHbs-cE1tJ)}iqmhzxryTOG4L!`^zWfVq{ zn}is>=*GYSF}HNzp~a+Ss4#P7J7Uh2WWp*lOGd!TJ#NTlWdgg8sugF09Gpobe?wt= z!C52!iY&DW7$M_TPD~Q<`siLPpH5K|)`h0TUZlN(-F54!GS@k-IUOOf`+#&n%Z?B} zl=yup@N@zpEMZSPrU>-pg10OiIkM<896=^FI~Haqez=scrDl(nWx3tMuuGf0PC*eA z85ZJK8v1#_q@%MMU;Q|5bumyv{15K!$Bkl#9ivLuzsj=u?2@`?;*&FHk+T1duH4#4 z(7JC2`X5erK#Cob+;foDc+>3jzjXy~yeX+iXt*C>`CK`CsXK5yG{4u>Fb)0(il|Mf zj~HO(E(e#@PF6l+h9#worYB@Z0+t$jTB2)s-~{E`!`1cMLhMx4JOKTYwf4*1xt^Tt zKVkSu45>$a2SlbD)D%li{&6s9v?w72*2vXkxTfM{xW=w^B+CWx@8@H5N&Irz*Nkq7 zITM9qKgPb3Uy@~IHlr4Gx&I-RLv{=~v?Amhf!?0C`WJN0JyLe|!sP)6SD=nZ|-_bn*9038N zVlj6_g5tIc{kE=xS^54>oN`C>M@)Gz49X+zD5I_Kp+xUJ_~~tT)3(%`GLu(-(_|Cg z8>WOuC-vm<*Rv+|*rzp$d7In-tMqRf`rv=-*Rw*FZe%W>6rykINvl-XgZ)=%+(KtC zLvN0u$TT*SoFxC>uZQB|#rr6SCZ4W--7%QMgYEhHt*wL%ILq>yQK<2n{XcG5F;8DC zP^33V2BvFj`JSuV#hyF+Lw3=5>;kK>PMzC6e_iP48zJ;chfVhSFDum_JteDM9$_Z4 zFBEf1y3bQem&KA6eAx&+QI&YJKkXRwOmMsASF=}ZI7k0CNuntFoyknR|LgJokqR8Z zHgclk{sO-5#9kL^3Vf=}OWF-U#5XFW@{zDaJ{#ec956gUH(Ir@n7^cZ66eq}gxKay z?!orII>v^_-oP8h^o&maJVxlt3SLif9+%{+`VP;c&Ze6$VQT#i{?5X@0*t5a7IsG3 zp&RI*oNW)B*(&Fx=>tP->+8+u3p9|&@E7vrb24$YnKHwpY`fbh(%;Y4TuPUvXA8*| z6|*ptD}-Sc)0&GwyD>)BX#%Zox_axN!&+7fNT2|yv+Yo7jN5(u#Pe`cAF`(uU2{{y z9r+6j`1M4tQ+TZSiZ0Ws8%eK?>D_rw{Huzl`q3D?&TbtBRGe 
zGE{#AnT5P>j^C$tD;0m5?5u*uO@1So(!~NxS*B%1!T%?e)mm~qeK*Cp8`$sLwQuG;ZK+8^56d~9zq{Bc0 zdAt0Pzn&f<4S&nM9Fb|TEWX2DP5$mbL0BH32y2(5BY*&c%Qp+F5+=dC&Y5O^>3k@h zGYw~p%$Kf?UW^r7%i7G2nA-}|2HTIQXt{5wuWl-np3#PIXpZw#vF5D=JF#;q7+1s& zmuIM*5vBl42@n$bDgdc(ZO_9wxW#3iHzvSbTK$opDkoyV=irGP*SSwf1T0P%Utar^ z)w^K=+^P-&UswG%ClQRHIL=jtpQw7UOTV2h8fC{Q+4?cB29KwPfiOW0dk!qgldLY_ zpg(z0P2I^c1#nnzIvxRqMvZ4?Ql$8~`XujR&qc^hnn*W0@z>9#)hX{4xqt4Cx$BYQ zzQU!oFa8OJmq@UCO(pe*_2F@g_gZtAhI}g3O@u|x`V(|3q`yd^1AwXD?O0hau4&BM zLCh@v*XQQCaycH-WMnx&Of`UMd{CnY$XMgQf3&pd@_q+9=ynlCWvNt~4uj$_!FsN# zTitioKwO*xQ#vLriEjnj>|%_*7tvwn<{&aN@dKaAq${S8?8i^1=mh}_LRj( ziwHsL`V|wP;OdiS6^I2McVFb3veSe&Bx3(8zUpHDv6#YjmP_+74-By1;WKW>0i5t07;xbc;LD3X zrB=p`+A9Tl95P1mj_A8kKqRd=H`4ro$4NxDyryH12J<>2kQW*qVdd4!PxCduDQ0&N z)#gZ}b3VE=b9dExtxk;nXJ;gOLUcKk^(*WpI9tC@$UmSzF%`nAy&_vkk)JN(I$v9n zV+qp;x{diyXf&?<59u4+PY(g+KICnLWrgsm-?=XV_OLT}KupeVGXI1qa{-@_;fMk47UO4$br7qyfI_ZH6 zh0M(hr)L`}b#?{}>+9=k>KZP|u*6{KTK3C*aC-SOJLFd&y_0OI>q``v&)Bm6rB=WH zT6z9ZZNwHt_^&hzOZmIm=WJgY{rrC|x&SNyU~jRN+0rAQE+=oT`a^;If;J{oXQev$ zFf$x_`PY>gI4e5iR7a}M@l}D`MX=oh3$A&W=hN{75v)`IN$(50=bIIiF2Q8oZ*&^BczkR_Kt zJCVmj{8z~?8ccE6vw1gKfrh?!V2UMH412^T{xymb4yb}+GXdGpLw~9azga69n{^&H zDC&jJ{zFz)jUiA=;uFZRYPBf=8+;aK|Hq9L*Sxzq4+B6sJ<5v1tjMjTw8@F$%uGjn zZQ_XgtG4+~SRYV}ru9Gj626?^fZ$>Otwc-KEa=4%0H*zO@Yr-@m|Nbq22=zh$Vjqm z3W5|-M*r}@BqPayw?-pWVW0%lX8fK*mGaInpa#%Df0`I7^_79iDLF%({!s}e^0nDu&5P*tcG_wKMn!OK4bOY}P_5jwO6m8SAY5W!$at4!m~ z*ZYCs`NLO{NS_6!#oSjIwZFdwU!+MnDQjz_XhFvM*bYCI|6yq@=SKyc+md9_bZ)U! 
z1S}g}te7A7l$tf{GgaNx7=KpoZF9ggujpUWL~L#R&yH5SvHd>Q*KI{3LCHe35P(O7 zNO6~5d`iE8-#Dz-bfq9f@AVyNU?cAP!a_hGQFXb1-)+4JAN5EgPL=m0LPR?D+VK33K{6+Cei5;A&$)1|R@;TskcPQQ8 zIUZOfUl={0;JyUd<91R*Dq4FXqF*|~9AWxXmSU7yhG&VRQPdg&!7+9~U8MzT0B~8y z0mBmk{YMV3`OemH>VLW|w1`X8ss$DtJk~z77A>y4A2l0aLDN+z@SPk>SuVha^|T2G zG9pAd3a`2lrcfq2j(}SL9=Qh+(tsTC;b*R&_SiqylkUy3+VjWv|JniPpIvhjojo!+ zS-(~Pat=5L7=$r{146~n*EYnYRPf}2Aso_GPkPcUfGO~hJ7WZ`iG&!r?!SAQ?e5F5i+LK_k{wgwq==k{?4zIrrVxUkmaKu&M z0`Y}%$Y(joRyU<*}0EU(7Bv%=?p3z&Vf6X8B?r>5$$g}SG zX|XNZdd2@M1lJA%zo#2tby)ZYP75fUaG1u zK=*&n%zUcHqYr6UjoVi*?`+@g)n@^7+@PS~N&i~XqsTXGx-AUMX?Tz=^Z=YhzN5$Z zO(%yCQ^ravuE_6iljy!Zc^uhdsrK+6#HJV3VcPYYa!)w2m#wX6fLZK%rlPT?uZ4xy zqk(Yc$#I>j^U55D3Wi@7d@lrnB9^P>uve43n_k3^n>#y0jQA%pI3(40Jg27^p zq{gNpD)i#Yxu5;aL$&8t8Z_cv+<$xN{irN>tR#0sbJ_bE*ixU`LR?wuTv@+4AI&G1 z<-VX!k7e+(Q*1gI2vplZc0T>F6Hr)!AKS0TT%w)Sx-xak+$XTy_SOenJ25RvcIwKYVUGtH(6iO@l~td&J4 z39Y}srE;uZypD#q`o$z_VRWOIe;?Q_Rl(Wd;IE-;G->jSX%1|!mgjbBMCT*hJn5!f|6$<2%mzMy`0dA9$x*5WJj zk@&wwyOGddZ>Eq~eIR|b8?Wun|MIyJeR$>K7n>zaY+~yd-Se}nNef@XVOx#xdi&3^ zy62BME&H5s-yBTgy}<m+^9`_Dt<$=&W9*}y3U4)prbCmdgvq<$62Um7Y-DrmM8DBK|+Q? zPLk0Ue)rA5bA;Gh#*qQDTvRRBq#d%Az3t?q@m%73xI;M_**LqJWO}~jQzC;vQb%M4d-SPAPVp{X)r4jl zx_gfWD?yhO>iVxfsdU4NEJ0^2{y$XRS1E=ZnxZeA*s4$ch*o3%OjoEBh(>aeP9?Cv zZ`AD5e7oRezEFuIGcfq1vOUS#KKAIQu8;2;H)iUQ=4PbUlWkV|!AQz@#xKL5^n;m{ zw#M4CYn(*?UuKXn_f=c`s?Nvx{i}RqErZ{DbJ3S*%TVf>O{+ez{X!A?Dd*z&Iu}lj z+o8eJP+8&i{e})&p0~>zKuF~hnrT_vjA;4sp?5%+QDQ>P6Z7_EFxIv~Kq>6$xo`G+ zr*mNeoyRtriE|3J^aS#|mX#g?fv-aSVlN5@DIU1n!lPBpYBytE;L7iJ1`so1Ii$Ll z%|8hS6;&ovD0izAFlGs|AcPATDQ6Q7^i;E|(LmdVgcPIjj>EBtzF7KhSSIyM8UOq! 
zPCR~gUb6*p;%lRKjVWAUHtGY1sW2%{?$>{N2z2F<%j4f_FLwq$sj`6NjD^TBL;(Ug zqiMQ%ah#bIj)9rz&k3Hlz|sV9br_4S#=PMARc0P49i{U9azvfPaBlWfH!NV8DO`mO z)FH@HB=qJ;TM#))PDXjQ>rfJlID_7}Q6Ie2WEd@GMAbonoOylLjXLniD{w_q1;BO# z0XI((^fIjM9?~oVa-3E+|MvdG?_suH-xDUcbp|IwVRT&{hxZSkBhsq+omD=k@!{?5 z2z(TDcAydsi8Tv2kuYAG#fBFIe*ExNZni2=sid3m0*JJIYtyPoj3AK^4-PB9n$u)Wr|H-p8cKhD^iJ3ER%0l2wO>a{Bi+PHy*f7%NB;P*E z$#l*$ImS!=vDEP=5nkY;I2y9>RI+%;a9!+tkROCo<|?itbAKTC+Pq94wTR`}+lx(1 z2K4H`W)y|xL5)*00be=Uf4eN8zLp}7yC}jacb^X(GxY?ieJTh$lULUSbzl$Bdo<4e zM7UXpGPks6p4SOgh78<0O$yqD>^~@8Y>k=}6zt=YWrd9#OWn5bp zqHQA#EJ$*E)vP^IVI)_^IfaVkd>tQNg^K<_My)UmDmE8AFsPx!DgfrvSpRX^dq7M| zeo)wTbo~x1sawc)h^(bN!uqTfI)Ir-RhEI00pN=6ZB2xUs^}mcrByPJ`86H^fjWwD z@86Bt(!g01KzxfA)GxMHOhBN;TXTY{oAi3}Dp^CpcIGWwnZwsJ2V@qRtixnTvU|2f zRH?2kq@h~~J!T+2saYlbJtR!qZ`m@n$oPvfA-(=a&NJzdKA9IED?9Ne4qDxQe@An? z62Ge35$!v*BO&W9#@Q>a`DsQ$Pm*@G-8M|kZ_!tMIaRZZ;zf|uwnyVe8eQIg7yDH@ zRMXA}^)q?CW|lL<-A;R;)f-I{3E$BamnVxe)XLpRGnBap_b*48Wo?dDhc+$tj^+=j zEK+6erg!YUpizE={zagZO)px?`Y!}N7b>t@morlj>ukfw?I`Dw?E} z)&sx95R5d1Rg2105;vI2j@ASBX9d`e)1oy}<>>-eTH<#_9bnkP_&R%%*rH99rc(DK zmlEIBZ&v30iS2K7*m=85?!hbio;&{fllHAU=vWXik&g?}^A+2?Cnrt6^m66l&#sZ5OUs7cN-Td?wsUW9;(c8(VQ_$C=Gmd*=p< zki~8TV}ZGDk&S9}0r=UVICJ&CA%%g7KZ+NoKQA7RF% zTEwgtvWG~v@u~LPg>)!&!PX&zno@Uk7^Xa#iexbkburYW`A-nWP#G+|l1w$Y5chRv zw?!;D^;=-=ucDJ*zx8)}>I)pPz#=;<=w?L}wT)~<5|ffWFN@i(j=XKImm@iQU~R?~ zj_HQMi5Wk_Vxh2WXbeTp2Qf~r26SRn_Q-W(7>Nlayu_i)j2A9I#8fEZ*_+ba$hi8z zho@zSJ1W2#OJVaJ{T~ESAsA7hld*g@KK#r?ot~6T1o`mQe|N{T7K8_o`Osx zbzMN=OQI@yqJ$p^w*28%ggLGf%@>nJsuB#MH^6^djc0$8vUEWm=@jcd>n z*C9>C;o@x}KpAgfkaOz|@R}A~8N!d}gHL;d{E62TKqkkZS>+>t#^x#xIHKT$t7_Y& zFVEaSVH3P=+K{SO_Eu$6t~Y6!N+*@0cwEmVy%7&FwLJu=8}TS;5%x8IiRdZPCoEzI zc!1KI2=}HrFOUQ^8g@KJZst_-u?{UAdZVDQ#j7gH>B}!SP12oy{E?ayp1j|s;{B2S z{FxpEno4XwFc^FajUg8P;f+&e#tTjjUu^rsr~Pg^^bLf;mJ_ZP4^yLSD1#-nEkW>| z!y-VO{7i28D?#WtLKBwl7%@PpHjnXcN31i#P)<1H(>;K*RRG;aq%a2Dk|S3HBbzxV zX-;?f@5S}OfOYwoXmrd$j1P?WIm&O~mk5bUcL@~C1zNq$moXZ+AO|)itWcijZ0l_( 
zSO6%);BiXjZc$2Biwq?6lQu+_0xzB=!QvJxXo@o_=)6}vQtKjhL^k0aMrrGBq>%~% zXw@3GO2ZV=#f}$RpU}0RFIiuFvAgP_qWS4Ln%01Ec^^H z6nU~rk|erb`a+75EO2)!QBL>^ail-t+OVfn)wnrJ`W+Ou+e1KeW*kb3wl{7*(rW+> zI&THQe~W+SE92jif+m!_mNIW}NVQ~Hif~vNhYdaXL8}TNOf~$fY!EmpXzavZn;DLb zJ>Ue}3eyM6lMU`fCF9Z8Z!Y&e#GC+s12;oIA3CYigSzhlJv%k|bC_&DVWouD@B2r-kUU-Q4ap)ODJ zv(sXxT^6h`s`Q$+-K&MQZw+PPXkokQ#fO>ZAGIHksv5SemMN!#M zjX8}N2T!gIPj!M6ha4no0~oBl!CDX?l3C#KX}g8KWevD?5$0d!$=kG$HLSsj$;(m~ zT-;_~JY4K2lYw+@Y1|rtKDd9o<_X^FW^byB?W>2baxp*ftIX_|ImE7+L{ZWjLeTBB z->a!484sJNa~~aRuX}v=t=4f1_2H7koGWtS8_lf)(AtHYJYzTuQD#J98SeW~n?iy* z@bPe2i#)9;NOj+t_}(-7cc{NHMa2PrCJ9pdnbY-!5P|(gu@=ga0FuW`{XvvGX@|Gh z$YLR;-2GYEVW5mnk;S4%5;Sc|U6@9tYjE!l4VprH98ivb)iRR$oTXnzGP$K$Qw`$C z1=i9fUGHCnvN)IMUsWn#%t&i!6x7khSu0v!`#l2- z!eoKDPeUAT|2p8~N?sBV$YI6#V8XmrvHD#O84!Vzh!LT>D+*z>L9+p1yleKtEZnaVvNGCqc2b zJA%fPyBT=v83mCYwgWsiN|JD>VmJOKsizHSE?|{Vo!59IDadgJ#*`GKhBrE@&i!Aj zdfNOhgHa<%+M=vi?$^3jjhIjj+*640OgHg&JS?2n+V410>3N`eivugendTdoiBoyq z9Yu_#+2)#o7^?2-(E+v@s{@_LpfWCoh_uU$cnM2v)phmKbNa8)`I%Hr&W&__9i zdy)Bwi{1kmHnzQB6m(%;MigW_DWyMPi#9KKi2}6Fegw}vO5*2>24Ra0Q??%-d?BSY{O&o7R* z8+Ls2);(=PY}KjF#zqSwrVFp`+JBWSW-!JnzY=jgkbDBs}Zn@K^}2Ajgi zto`Jrr)(Gmz`b+#e<^1!+*mCjcxw9FEfQq9pEcAGF`pTqK2j$4jA^9?ET7~AoZL)0EmGX(IEmZrZeBEp?LnB^M*dtDwZ7*#V_AMOYQ>pB+0=dMh5i#RSehlkB`QxN ztRJ1jNIhrs& z|BYprxfm30Y9`7*UEOGT*du-WTr^9G=%IQ%^42+Wk`+JRo;CO=g=0$GQ&jM`jRS3Q zwg3;7#x_X*)y$t-%qc3GivhA8CP1RGY-w)`9>Hc$aaHggzP81cFpOg8J zey3u1!@Az=`-2(?r~Z~Aw3@0mFTMI(_BG=JZ?tJ zmKyq;?$#az-LR@dMrHw|<2T%~v^lT(y}tik1a-PRXc~T$PL=MqYdh3~OI|FL=&$iD zvw+bWsrRmtXixXKR4I}XFgWio}UU4?4gshF>W61Uj1&Kew@SAwtqN+)Qy|X;g-ICZ2_OVxmrsn(tCS zqo&9TFb19ibTw5)^EOWV3gNy5uAQ)1-S^+G0Fa8$TdYLLJCy=@b3e+UM4OlmF67BSy;rGs5?4&_^!~r3_IljT-9Gj=tsKkO<9-UO&NCCz0R9| zm5xYO^M9P4jVaAfY1|F2ohGBZ@9g~N1!MgB(yHHs3D?>+IOZN-z9;oghcjEQoE~u} z$^22Clff%(GALv`lSh8m@%@qfXbBmVVu6!8nv3SU^$&3HfR$IqRY0!`Zk>on)x(

MD6X+!J1@F(}%!*r7#}W$zqnl~F(1*vvSf>|oe+CSknw z=Swq6jG`*U$0%2#Cw-k8`b+F1vgq&B<7f<@opprEeJ_QRi!(9BV-Uj35Q3drF!xhr zTJ*&Qo@##jqGUbHiQcLwPYO->;$pBh)lJ&Jgr<_ZD+mUh*dxiSVn^vO;p+r1m!>Alb0 z`uvo}J1=sLsB!E0KL8>9Jbxsep~pBU-Hh;SYsEzXYt{P~f{r5jv+DS&ayP8q-|mKe zC=Zxwa<5X!Kn5CMaQYE|CQa2aM#ojZPa2a4B?ro=?f1mP`t+(|Pa-=>HGheSdVKL0N5=H$5}J1l z+>+5t$%Ly0ZsyX+-+|76C`Wfc0|v14{3SFY^tU^P{)zj@;Ax|YN}Dx@=i^2L=#cT{>ocV#>Ev*HF>2+a*34lUSg|HDX< zDqmV1Mdw)ptK^qB=sAzZ&><2kG?VS?6{u?UY?=W6pGe2tGU~Z&LQQTP998xAHJzUK}8$48Q->noAyL5%;k>*xWS z4@n&(f1b9&M8#Ki0{}fIjNE4bDoYfzekpgu7Ujoy;Yh@{$*+WhNXG6Ff&PIT6zYR1 zTVw_fSL%RNNDmPbK_z}%m3i96MlGUt7@NBesItcme z6L~Sl^9q>VSE+LnZSPb!lE`)-SR^I1G$83sCd5qeG2A*|C3?2aN_w9ZKgt zY4GGuD zhG&i&|AW+Uwv-h4$)xvGokxSsTG$$%NV}Nqi5b1GN!2@BlM*WdH39Znu&HrqxJjayS=oISK-K<8Xnw|E`vw5ULJK1(8zZCB>)u z61$QNN!8aOG(gr!+ar4#t+2}`barV?dTXTXK7IeTr$(5&;(w^#Etd5Pc|&-9u1Oip zMz^Zn=(k8hC@m1Lh-6K0wZp5r={2%?1{a>HLu^VMfaR<0ajOpa)&DC`E13E#dhZ0H z&_I;)sU@(a3AZg=uIHk}Zu-XW88#x?hbd;*{uxIP8;}qKaFEYsN{H`qBv4Fw<_I2<#p%6S$87!sSofvEb}&Y`m1l|Ij#`zgsu?h zXwRAvp;zLB|2>9)GqN-`4->EM|1Lb(`yWEKT#X3x?o_PebpFxctSfrY$yj(%P7NHZ zw8=UYk>gjH)x%tjuTq5vu2SI$Go8mWMM%MF>NELLvEW4d-o3kFnFa(Tdf@P#2G5?A zc@q#W*=5zZ+yw+BWX;NO`<_DM=PoJJyXz1=N$o~!Dc-5eLbDR*#-t34_6a(4zmuD^0@jvLviWMOr{gAeJRlW3^7W_C> zxhbKX;H1ziDO+anKN9idY57Q|Fk)*8ysB39X8`)Adw!q;ITby(=luk8X^+;QRvwfB z=5yqA1q4FCagU1pofEBXOj11vW6jD3TwRfW(Vka4W^6VwV2KQ zS5dZ#q-=(y__&Yb1){e#!Zy8$|5f>|WV{Yj6^d_A7e8}O;*d_ML*)ArP}?AXm6H`S zvBBds8TG}>$cjHzW_46WG|v6zVg5i1+_zp&w0}|8xHzB`5Cds60)^>COM=!mkWnHnh%D~tBKYV&lF3bEs}+3l`6KF zwJ>IkI^%{!sOdK%?lJjvw!i2+0c)tQlo=Y@QWsSKZ@Equ^)t_jGfDJY(PKaJSLNu% z-K%!%HCzk6CryZ?3mwLD4YB*!iRayL#ZJ#nTc8Fyo+H$-QP>Q68CVhD zCT7ffv8uu6Vh~%LG*h&)51JC*`rE=<$)f2~pLtWB<@egajnM?+;|70IJWtN4<6gSo zQ>Av&l7xNGllAosQj~m4Hpih;%Iyj?i~yH^F;})vo_>Crf1p3eUQC_-1|-!F=nFpW z=`?WPO4gCh{rNLgwkJEUPp+$X!eF=(_37iE6tpiuE6XwaUn|=PGfn~&21hiVHC7?B zGrnE-%TdGKXV6nLU)E|&%Pa|Y7jcglH>(Dqq3J{YR*T_De>MJj0C=c!>0(PJ&MyHZ zFP{uc@0=3-$56_j>8V-&2hM7xS{Q?*^ys%2F67e_Ii{yf{3Zogi-dYxR@5VGl*)-P$92qSL 
zy$o4Nizj|y(D9r9#ifIpk~tm_STtv|s?Tx|LpdYdr1N^%J&0lvN5C~tkv>awHC!c_ zKF!rsAJ1>1#w3$M_ub=F!3S5okx|Zr%@_cLYf2iyi;sz~h2`dclOc>(1;hlv!pR-^ ztl1ZtJVuADZ(fT|Z(es|1lt;jV{@|e;SvASEz(wh&yT#Q>mNk7eESbju$b`$2;!pA zKLWmBcDU7rNrU3{Ms!7~Jp|A>KdPB8MT{m($5AQ~FykJxCSZ|*#`q7kEg(Ch#eHSr zrM@}n!ULhY<)&u_rnR=M!d`v8V@kQXYklD1v8VNEJr78V5#&SO6{3YM1Rxl-KAY6k z|Di2lPo8{j0h#C_NH$-%dHZhh+sN$37gjd-D)!4|hK}ehFXfd3nv8U8pbe<#WSSGX z;tg0FuApM(k|_!hdM+xAa3L+sjQc$Mh3MqfX%@oa*NN88)zpA{(hQy$|GmE=ympKB zsZqp7*NhrJ8e*tLud!J&&fRXp>H+OQQid4ICI~A2el#^4h?9T6JdI!3xElJLtpRIs z$vTjkS9)RS$(I2=LodYwLRd3#zb-Z$xcGjk58DlcnCHi2u&;-W=A>_%@l8^Nie7B( zCq6j!+r|_S_;^s@j5eE{f~&EITO(ri+apfeF&(3h|a zHa|?Z?TB8jDtKi#JbcrmnFKaewac5Q-ni$>3N%-Dhp*m&;!!l+I`#8zI9%tbo!8Lnu&EKR+uLx1Phb*rQvFt*z8JW3(JfiX=x!NT#vh78E;kH* z@J3&M+?-xXtbQ{7`&yiO*5S|_p4O=F;=!V%{20aLcDTLP0a=l94dZax#ZBt5M*;&!Sr)hObG?9>>VX%A@kMFG++{~~7BFUKAX{wdNUok6 zWBy_RbNrtgWQ+GO{`vH;HBWt5lL7k27bp*NX;Xxlq=w81yW&@>9UrKsy$zIQ|4eSZ z#N_@D%Z8!J_-uQVV5lV$ovFI(%=VdjP%I|CWovX?;bR2A|K6xh-!a-wiIr6+T1R{i zNU^$jYOCl=LJR-E;ytLA{rP?k&Lt+s6qK2JREBJZ42b?q+3>d&?Y>vd^*F>~2<&vr zHni^RuCn7sr)B(_a7vGgI9B|J;Gd$P00DB-f{;)_n{R)*nhNV+M^i|iPiUa7kdl_wbjp0`(MVxk4n^|sVFb-9{}e+4&mS!Z)05_iO5*G@G~1_u&( zl+|kasn=O!?b8;57u@J-V$vy82zJ`}2=+KsSqXwQBQM+=9|Oz8{q_LQCO(6NB}e zS|nf*vjl9ujvbX!wJcirTH{ZM-r`|x%F7iHiY;HQiWO3%EvOhM3QZW!eZT70WW9}W zNeL?XZqtPD{@vJ!#lGC7E;+nQ6C1XOZpD^W05omdz!SlCNDO^?r}l@QBWB7uPrs?f z+;QG^HRNf599_xJDYDkNe(IfX{zlM4AI z6?LSPY_O#HX0C;A|Eh>J^2rIazQ8emx0eyz2vs=im;w(g#e3td9idU#$yU5*njiF` z+D3h4ty%t34?7mfwOnnyqabNWKoIKARD^mHg;Hrxu}KM9H5C`)HS&wWmfGprU*fQP+Pfx z6kH8C2WvV{e%tS^mfvil1`DH(&CJM(fKRA(R;BQ#iYpKi>qo6E(I^AcdcH%5-jC7M z6BS2UI2@fE`#pAolAR~@4i!lr>V1VfF}l4pCh9Vvq4d9sk3rI$;memZVTUmk2?3j$@ zD0~UxGc3<Ie}|}_vNZh#z|-n+ zClvOy*Jb|+IS(N?E#b&Xy8n*%d8>hLrIi2-rY&`)7jit#6wD!9d_3Ogy-?2SKE}x9 zw?l=?U3VTmwTGZz#Z?iT7uE3r4Wlg$K@!JAtox3Xqy_#Vd;$4wRkGwnYYT?Ivh1b= zl{O=8fzHml#HUuN?shI@dbYLZS=HQ^`_eV3-h_l_%W13e1%~(dQ(uol4#uBq&a`v| z5;aX>m^93$$w~ax&FANy;XT^oQhL5d_UM%ri}rI;e6 
zP(Oluf?O{kM<&&kms{MHqyeSyi6(3eHI%ynzURx`*)tLd(&^84T|E?>m1acqxfysDI1c+dM0MXZ?@^MC-}+J%%=Y60%lEkzb{zxtmC>`GB{Ur73Zb^r1fWL&uRONI zW=rn~L7&<`wG0>hsd$e;FMb~OsB#j_OZC3O*$AKg48X#j6!vFdEj=OH95{H zS+s8YC(C#e=`?asV?`Qea6|;!Up^l_F`$J30sk%%oJC3SgS8CKUJXY^R@UUkx9{N7 z_3<-&@NDSKg~7%;0xwNOY>mlCQVP<>Yy!5q z?SO4A=cvxPAv%Mh#rg=5k$L|7`TK!^>G*q&D2D05onld@VP#bnj~|&#=H=t-?dbs^ z%os3?&bWD67TrDD1uClQmjG*=Is1NpF4vQ`BYa_NbF;3oQ6DIz(gZmcO@=vp2(APe zxVpQ$cL>`^pbmBy7GHbx{u(O<4v|dkKYsSeIr{brMP6Rs6!6#O=4P{K$*e_)iiwPe+;I5t zkcIJ|ELwm5Z5Yg1>);%SSxk#(c4nmZa7y`D9W>AduXYCd=rZ0-Ll%uc{6{%61h9Dy`Bf0@7Uq(n?F0bcb{|NQ-QwTN>$-?v(CEx>G=!O@3?R@0|0U zd+#6j51&W3@UHc)HP@PBj5+2Gk(U)mMu``zHle>%+(E?yVED8mw26gh@Id3<<}7NkmEWaF{=D&o(C<=MZxbv&(Jc zK&j)x#*q|xE*aG~`u+!BZ*g)T)lRClefIc0&(w9v@54Gu1qY_#!;|Zx;fl11bB(<} zP3IRy6~jDd!t^MCA{oC_$d<{4GUREDIAH&b7^T5R351A&53tz?|N8?Gu`KWb3PG4i zhJ241_z4m6EQSC2;Q#%rF$t_!D$;wQCaK9f_M=g&n`(^j=BJ|~pS_zo{&#DpAhGZ;dQ@dt zQ+n6+@sT+{vyk-H{#`dybidWHg5%@k-Q7+1!SU`Y{W7;Z9D@Yg+HQi$#2GjbX1-hznvyqf$^D{&v(>%R;BI1^X$jF*w%E(L3 z$;->j$;rvbcfB*7YqTi6F~F2>5&NZ%*ZX<@K}}d{?r#rIQDp%|&GoVp!+kXTyeDB2 z<62?TO-K%$R8$*_O&(NKR8CG#J=@%cwQ-vAgJAYKg2J%bH#|?1+H9I&zmn80d^e!7 zAX#LU>Px9}xXYto!0JV_;=Q)2A}f}ITT%SS&)_g>{VmaTOzh;qIud>ZQq`)o6nm73ND`iyrDoUa4Bx7XK=j7~MT zkJt}x=@y>_u${4L7|0A0YMS6{Yt7L;bHKshw`Ym?h3c(k@@CvmAe+0>^@^eYmO4w7 zui@f=-seCAp3=87B#>CK%`;=6k&O7|8`8w5IB`Wzc6_+rtWEgme$Mjem2rs}MHXM{_G8ZZc`4WnuaLuDcy^<(ynQ?P zVNh@5)~g}!1&pP#A1#N3Z`7eOV+c`8&NPUmnUhnN7pYwR&mZYI4tQZmdK;Q^Pj~tL zx_wMnpJEhE(%`gL^5$J2{&95JRs?U;b&tpQkocIoJ~1(u&!d5p^WRZmBbG=M5Zl!V zTzOaMF_3_Gk^*b3JPy{{>Cd|)82*jYf(mRM*XKuG)*;)y>duCA<#vT{Cd1U4z?YZU;O0*4;Ukbt=Yfst4J65aL~k z4WoIn5KWbvt)>TXe+`}%z3-Cj<-JYf8tK_NI!U}H8b-YhuatoWD|$4UFknpRT~#M?t`OU3>$<_KJXkxuzJ8uN3^esl<ZM#KBE$wSv<}!cr&i%5H3S4qVNJ)Oag0k|>a@EAdM77P5 zj)Fp4QH1U!-KIkoe-MEOUES#_KL$HN>Ns(P@^P1!X!-82iL&&((w#@i>sMJzsiSo` z^O~jMFJo5RR=4GvM{XA=W)63=NSoG0thtZ)>$e=o?!7KoI=|z#k@${k-p+U)87cLL zbm3x&CZqVrF8GbVq@5POTa&Be_6YJL-}ys?BzZ9+(=%6OYsX7`kS5Py9UPciC2cq6 
zAb7PsnnlJkQLM>rVX+qw5O5C*BhQk|B^I^FQ4&2z8GQ>?+Cy-ul8gWGTwYyhzQq~-klR>cxRCBh7f%7$j* zyhhze(!}B(uP}jTbvzy`DQ&*m{j2XvF+n-xfOFeb=gzdj%63&ukfJm2!SffNUp6*( zMUko}@6dLQzvbV`zb<7LPZYXm zwqsDKrV)B0L{D+^sF1ipKS96G@JIEv@5KCNeRDq|&MNdhfMWOOFHI?5}~ zKr(XZ{$XGtZj%8CLPd)kmUc!S_j-Etp4EM(XOsom$Z^`o#p?zj8TS=K3t+*0Pr)5`;nSf^R#G*q52v zE9NPrge6tVa-fQMwfM?{qsRz>cs$Uj#vF*>y4iyuBR3LCk5xD9y{=y*g&I^ApFCCX z>Bw@{M2HyGK3U-7=Wpr=#*7YRPrI`d>%g8R(w1E9ED^5P)eI?k`qJcMvHDi>dg@HA z?E?JfbxXC&pVmSoHbMeM*2O`lV3(|Y&+%PcB}S4%vLF7VESK*?aew6I)+pq%_=z|+%*AGz0BLj2U;59emq zCROvZ2rA^`z7$^Sl@ll+e7!(!WG5=P^Xy71VzkGsQpU$*rW1lJ!c!aAmV~|db(F~B z@6gjlOU4Bv6iiwSrCjVRfch$Nzwk#TeRdp>dvkXdcsD%v2wMMnQs^Sp{x9;kI*Oh5 z@3m29&^ZD=fUsFJQE#^h+R$)u{j%7AK-^LA{TJLuJ}NO6vXKZ9KFgolHAU8SeerZg z2>SMtO~f(sJxAL)RN(V7PrqbYmOJFzj@PMhPT(_n00n<$Lg?xIF)?fBWoSQ6TE(LkSjzWj?THWz`pry1ohSou?&@% z`VVr)SJ$9HLIucX!!*3K$4vYb)Y_lAF23DTH)2#IE&a-jzunmN{WEu^XJ9;#a2sMq zUsr!ahz+)oW1VLGP9Z2C!oZW5CI{!{``Hr$wjK_9Y*^&=P&w5H0`CS$saGqNP@-RQ z9YqBmSm7+kkB-bBy07Y1i3k=_ZDOjA-oM^_aid>xxOMd!OD3tobt;On$IOU>kl` zsnoWbT>|e(tmyDW5L;47=}M0K%Tl#KX4ASB1iA>v*7hx z&5lEeS@!%f1=1l?+?wy=OO!T$vlHBQ@63JDs2^|^ATdRJL&{~(!Tyx5eJo)WXP|~6 zI-~Vy@fKUvy))u^y4Av+*|q1%2m5{l8%L_L1ePRd@yNMnR!muf_jj7zr+%h0MG6Yj zncPOO(BfdbN^6`x0w+;j070fIHv!=jZI(U(pTUH>P{A(sY{Tyj8L?rF@=niu)QZUW zvfiy1Q2KM0mN&mHlzPc(E>HG*y5(IXx49(<#2#%p?x}FtwD4u;YNZXIebA$1(P%eo zwh3%LBKo06(!pc2#FhGTg%1)}QXctlY_huntf}IIkT5@&r+QZX0eGD0VbF!z^Xo7^ z_a9Y%>b3eBxkc3a_Mx3p*DVUOT`%UB(_WtrJ%vRaxVzXP-i3U1h=C+`v~>wGqzZCa zxYuR)HClV${;lY0|2CNN^Hb@)i%Uc+F6hi5M%xm>t1K}1PZO}MjNc$bCCeo5Dzo3z zhy>vedNC#xKE=9irVz;e0am%SgMM2WqIIrDI=LPYe9MhPa&{%AB_{blBsgKUx@cr?2vfSwN zO%LNn2bT*wtAn-O8|vY&eT1ZHT(Q#}&44xK(CZ~NqETc8=0R1Y-|3)g?bfozzUlbL zhog%{KtMo5lpGfq7agsnp)or$@)8Ncl)qM2Ny=CibaGvmQswDp!p0D>&U1`wjLm(qW3eoKJzz3d`!>B#|M}lKR-VX)l(^H zY5#Z?I1YwP{mc&c_0CYdD1m+=-D6dCTXfMxCEqxa@e_{=yM_I^8hxkB%OgW|`3@9O z+~%F5DM-)-o~?d|Vtr=jqi@Q>!tli9mN7H*fH4kXj}WMiz-slP^N2@cuBH-?Y zF6vf8%ov!nbz!3L!QP&&2=(OnI90$41=}5ZOvR=_nX}R)_TdA1M15uDDXr9VL{spV 
zXp0E^9_Ntx(P+P#uV>7)m}y5P`ZPANnLkEOZ%nY2ySut_va{#w93JC-VF}a6pvZb} zK&aD}l$5lrXk%uUCobu?EYk+{bNMoG$Rc@#R^rhH=x0MvMk7?Y==Q{hi2A2Y8-r4) zimV6X$*+5zLy_Z*Y5Uyd!NX}?VYeCE8LF3b{vX3;FFYGZ4$@OfO*Ov~wrXs7anKeZ zv9j{Fm_t=8x%kM*$?56$+Wa5?)xk1h1JSG4@gAehT1@hrPE(xYNMc@fh^G1=Y}C7V zMf`wvT0^`1egE)_i~OUV-!{n?ecpb*eD@BSa-5V4Q&F*6w_;=Vwrjop^~}b0Af-mG zDRZ5!Do^Aax{7yYr2&msr2Os}L>UJTOggr3e2xA=XexoY67~(I>2Jum-My8L3z&49 ziOI-(PBsTKGhYGH*h)x68`VsJ24QjqTal>BFg2C(Yt|jG#l0Gj-4Cp|14`FHKW>(S z$-aInaM5++zW>X0HCxuNwrl z$LT4xZu57y&u#BjFgw+dp$Md1JyD%kJLG@+rj4(;&S$P#-T|LaEga*=vbKddi*tqiB6j^zWS7 zdLBn&JXQrRWKeDIl}uh8!(tJtARj#+PWM=3|PG?W4dIk9( zbwX=LPdqq@fV3p}GRr|%w?n>55X%Aqy`@5T%bn6Pf2{MS!)H@x@TFG?&_;>s(>y?;?Tv^+RWxNmT&}U~vh)zw1=`GXJi_zcvJKDrD z!#02TfyPB;Uf?}6SaLbMWB_5rwg^w@nQat|_{G&?O)5C~j)l2T6EpBp{LbrNCCw`$ z7G3i;1S}Hf^G1gJqDrd={O+k=2Jt2pJlyY1KMdQPlKgw%r}ahpE$t3@9a=1l@Wh_k zTJS);@=v}M2W_;TZo7u8P!ELF?alf3{eqKIKa$H1rzloLJbtf|@XCDfPfQf(?)sFU zmG>>+b!Xq{4M;6CF~9Q5BR#gxz&=lS{j45(7q`|dR1du^mDPo$NgWUPyg^Nlc-uZi z0!@@C)B( z7tr;hgv6GZ)?icai{=V8<|dP)5%6h{yh_!$zP*4B!sNcF9P#1Wk3SkM@cYWNDzTGN zmqzB!fc7WVt}H+J!J{3VvbS!i5BJQ(88U1k^yx}E@BJC*?*q@(*%tSNfS72od3$u0 znm-6bAwX8d6~7=!znl~13c;jw=o&1;RvD)+-bha44!;>ocM%Mf>n14X#TF+sIM`ms zMHT}PNrA@b)Pn`i9YBE)yawbQZ!4;T*Pj!Ns|-?XqC5=N+MOQPpKb3DM-9(ElS=*| zaNZ!=x@7e?-~$ZR1&WBBc+`oAV4&w z-o=}P_?x}EC!d*hnE_&nX?GOQgO)O%w)LL}uJMAr4W!RN{J*PlIfc<^$g(rVTo5vc7x)Ndt%j>_a71$1V<>*ULoF_zsx|3`vIbC?yd>nw{Cz=(bOI_v9tU zJ`GJio7%56Qw@s`EIIqA2Va(&L_sk1m_-J~vauw2ZV-IAX?sk@LQrf|{pA;O#eU8} zMK0uS?bM2sQM)+@h7RAs(BWXK4#2nTshmGYNMKkCXwQsex-wk;8|N4iWfTfHShF)O4-|fNt8_-xXaBhEeix7(5tn!Wg2_?SIW*r{ z&T?Hk>UtCq&VV5qgxVCp1LyQCjKeQ5)FofuZv`|?ACyI7NSwfy>PxUXAu7fc+^^Cu;+O53XgdYc#!*b6GgbBv$HGtg z)RPNyymm3hpaY7em=@Ly50RCSsC7Hp^nZ*AyK>d@>V8xmQ4OOLm!$9MRw{FHEP1v| zGPy@aEMdhf5jm7@_NCBODnp%(ZwA|A_G-p#7Mt9-nRAy%JEFcGCj})TW5~d>qbppzDQE#3Kfxw#i@9?#hb zsi@om+*1s_NE+R5GQG{NK9d~mudlD~>gvK{)@k&F9^V>V-4=y{Xv!Zwi7%#dYV1FR zN$Oee>xxpjh~H1$tcFfOTE<*MnO4nD8{om+Y{dU90mT-55A 
zn!Tm=UR+!k=oVAOnwVq)Mfv&9Y7XVWWu(}-ojE;C8pADR)Dr?d<99zL<`TJ;kno)2 ze2k`i`gSlUxpkZS;8kqS2_;qO*Sn$jZ+${RR7r+^O0GqkY_!1aJEdkb>Lyu?&Po1V z>X2w`E**Vh`A3PAEBbQS>yGL1qjbV_t)(8L>e!USK*}DVCAXZ|mi<_;r%(zFYw$AK z`ZDSk6+?aCGmbzI7DdW$vAS5g3f?M5AdcL^D}T?O=2B!gE469;`2lw%_I%LL4#_ZNUTE3si!X`>R=h zww=hLOQTb5AZUhQYP=*Zv^l-D){Qz`%-WiKgb`xOSXpt|2JZ)tfOCOVw$t3YO(HHL zX=z?#aBy+XWjCdq92bQHQ*SYhRw^Cx1Cr$r0R9wQPv9{ z%Db*H>yj`8M4%draMs%#3LKM~u1Ypq=VB^HNBWCxNfN{MkxfN#YqRlgu1&rRr#HY5 zMFSg|HZ0k)$ka#*%}jtG9urg8mM1qvIk~RY`5WwM4|4GMg74_HKX_W#WcR5i9|4cG zW9qN@_YadR*qZk!E#;@lLN#t53Kj{@HwX2Y%~^Fur+{S&rw`P_dQU`3)aVKe5`7Ng zj=Gbw^zk$Z@NcJgT%VUZpCv+zQy|3LymBS;!m!76ZHpZ&*n@WkvaO8zaL>S|8nsE+ zB=e`TayrESTT?}!H2KIUWPh9b0s)y8K8a-Ek=_fxrrg!uZ}c_S2@GY?k!xaGy&H3| z{U}ua-P5Gh)o`8;84#}t+TwUj8M|zUb-Hx2jtg59x>!zIo2KUTbhKg7toK)kZc!>g zU&yGJ0Ltb-E-dOIEIBl?ciuj2?tGXf<&|KIB;ZeMDb%a?Egj%o8#!*d`*=2)eyT{Q zgdX_sKyv+Imtn-{eZn<{{LTK$WNT81O-Qmnv^wmjk!pPE1}jyhUmm@u3J6a-5@#>0wx7(~ zM6i|NL}|Q$bPtvV%-9$6DbEmU2z@HZvuIl-IXA{Inch(B!74Q0kxgn)-;O|R$YFf4 zFFP@lfa@N^mC~muuNb%{y6c&pkAMTyIJ%dKu>}awlTd&JjQYndg zF9v?mBa2{?BN(UEQ_9bf4A*O7n0(k2>1%+QnI26QDQHV6O2W$W#8ymT9$~&!<=q4RDDmr^^X#h? 
zlwDFCAG2x*1pC}u9MUuz3+LFktxqA&emIO`k2N#fkk&3$ir-V)sf-{b>7@V(ibsLt zI5GK|M}Fk=K?7?tg+{D{35cs>D1PE0FbNRHiE(rPutcW+;M!8k-+41` zCl)hW_MI4PK$-g4i~WT^_OEeQZX2MA@zt7TOC)1TC76vH!3!CWj31O@Ly_zCWKt2H zCwlupq9P2iuRo*9SQ^Xn_ifS}#7O?NE*m%kn}sIUY)anG9sHq;;rH7LnKc4%l73|D zuV4TWa8jbF@Y5`=*Qt78z3Uj9@*z+X&J4zgnwx0#TIOuJk#zP(5+pW z2pQ+?-p}_m#=TGr&CJX!(P?sD>q3Y9WSyUMVft51Wev5-_3$FJKfXOW=i^ zz5O^fY{h@+#Um>4o_+FG2pwRPdHkRZ02Oq?*> zo%^9^BIrg&Muu967CEG(prGA@ghrW~jd$w|WHx6vdP2rN$5f;BTg)sw5-?8YuC8qb z*>MR!&0$Pc=GkE+<4{2PUzd)+cUGWdT-6N#Ixj6 z^6K&C8!%k~%=jp$*%s*Whx`8-+E)0BS@Mx$+w`sW|- zxox+krVc){9{EX_@Zo7nGy<3M)j`~oPlVW`=2%uf$%DBpn83Ym1BAUG|3`Tu&eU1$Zc-IAnJ(%zu4PVq(V3JE)GzwW%<7`J1X> z=^F=5&54Yc{=P{p_^uMRH+t7jOn+$>EbqxTh|owRiNcCB1|DWZzcOWmIyKdegUFOkq8XG^sTA>1)@tl5M6XC2S)Yk2|_$B zV%6^7SrnR%EE=wq<^MARcmX2-PeX@j+cb(q%Cq~i`3<7_YqOcMm0h2k4Y~8cq#$<2 z|1STZHzjmUr~4Sawq*La_sCA&{V1gWqfl^mFr(3hoWQg%m!u41WhpmCR4IjmAq6yK zT;`sa6uyDa(iUg_nL4sZ2rsq~3t^gh$CI`ZHsxgx6x|OTvnww|xaWK%pva=)V`3B{ z;*W}jE6SKT&PKZlDSIskU8^Vi1TP{JhN6EF!%kTsq#62v_!Q_}zr**>e>w_E5m@rH zxb`9Qu3eL$>Fsc89^^~-)%}Xi<2)*~-{w`aSk6g{W_i#A)Ntu()Xg~m@(V_iC$gO; zq8o)s#8J8xgAQGm^v%d^pM$*ME$j0+#lju;iBj9v&U{WLM6N##zf2?vAtYLFxY#C0 z(H{O2X%k?iq|9u@a=QI-wm9LsiGN8cVsD|Ff+YtbIL3yB!#eAk0)?dK$RoKIMO^k1 zD*b*} zj0|pH4+^2*(>FRD{9ZT6Xh1!mOciG1d(K&%;{D7&Rs|a6NkN&M>ApfVK&VDGZnJ+$ z7(k+bfkCX;+%{4hCWa_(zb%3cmz2^??xw zy1E|nVtCasKnS?>k3h(`%pm}ZVXSLgor*xJV_iPxBIJ{p9nSFgL3@*pG~THAounOHoZvRWPAFPQTlQe$w zR_Tii@!D<2yjAncz*Ot3yHbs7U<8PAdKW;mmE&3KBa`EX0gi6GxeAYYl;yL82i{=g zs^^d&M~=JZ`aOJSI@^Q)VNWK_FzX1a{=rK7Okvh^tWWZ;6^f?fG#Ht-1L5Y_r&zk@ z#HBp1ZEI#R>X z-9Q_be?WiD1Ep*&{sn~BSt8LtgTg%PSq24A?uc@7;A77|Zn8YbOS9cP;Z0U}YWL2X zJs$+kgw?;Gv0TsDJOcTJ;zV<)511-)qokP^*GACF^e*yUd%nvu;uVoasrYbZ)!*(P zV#*S+$$Xf|vbvb;dKsuOG2=HzFKWZ-yewZi{wXO7Q{33)GNSofZ~0@WyI-}1mX-IM z2={#|bq#Fc)bfjblW)KUaopmw*#ZNhXkUCToLdW>ukQEQQ>c;`8q!OBMm&bIix!AR zID=2E_-&5rBj!Fhp8PK|@{1UDi%K}}@m)>tdp93Z9WG+bv~`*`iAzkMQVrpz;!22C76=cq_q)=bpqbbnznw$bKGCW}tkQ75A3KoKj3Ua_FbOdY* 
z&&mEV6wIiNEh}F>3h7@he9a_@-B|yi3UKAYGkIj)kIx!Rzg#FSz7sBYWc|ZCPqOrF z!0igBiZI_Vf7rM$fJ1pSd(p3suh#vwYg?EX~BlMx$n!Q)lf*Mx8n{5GU z$xJOBAx&iL$XPZ^Tqr*I@X0WO67!G+reb2xN0N$WbR&vAn$)9=Fck;|J_J3>i0>RO zKP@h^&fn64D)9bW2EYuYJPFX_D`BkVVBmqZyzJF!+6ab>Xw`FLlPVJD$TEA>Q=Up$ z_|8z|=ZE!0s3}bQZE%eUTllfEpz2^UOxKHgTbKzGkqCd#Lw7_hg}+6nwfQUe!qV1I z=(S~g0{PuOoTjiOPSCpoFTh98zq&Vzcot2Yr9yr9; zp#YMVF=F`07t~-`-m-9@hKtzu*J(k0r6PPu^dm-0JW?lels^7v_C<$It;UUl&7^Nn z*A(_{6?-^s_B{RGeyZVwW3mwtD?R-R|6D=nNimvR`3Q23;~}m1XN;h#Ewp6L%=19b z*UADB1LTtGU&r{sO1F7D+08n8@|if?6fR-bwOmBJ-}2)GSlbXn%60uoH1YB<4-#0a zVR=*ThQ>!i<7eq(w&;Ck$4to#t8nz5wFE$2w$TYKkZFUrYA3SYEc63%cb-=U8^@hk zN_|H zKQ(1I%R-t{@aFN55$KgR`O($!QH)Q^BC`^?)&EnochQ^Gs|0b*7)r=k!KdIK;MpSF z2ZREN=gxS<)I33%?$7$Rwd19C_GlP_pAKd`yZZR!e-_|EryDPNP!EcB*Z>Os9h&BS zW^ip^6byYk3)0LEP2$<1j}lmPciXd5NBU&uTT$U!QQdt3VJfJ;2%Nl)c$P$Fz6F{ zUt(;5PRn6bDi1n!ZI^eE%X`^G1L4Q{7ZkzdJU4 zsJr5SPEtF5Q%`T{ZHtF@Vc(&hZq7f`fWFDws3ftS@=LVdi^(kYj}y_)-!fRS&obw; z(b4VhP8M=o&kKP;R{PB=E$nwd=8oh&*nu&iwrR3JU08DQ5@rt-t4iGiA@c z4;U2)i3M~ho>^D+!-v7);nJcaSdB6T2A4;Qctu4!Fy-^OC3STLDGLv5Wfb)E^wibW zfjsN%>XOFlxH56=t^Zgz%s3Jk zHSikl?5X9K3%dTUA3d{W$?@ChwQ--XJ}Z5b0Vu!m+wS>p3T7#@ejl{pSOr+ETSICFHB+;yKuFDgt`-lY5U<@Qg#1SW0 zRiKmV`%bF$5LfA2)u*$8d`0Zdg7Y3XvS#2ruz+*-JXXg4V5Z6N*mVa=LOP*pc&)sGW{ijp-$m%XUo zH#L5lit4kXGxL9(9d9Uc-Do&|YcmZuTS|m(nGqC|2Fe7r!c6u77VQyD8eD zzUL2l{u_FV8VwXj}| zLB(FiGW(I6d~Qp7I=^^F<1m$3cyjIEt=I@mY3A3GV^1nse$Nnd)zktmPQ2=NPDhy$ z87}_qYOQolUY8ex;%L7c{rZLLMr?AYzP|qG=;+SQ&c)$U_flE9d`kcJ%`_zFcvjSU z)j#f6rrL5qy9w=IwHLWoCjxsuP*q*%UA%Ri=-W2i*Xv%)CZ~U>@KTiI3J8Y2n^w8v zAR-{s)suv0H;fOOn=J9xrL9Y+*4jTN4C%F?gsLz-&PiqtNCyT3yy2HSZ$8Ux#c+D$ z9QW1oZA3;!0CzHQ^) z8=^;|?EN3E5uj23Y}j_kXgO!Vh@F8b1~Xz`VZ2VERyzPCtUM+|M~Vw;ov7y?9R zV3MnBFZWs*5PncRV`A9r)ok1*w48|{P1M`r^#^^3mHRGBYvPR zfR(`P1TTM3t1J^g2}<%DT{2vE`XuWM1_g;0R%Y;KsL24~i>?7dR3e;iR|6oG^yXzf zVCfBrd{Y|;e*r%Z%%rq5`M~QJfdbG)Yu3Lah$tjy!k>zzNS_>z*yWIRD{|u9aT!*s zeudayoyQA^xc9&c!2Gn(+-zNlEqJ|UZW;W2jN+x{gq~%JT*QpE1soQ^qq9+Wqkz8# 
zaE)CaBru3yjo)--`!)n*bU-pFT<}3YHzNW5&-A}@Y=ngW?aMATd=p&S&4s*19EJHM zcsY2mIc+alh{gP1^`mt4OPC-WQ^f8LxJRe{ZsoU-;w+$*!yW)|Z zBwtVpg6aKq)EDXx#RsVz0#N%O|EGn20ZZgNy>8tBgLgIlb0!>RIB~-RbA)9a0RJfG zkWsgbUYUeSpiz)>>$I|4PUYoUgmJNS9y-B_0kCV8ENgo&2Lx+zXpHb|Xq7tZEXOMo z;NVRDhA=TR$45ug)6meEJ^>Y69y=YowXYgB{8od7mV<@qo0+2`TD^lb);!)-ENpCS zOiTwrybcTuO!;MIXt;KFPY;^Vj5wq=5#v=A(2H_3rT0d+x$!r9{gb9Fxb2yA8u5vU zP$5B1Ox_n5fxGWGIUPbRG&}5%yy>q7VYY;d4HMyyW@a?WgR*jR!A_1UDwEg7jsP6d zB7v9b{SMS|>WA{a7d;}lA>t=bHlHGLANTwD`%_R*C@CrelO;Vp{Srl%zDh}6-sScv zg_4p|=Y4Ec6wFEB@M-z))Tr&gFz_^rg>s}&@6`Pe78Z_Y(w35yMHW+2Rt7%)s>;gc zCJgM|+rm4fi&1uP(QPcSJrpP<`x*H>&Mkr6|I?>WIy#Hj+TthqwY8VvMN4h{oZ6ZNBkZF4vsV}m|$Hq`lP!_Lm%`!JPHwC<|Xz1w?Y=OZuomjy0 z4Qyrw9&Oh;kF|gUKp@uS{${RAl0oS&hf-+O(DiHy=#9#pHs@j&0-E`E&1YagU%U`) zo*tEiRF{?6Iyn`WluS)b1PoqYx*8f9!Ztv8?iXXxCeRvnd|nNb3WgZhc&@bxonsuP zlZ&eOeB782GZtc>2!QKL;ENseGMY8#m5Xwq@4d+4JL6^mh!OO!{ny{z$+Aw_z#6%;3xss2>Azl?(%;UR(sNUJh*YsM; zD6N%su8#TVjouB^mIudAp&f(a2dkT8&35tO*knSWIKAQAY18K9=%^RD2)g)k<;bEU z_Oc|T+79mOEZQ}_vWb#9W-6{r8TJv z(ZPXKT$Tv0ocv~RV&4@}S*N}Yhpn60lonm3>AWoP&vEY-FQ1p_k4v~Qx2TA591xB5 zSy>c_7#J8RrGcObQJx1J0gVrifK^fRb2a3mkE)^5ho`LCdN9Fxd-sJq$Eyn9#7t;> zt=Kl=*gVN>Au;chzN$<8o8t{Z^g@%k+ zx5KAg=;=At2S#mN8Tp{q?B(Y;D>duL0|bdJ$v^xzf($gucx=24tAaikOx5y9{LZf| zPVR(2d7s-8bb8f87Zke*fI>U3D{OaR$A#`p9EM+1L^rb;d$S4eAM@>Jg+9Oc>wNWk z%$F(Dqb~JdMtyyKXJ= zW(-TEYPf4VoRr&b-ITX-a}*@{=s@S}>uq0{G}z!b3If^22@^n$D2)e4Avg|w(eHz| zDiJATb!z*~fXa3SfGjiIY>uMFjcVq3lW5GPvOwqAR1)LF(Mz8;poSW9zzwxAqScx=PrNs;MV36@WfU|D z*c@xh_#3gx;)EhBrT(A3n%TS}&Nlo6)Kd8W0H8+-7rg1S|1>+Hjk@q!o`a(q{$I0odkqQtAHh=Y|Zc#6v)%* zI)CVG09du)(f90&e|P!jaM?!+AX_CJOl47@4iQIz4j4bR?lpqX+HI_%geycYF?M3p zQ&YTe-k6!2gWWPRG6EacV`f(qQoJU#u`;VP;Dol-gQ*PO{yUZ}tkdj4funcLT?JSZ zJKisL-z69_W<06`&}8pph#w><3FtK^VY~ku_rN?6&zkMTKbkn2o71MNYHIS)(S7<| zZftLFe|~-r^HykhqKAHfI|Y7@-^zp~!URnKGkVKDdAnhF3VPApPc+Po&BPztVT6V! 
z#Avd77RbnJm(OIZg7DDP4G=tCwCI4SltJOS_N{!tOj~>5^Jh2{WR}4dTiD`NCJ@GD zg5DT@VC- zST<6gj301uC{I*vli;+k!dm6Fl_$RSn7sl549LGGFkAmUCMxhsPTd)l6ELKS@03^O zCiw6UePJVvYoZVfL&7ce&TLB79x^&DqS6FfF$)1>C7Tf0GAg4e&w{%O(Neh1Rx%>$ ziH}!CDXR%vQ^gA&s5IzAK}AKaPfSRdsW6pmoc0wF z3vJNf*-f3Hajn6dYL_`zKp@z0*_kc*6FXJDXHVM{KURGqrp6ZhvrQ1am9fB{D5bh7 z3*ut}^C7MTlYt%$&~fp|IYS%}8v;f*6Yp2lrx)}4rSmH`i0yu}_Qlt8-~NX2t-2ga zNlBGgRGgfgEPbo{w|9*_z2V-Jm)`U>#k70F0S zva+&aMpN`!WjonFAna5wcsI^Mw0(03GII~5g`!}!susVR^1KPx^R zYFcOsuu)}!se!7h@jf8nLUYvS6#9LNx~dqM8bA{-@F(Wx=Hg*vH@b+0UfEQDFn+k( zBRd%L*R;Lm_B6@=+nibZQJ(bqb9?a33ZOyu`r;6D|2ON3fgAGZwml-xd#;`35-wIU=D(dzEQJCV07fnJsn77 zfA;JdfT*C^#aLfaK>;Y1MzWxdBs>*XpgN4nczEU~>)&+_TYCA)plu{A4HIJh=~MfI z5RwTj505!vNuVfq>`&I#TJ8YL9%u|PoY>Y?wU+f5nfU4K?2aV1#1MI4NHqa0n(HyT zwYDpLvGggYiR6H-UXh96!5nan3a_dbS4a>YgV@f&Unq5EJ@P6Re_Z22ADIA z98$x)`Z`RWi;isw!G*jy;AYa zVdGgM)tEb4n`5e0e=%i?ep?GTE>88SoOXw5umw^gmCnUXk-llc6l_$T7?4Twvf%TI zqY44V@kIF^@}r^$2j|P5i`a&F6^BL>bfy=JUK{?UIG5rkB4tL!{lI&mztbtd72a~= zLWb^usV91z70#J`hVKXP@jzQkegG)neoCr!gE<{b`J-fQ1c@K%ZAe?s>}1&(Ng3!l zYQYEpGC|#E@V5Qj%KQMnW^P_bakHh^R2iYeOqq8$ttMQtf73UlocYzQ*7G_tXvf^~ zcmB)d*|q)OCQpPNSXV}4?D73;yu0$Q5l4ZLmC*!GjF6U}mB|)saBbD1 z1P5{EYs-Nik`M>yp>Zcz3;cHd*8Y-eTUrB?B8A%S0>m7sKwj~1Z=Ef%`-3kE;Pv9k z&DX3sxPG*j(5o<8fBvmO0u2(xOPTr~*XO^Ixw)6cWAU{*F04~E`FRJw4^hEPMsVrB z-H6Nc3v%E+B=_|HKgFb~JN>%xzmZvfic&qBau(I-s;cM>D$-nniqnqAXhAR6-G7u9 zB!M=5Eb$Hd^=tY+JOl_w(1G2Ij>#0vK#?2JhVlIKJugXVHrtK8H~TNXZ%23N2V6_G z=KMh*0O}rO7|3WCti2yZeE-Fd-%Izq6VfHaTe<{j#6@p+rJD?YBAt>`4`aES&11g+ z1!#9MlZqk|3GW77Y-ONzA#F6KsYb3}@lzwCDMsy4w6rn8ab?`GaH8jB4YS?b`$dMz z)mD-M}Ji*WyH>)nqi7Qr+5ET|4sEMjDZ;xQt9cPZI89ed$pZY4?T|le+W*Of~GLA0`r7b zX8$vpo-`;6{qy5<3=7(z&J%n*?)n7W_@Jw|x?=Dj7_&AFbjE=D$LztkVXV_0{VUtf z{(j9-%Uxfdf-}nl_h7M$dMnn@sVrBc!`aniaQBrTv{tu9d}kX@U6j1E3l)^~TX;b< zh|4Ez4Dzk4unW22{`Nw#_%P`3d|5HX{gmy0xl!vHfsh6IK$e}qefFO)Ps*1X92?dnyl15Ovy97C) zgc3@3w}f;{bLejIpmazJQUcO(=nm=buDj6pz2Cjxeja}a4|}h@*4%r|ImVb{bJfY+ zYzWYKRY)Ve*LNzNI|R^$1YVvCXq{j}xRS*KiJ0pQhFqRCkkK*OVX{t;L_hKaoeKz) 
zk#g+;8(6mY+RYq5^OeKYG|`KkL07a;8DH=zXmrp8eD1xj^tiu>TEMr4Yg^bjfT0q@ z=slo#IT|)p6a8bEpeZ94pyizDFW6)3q(+V~-jMqw5SO|Z6B5@QUty$i1o${y_d-On z87e(&O3*BS;{7~Y<8Nb0rP0|N1Qpso<-zaP4`JYDya*;7bfmEz}EOKs{TSDw*>i^P?s?Xt7d)Ks7v_f^g z1Tx#eZ0d`EeXr6UAbqa5vQov%WwDI_!aqprX%n z@e_9fm6(;|GHSSHw9-lVS|^5|D^<|f+1tW+C+6i;ZQ~^EWu)JXEX_U6C#|zpYzzOl z0>>VFgP0t}K>bOlK(T<^Pz=8pc}}X`yu1nrat3axX50Ubg>^kDqtm#%UO@vpV1Jeko@92m*GNxafac#T?0>nJz#zuV}{%r&4Z6BCI-eR&bor{_r1d(M2goQn!mEw5Zx_v-W?Lh>8QrFpwF;Ra+_#q0G|U%#+m z{|PSPRuzXHDJdzSb#-le=6D7J?p#?Ft>1mQj-LO$DS1Opd3g+$uZ|85pk-N8Pj4Q$ zR8>PuJZNl=Y_5$H3)L2`Z16vCe@s>2r-`grdtSjUB3MJq!h*rz27v^zc$=OBUn=!J zVIqLb`Tup!dMXBtsmSI~OwG)~#bH)yIW~Ju*$hAdZtx8CY@E=)YD6FznYEdR=Af?RzwUKiC67y)=<5&{UxbezOdVh4A zFSV*t2ZcBLp{x0@E!Fe1#jYPe=gZpK+h=R-rHFi556?+!7EZuD7L(jhxWXS2R+A$I z7d)%27Kz53If5n9)x_QZT1QdEsv^AVlutLU+k5$78Bn9IEroWD=C_Df?Za%E##TUD zN9{<Y+vI;<$s!Rn0<_W$negpCD=u>F!sJ zxB4ZB=eWwg8haF$HDteDsLxB&0bK{RqqNR%F&o!=iA9f~eY1-W5)pkHV+Ux`3aG4_8*vcg&ocFgEsa&oE9g-m^{DM}t~*#+b}TzN7C{|z zaW)WQRr?C&+as4w!^icVnQOz)UKUiW_lbL@`uZYft&~2od!?Uiw*=~~elJjE2%D^W zlK94U?|12Gx$1OMUjVvsASd2wGJCyjL&Jj(29RW$=VH*^%Zssmp!$617?koOy~oe3 z>dY9UEjwjYD!8!z$zzi>{?>rIR=SW8Kauz=#g5kFPr=l4ZqPG5oSLg3siG1=GUsf} z*eyx(iX0>kvQ*jw%fI^0oQ38gH;0u-Ywk;L!h2W|Jn~;ZGjBU_)JA#U5o$F^*{bT* zjW2H!e--t%G{!=u^#4&ioHhH#G^`oX?G^6ujX5+g_`op(L5sMTG1DeL^ zA0Xo+Biq0+0hs&9C9>dVO=^!$j2;zNh?>%FAqRaM`0hvFYniMM;KZYy_c5s)9r5o* zD}S&1WDDQJi1dO6sgw7qYW@J%c#3}IiL3xU|F-D8vsCo>K`Fv&ItVZl%esfZ<6K9b zvw(W^Rg2%(qX5<=Xn*PQoSPTRXK3C0Mr5y*5=qg|?oqXM7zeRV)OVVRRpRzkBX0ZO z>4M>jS+AM0343i4Z$2_Sibc`WDw^1pI+F8tNxO`AdOpsY*}H5UcM()kbegHSyk)NU zp&NKSWiKEk*1Njw!5u-?9rw_A5FR)k0dn&PUM;JOo%Ov25@{Szi`6l^?ZPr0BV=Q;B&+n- zrgA&^=P%`V!q%u1oo;BEFFZOj$%w?~qrLOmQqtke_lK4H^)ehI*USPDlkmX$bX^Cp z)u|F8oW%BCj{s9i;=zrq;MOQcf~X8*j2(%J)3jNdJ#rn1U`|OXtJX&9ywl%}LS4s2 zN|5;A9F|UzpI^3Lzbj8|0a*1k86AmLE#vA;|*qz4? 
zKv-B`{WE{omu!bR=OTvB=LheXnOx3BpJX-T+G?S?u^QR^Ob{F3#qS;zJK+frOF`ce zrE}G@@VD&MrJjT(X?Id!O=@p03%2)$+0oY{#feezlj?GOUUfpB#_w98f75{=5X7yQr>oF9y} z5_;1=ZyRp`VH92dMsNhi++pinr{*5uvIUQ`EfIfGG11KqCiRh&|5F*SY%;2fwwufU^Q_3GmcJ{VzQZuV|t~^n(DX;wk)@wfc zIIVa}$(4Yr7BhZt3su2mQUC5&+{O%(8RBd3Ai3K1-#ymomljyzkFlx-`UP5ODQD z-XfM(=7^I(IEuK;Ns4)n42jdIpZRnan8QbcIeLkyth-J2GS-%J^sOXbV&%QTmSTp~ zVTzG&ACRi9uL9>OkSf0tv0gTp;Crx73;)L+53%g0E?)#4{BErCg&%Xbg7%aw?@Nnj zUqpgo-~$DLG_enl@3|ZU2PhXv;?FZvP*ts;nSy3}-zLBe^Zu4cnNtAvMu6#EHNN@( zfzCWkh!jGhgL)TO2`}>Q_)S-Dj77*MbtE2-DjG4fS6@g}eMpDgOoYiOYJ zviJd*_*B-?5~e{y`O|xloZY6$$OcwgV$5@sj*|$;*1oH|)BXo61)DcZqIas=T;qcw z(9Wll+&nQK0vVDz6L^%Rxc=Hkk>kg5LMEl?6UK10y?A-bVwLEY81Ke@ZhqG$tA;C9z(<`o?;BYwpR~ zr|HE-^%RbTMZ=>+LGqYono8);lHfBS#Ut)%tH}Qpp8Emut&NT$4`=G}2uUA{sHl#| z`>$D;kfj53Y80QHM4JvNLakaUEXth5xz(uauA^CXLLA4;t5+kxD(EONW0tllBA6x~ z^5D{nRBN2hm`>E+&9E|3iwtG^4mH0;c;g@t)>zkQ*vkxT~b_VNu&n{~Xd>E~Y)J$DLTXmHl zjBwRRfLDw?F|_FTIOo|uP@DCzu=Y?tep79$&D@YDl;j43%e~)J58v_8K>BwFc7HwN zQ!)*mtwyzOB}f!% z5SQ_L!1?L@DBIS}vf!3_-KtkudEaVeN4e@E_uMTFdGruosF+9lEcufo8+yJ?D=x%= zg~I6#kGcEJtJqBnyiiq#wQ{a+`aAfv&|yj@lecemn;p`T<(Be3a2m(oC?gvUdrR4@ zH(=e5x|n&Mw7~1R%8u}7%XwPD#`%j0Q(n%gUdy4*{3BPy2eYLq7(86O{M)lWj{1@N zHeNU{PplyKtM(G=Jw~FFl*ILJ9Go$FZ4YTDt|7J@8>TT)p;eg<_HoeC?0riR*{l7nFz=fs7!Vi9gMSY?t>T+oB zGe6{4rV!O0C(VdvK6|5lEp1b07p~ZWZ~0+IF&zsZ7q*ws_2%jLgWS;vUk~osm>6S5 zHVx~pogAvxE?Dwa-FIhxFsqfU)53a~3b6H5^iy}Izu=cll@dE#Bj@}_fFLW|+223r z05P&`pGz>TKSSHJHIKI{wiIL*5b#l0QnZNZ*H?nOG^t-)P;tI0%-{_0&e8vuKV&jQ{&}; zO-`NI^SS2*KT?z3zy>A4@~i7(3|wFoxDSJR4AgE52BcY%va)D@%7=5`pvGD>wAkb7 zX#-pvl(@ix>c@{Cz$hWFpkPQ7l9b5oZ!X_dgeM*UlGb8q7B4W18aKmpe0*GuDFO2G zB|RhK+RDnt6BzIy#_?QPUS6Oi1VRw``v%B(`7KOyEI`&^V|Q><@0J1yA|M?;=!uY9 zu)_9|dbq;~9I>@D*g|uZX5M@;o&+QYCK?(-(tcSUa9Esp_x`;zxU-_7qRPrj;90pf zUfK~I+VbH|M0oh8rMX5g@)tb2Rkcf&30DPNLO1Z4)>8=9-*d4&y%vYG4zw^+81l3_ znV3AyjPbKb_c1kPpe@%jHsts$NJodvKYo3E4aloDVsHv>m>gxd-KL@7^MUQs;14jgpFg#G5HXp(pEYUZ zC3{Th_6#WrvN};Rd?)3!OmF*r%AG4ccZ>Tvn7WV?f7^`xQCQd5BOVd$f 
zT2AjS%Qk&!lf!tV_L){bE-4ukG5K=uB<%SUte-)zyjfm;b22}R`F!Vg>y~j7Uz?>Z z+iBDUw4?K+u`#^4v&?afYs{;Z)r%WK1ut6Ztz0h99No`&wW`|VC?x+_OLoOLmfL6b zx(`?u{T+iOz7wbejg9*(0!htx>R1)Q$U9Wl>wey)1y@I>LLz17G>q;6z$WEIvNgtl@O)_|s#@{|dh_0qaH3Z?8m~ z)1mw24&_PwcQrMpsD?riCzp&dvScTE;-NH@o>h4;yUAuA#Z{}8w zrY(vhL*Q-ZT;*ozaJt<7PwI6ar%&gvpM0sF=nr4{1eqND-bXCttW(Qe#`dsLd#{3% zI*vpET#^nVjtk3}iA;`4dEdd;GS6yuxT(DGore*;Z@W63BH_C)9#ZwHb2U#ncYmQu zAi*SxI(Nmw^4i^eTEunUN?O5BS!i?>YZ2ZkP8Jm~%6ODSh30WL{Lp;RJ0V!vkCgeW z+WhPIxWo2fXymi^^A!?HJ*UA;5zb(LLnVHrYkK=$+@R_Hg-yz-`2gze7+tj@5d2U$ zN1vyb#n`E4{IYk}!7%fxtE7~~aepT?57OL|RJ_$V=7)mmaVGO=FkGh&CS!nTwr5U| z%;CZ+Tep}6z03@pf|p$GXfk?9VI(KGRYj3LzTer5jlUT1bWSLjV&*1qc)$XIm(MDW zoN&nz$^z(z&HN1Zvdy``X!w(ukPO@-CPLTPDl7#U@2**kkd7P25A|>k_Q!q*_ne@SFe9vo+U@MnjiYl0NcEy%oPkvLv{rjIiU0A4p|lNcbi#uC3MX zcRHxNJT$XubZ=!`;k+a;q9GdYDTuWGKtiJ~+)t@tuF;$iQb@4_78UI7K0Z8@BeuK48;*^H$=*vd~W9<;!KTl;<@I3P1|AwHg2`& z)mr=HeS>pFlAEG9mCJ54h(A08)0x_#ej@x(mHqZ;)8Ky z){co!OpwqDLsb6C)%sKVz*PkB{ zu7At!GuhgvO*@jdr+G$dTpX`)HlCMNW)H%nYTkIN^f0+%GRfQQoM*`v36T_J3r!Ch z!9yfEw|qChOYknM>FGTF^QIU&8UL3P^dpQC_#Nyc=h-ISk{oco(f<1>N1whpU(@r|fx7)utxeEDMkq0J}<{83_5dH(Q^3W48)!Kl9tHE00@QL{|x% zQjpbwk`3S}RFmFIheoYr8xuda8tME}BW-8499K0>Kgq0fxa9W`MWLvwZq}T>;5+w} zfs&nV>19tjz6i8+UIp*xOOQsOeZ5F?18*O$6X%<6hKKs(8Sh}CWFp#!E0SxXO{lgP z?c)pYU;|B>bH0$bqUS-skC5PRKO3~7|4g0SF=_2_DIkBVmOnp*I=PVObHO~)>zQ|| z{Wd!dw4*#gMb&Hc6a)u5`yTLQyuV5WQQEg;kqO9bKAbWIJFaMcr*(*V#<_{#fk(V@ zqFv_@9?u!~ZAYB4+ri(Kp!aoc5rYrmzEFq)9oScy-n%&+%ei7L|OxMQUDNf5Ub3i(dxY`-xMY)}RX( z59{GevvPS3_oO$}?6AA?#jG_GO$RWMD3B}w#4|BHhW7XMJ^dS9oKXze)wra*Q*<2e zvusp}nU@!yTi7-l?jsL`b6l>8Y8TgxbaXd&H;DauU3!DPc}5Mc3^r(q&#Kugv;J^I z5u#MQJv0v~Qc=6wL#@ zOg5|0G_R27r%SN9-QoU9JrNzmNS~yc->Mq+e7=mpnD$_b-0Q7N_*$2mJ4wCuiSkYo zc6lPiy%fBxEuwVhJ%QJ| zGkp*y zRb(TUDH!#&rO0N^4@VO%j9qEdpTflA^L}tc$9Z-mG7I0XxZeXZ)Y5ms!IUoN5589> z`S3tKvgJc}G^gcL#5|r`4V!GyByQBr*4N~xST=dqBtj-+sf9bFnZ+TA73_%eNk96LSzEK4Cn55QL2 z3|u}FYtvt(drD?8l7*ahg*=bxjWwZ9dYB_C9Mi4EqrNIFysyhXv~Y8Okc>SFQNQwfu$=1j_3!EDRU3|MkI>%l 
zB<+oTjj`i*UwElLbz7J)i{Xwz&Wo5Ok;`_OCUT>y^;?GDeF%f~$7e}?y89q%WfM;o znaHfy01u;p>ftOF2%{hRCkrt~F%$dYdM;9QUt`m}Pf&}`ck^CxGfWN`S#VhOWJ@Gs zy)K#Yd}}#0KjYn4v8nX=!&MB<9NJ@KPt<{DzfO%7Nef!J*A1X8WjuHjE5W9{T7pw! znN^{b8Vo{`E(JG-vzla!$XeAcOLW3A^rsonU(d4?nanPNO3w^GDOr*Rir>tC{TuiY zkbx8iOFv}>!TutjcM9ewMPFEbQFc7fDINCVaA3bVr<-jSqjBB~N-XTkw;}Jq%cZfX zmt+4-@SwtN+dNoNho1sNgeb=A_4|GYF}-CW%W_dU^0SQ^=lMs8kmBN|U^LYI&|zhm z?l+?Me3rrdt46`4@9j)1c;*_Va#@2vMT_C;>^oo^jj>F<^y*QXf22--tF46L*Tnl5@850GAc<0e()w!`uOa*(Io2o% zeT358W4|*&%S-i0iLz>QuKiS;u8x#!RC(b<>_#N#z>}Z)`xK+dG<#$ETBXJKZO6Q1 zv?D!jZe&IQpK|yIh);W>rO;)&GXGUz$&_PPfbGBK4`gHl%YR2d7NV-iJi9|&d}9h5 zkA_#5o^Xj$YMlmPe#z&b<4UV=_h2=dxkJ;dZm&;7(m#gmMdzNR$lGh)E@w&6ymk#*Do2PX_FD+^R`@^{3_d^r9Wml;QjNfileRU^P9ZLWAr9jGAH|L&h zG#*%wG)j5t&@k}ZxA&5-_Q>$!vy!tVHb_-(*Ly}q=Sind+ZFAw%5}@x)m7Tvb{T2@ zlZkCXQ9jqCcNMZXTE)ao;~sGi^DJs{*}xBx4W^~!#B~}g02IdC`06#`_bR;=PLOi) zc(vPZ@7>LIbkByqEaC+&E?#2VOU!|kymUWI!gRnTNHfw=y-ez{C+r$vKmje~TUq6&87FJDf_^+x_NS3SZ>@jL zRGB-rij%%>DEgbQw~FG97R9JtC1P(Xvk?;949B)EWU9~bf7BgrL};(6FOzj!e9_uu^aJK-4u zgBK(?*+)9MxWzUYoyJxavL8b3p(wBhxZJ659HgmCJ9zn$9E$t`i=nddY1NaS9^}Z^ zP}3Av&5Z`ZPIpi?O{P&6Kvx?i05qp>gO#?;<6X_HC)lFn_Z9aOHmNvz==X~Ev^+#u zBsG{g8nQM9(jgBIk27e%mPv9`#OV%dq(xd}J*?fChTsn?Y6)~?q^BP%!JJy^JB*(Ac;qW(2vwV?>XT=DH*Ml<0EvHm7W)8@5LC#zOMG?XkLnnSZ zcFsU!D9xaS>nw?QBNsLwv_%C$~V?rsjM5$9>oKNUcC|neC$ri0Jh|6I>GR{_=q3> zauvZ!YvEnz`qL|aun1dLg4JiuF_)t5KW?A5W@}hs(I`U*Pw_&SU@v?;gU}j4Z&0re zGt0ulpHL(fqMJ@1_M@3Q{?pyy|9d@X6BQ$@~e58Fh`>-Mc>d+s@R!F3R_}TO0*qD(9asO}$NeGY*++ zUIT;9*0{&5#uvnRhq5^kHP`Emp7FRZb~$aGwhTEYbG=NROwan5^L_ET>{z^?v6p%Z zIp)AgA~@%tmvpg|bTz)gOZhNb9#%*t=BW-8w;CE6pf^i|Em`X|mK0v!lOJBC-_Bn= zT$Kf7$tdjeYJOauHPQd!{3S1M{r2X{#KeU9L`CmWJ{9bU>X|LCKnj2r{ppEjS(UVTrIVvm(!eLq zl9EuBk%vPFQb!snRij&@U-HQ)J5tU4aAag;%q=U!fn#voI7w|>atf7JGQec*878_N zUVXuBvgD7oWnf^KtNVz~rmn0!G;*t^`2yU)C8lFFu+l~yVbd=Z zg3c(f`X5X)r_jSg^s2bm*fRtj8AL=x z!1ORPqYv0C^qFTkB)7EEN1|uA+)>spmEDcs*>8awP>p$PT=${L+eY1n0EUE0%dcnM zR==;jq&jQJy&?k`SF?mZKF&S{p1S|~;o92Pp&s`@V~Rd`$_u=R^LN4&Lq$+FreW4# 
zPAUeCrk>zEcT`=E-5+a_ft?e(r>)9qQ9Ck`DaL2*NO#OBof`MxrVP zz=twq zTpjmK_=dt>%O*0iH-=-f2^tw0Nkj#+o4>~~!7S`M{arTwBd$x`6kEaG&jzto^_~HO z4OgkfQ-8XK=5Lm(E|TJ=8<5S|wXQa|8lsm7ab^KbIWc=!IN#NYQDvffGA6ugdli{c z>!elsF)ezTEAhT~q1l_hb{h2AGT8V@Xq(CvV|0?uIwgY+iZkzpJv3uVLB_3-cRs^@ z_?>z+S7?y0sW4$jdY%`((8hPq-gr7OOz74s{GGGWpHMayFlD4Xs(hpc110!}`2lW5~ z1*^}>wb2KMuTxJbQs`Rv+j`eWklZ9o{>YbMD$?DL;}unh`F3JL0&k)Rp~xVZ5P_HF z?)3n^fz+TR~Sj?shfHYo+M94DqV$+w^48j9B)j z7}))M)z0|k;4o`@Fd{G!p zH@ls(ml5^S>4hqG$90Gb;ak6X8ZME{J-jMde?wNg>w8BmH;Yrzdua-I1uBuRbmHY( z7m1_;dtdCzKJ4gLdt6&8b+BEA1XW)X{b5%TxakcnVKkDvi)413+_@U2GWc@-CGxzqr=GdqqOm0j_4x1c z@~&&5TUC_ao!6%wQXk6rcBV={oQ<--==4oU>cw9mfmu{ZVGZ>dMUdcwPTL+YH|LY!|X zJ=DOqamg4eVVW3AD1Na&tHVgRC+U_R`<%i-nB-V|KzyJOR1sJ;ruB~R?XmBQ6P^Gf#J< zx>aC$>sI`rA@4N&dde}=;AULO2t;?XwVQl3i0&IL2BZd~=Nh}abiHm%tQFF>ncFyc zGBCN**CtdG{q8R3>bmW2xpq;y-^88qiLLV@u=!4mXtc+%3Iqao*f{ZWA+ zcc8sAb^b?>{xJ}D=Dx|g{S(gq_%7vbMzLH5tJ3jS@l}_^t5L}IYWh9o-fpxMzi#@FdRoKt@(M=bvrcu}Da%spvJ=wqO z(@uJ_5hc$tHOl-T^kZ=AoV$y_myP$u)9P^A>zCcJE4Rd&+x<^?0|eqP@$<+_mD&cfJAt@al)b} zMdTfXyKmOv3*blo9E#5m-<8P7eH{{XTyd=X-yr>B=;avWL474r38$v63|PpmqB z3Z3VVY^tQDn22FzC&^8*nE>rPK+*FkHOr(9GUF;-Dx=|6o^grW=%x=zV1a^zS6OTv zpk{gwoAjUn6f5Y6@;RGzA%#Uip`h2vDefOjLsp|Zs>N0LuV_56Bs#kVl(A@Rq^FcY949dd=&U%(w9%4q(EYjk&zJ!PwiY^c5d_Rtu%{;V+!!1(V1bV zf-Jd`=n=LH`EA$2K>Gj+?Cxg6*2&`I(r8o!R`IrdHy#*Co!C@2;zISL9u`(2R=2#%CddJ%6>H>vilgC)o9 zl^j;vUZr~xmFI7|qvp1+`AqOF*F#mEYVmlpdzD9h(Z0x8+=r2a3LW^;@`bHHSZ8ZX zdmJ&~Ox^&qM&7C=W7a}%>VI@o>2ZDmQAvZQqW3RrK*8-g^p%7E(w&F$Ksc)ZqUipFX+h zKgJ6`d*GM7jYu^!L!_*nG!{V6sx;z{&}S#w8kj7_Gxa4QQ#s9={)riyCT{u{dLh6A zrJKcX=%4ezQoo6pz*>F1BjfmadSZU{j?$dyY1V%V-n>YZcj$B>t8g~F?JJ3ZQ7_gGT2AZo;q%Fo zUr*7!EiF6pPm%Kj;RS)b9|6}0i3@>mwi6+qv(YNhz$Ocj!c$++($|rYJQ|PjGmYou z;=S8PU(?hyB9Pig+do)PYm#Aq3h`;^)3b5P?>1L$lcl_R2-AcZ^)|DGxA9-cwJ;y5 z4D%>fQOUAG-?J9w|BZx~(6RGsjWlARvZNJ$)K!G1k`S#(#|7Mjkf=34l(F~>I1J}o zD{=+|Sd+d`v4R>(Fiv#P+Ma&N#e>-kEEWrVK<1hDbT0aXYd?EA{Rt|e2^)PELt{M!#LMT8>P5+6V}PS6R^PXOfA6E%FD>>>-^W>A9P|8lj-!0y 
z@a(^TlSn&y*k%vP`{TLHO*PM-yo+g`znB2DWJNWQ$n6%z+Um2^(YjM-6{r?4q#Qb> zqC$oYYuZQ7mri|L==`7cia3R>q1@3zJOX^l;59&Vb0S*heiIL|F{sk{E66cC9)_3S zWdG7jG7sHWbIZF8s%E=a<9O>$`ToM(1#$mZy7MZ>ED)~ba+~1*ZlBH(K*;~l~j<3oq6N_O^{UQ9V@mxO)yQUX{4{+98Es0DNxLu zBnT(usw_K=6K3R;hlvlo1UykXAW&#&X~DOx(1cR}vNxA?+KTm8JLo zebAL{92c3r(C!1UUjSzT&^KCJYJ@?8p@Yk{lg@yksjfbez^sOckB=y{fHjDZ593?` zUpVpIh!Do0rRx=XU=m|&SeBRfZ07v|LVRrOuXJogNfqwDXGDW=adXGT#o2rKo()$~ z4{IV1`c?sz&2)|3zkxD9Dpn%FCS`P-{1HsT0bW<--2Tib=0JU-%%qGA;DA0M(vKQ! zkj>-X7J=BXgyq*VNJul>Z-I0tFDF;;xTXle1VB-$bZ|!Wnp2V*%p`vV5CSzXrIBN2 zn|`fwt*-u4TJyGQSAQ$YH7WLW^qL39T5sPXj5B7@3~PS!`6YL-94t%l8F@F;5N8$# za%4`{f~$z2Lg82%FOLz#9JqC|zbJ)Mks=x;jNyL&N4FM8dVw9k?6TNClXsfa`nmRfI{3DV0NI}h3lSn1_yv>?hk^As=ZKNm%hx|qU%V_)@3xO~~p28*7T~c;t-@B+gHHjab9Y0Na$`(7v$wG`1 zJs3uY289p(z}qSbdmE$I-q=6GxAwHYjR5Ix`nraWWdvCT5j+#PCruF7S)l*7pFoX z-nq2dFh4g1MT;5=LuvuXn|lG6Ux=4VgVMkLtq?Lv9$vOm&CaKJ+qaJQQY#iW_ZeN8 zkWRD$i#*;aqc9U0-wPNIrvS;au{2}C?|ber>NM1A%SMM%kP4EI8W^hzIH#LSRJY+4 zzYd||fF^z*3$o*3>IxlYW>vz^hx3{}NwImPK=bTw0bf>G3Arozvo7IjoQl2>iFkDU zU&glAHIMKA43|#fwjH!5hU#)tFv4J2h9$rP-fOQG^gHi8pBapV#M2(h${8EV-e1d+ zG1O7QdGo7TG|WLcPv;LQjKkXNtf7|1HL zzt#P<=2)*f=D6pTT2aiFV!a9XtjCH-)!b))@_#ep!4s|559n zNOeP{Lb2cL!a#mT2yZ;KeI>PhJT-4_*1U1uj`QOpjhZ6C8Y_*OKHW0sPX-lJXb_8B zTI1}KMB38MxSWT*@*y=VJ=0dolw;)Ti^ZZpCNy+{dpC%$j1FC9c%G2sO&*9>$iSIt zkoJy|r{#V$H~Y$uQjkH;I+?0kNul)t#akwJrXBY)QLXu);vbb09tA{~Jv2j;*8X5w z`kHstncU7d9S?>BCCGCEmGWuJdXQEdTYEW)$a0qTRyCGJ^%AYU@tKxl=5Ag+fbG~+%X@OBd}v8*Va`0+WS`TZ@j9Fok#CHCR zhXQw^DX7Brxw=~m^3AWgPs@%RMd>db#ld@=uP)seseH49&_qu`1U*2d49O3`g#rv~ zSq+l`^A83(vd1I+b*3&8-iOVGymC%!Kbj}Y=vnjO79d<)?vN}V4vBU%(|rAUP+c8S z^&2n1{OVwOo86$VC|s;3qHnX+Avtm}u&#IoPWNe-r6HGoTVMvQ(DbXLkRt zgryx1%#%I%1wXwS9C6vGKJWSyTu!3dXo+ zPWFgs^KCltIrEklFOob<-c*%j;QcKZygk~XyfMs2{KOna=5_smXkm9JRoCpX8)nBZ zzAnVC#dp>H->;76Zp%B>Vu@65-np?{R=wYa=n&sa4aOu9P^KKf1FHD=nM-w)#?TM(akFnnXKavR0-K2h`2K89#=o%a+EA7oQ)HumQv6*xm*?BodB-xq7X<*;B1LpS^VTh2|@`}|&~KfUhyIU$mTDYq63 
zpdTWwK7OPa*_pyks-)R3NsG{CV?A-}`}m@=W_Q;b)WqeZTa-*J60A~cFlyX?YnL!A zhW6-9hD%h^(s);4KFv(KEg+z^d%k#Cd%kq81%LCFNr?T{2eFUe^AwkzpQyfPy#4I6 z@?w+LrpxMS(sRx?^rcTgHq9;dAdJ@uiV)ZK8?l|!mVq~-0!TNnA(ZXDt1IbOqm%Qc zOqsz8_cZIrqk};-defnG+%zO>Ts~}hXBH-oZ>KmpRU6$Yf3;O=o()Q|Y>9@MN(W;h zmsaP@S0!3UmNev{M+|lS=TrjiFiX2(OKp%4h#2OpGsdsk(iw!qUhePuiD(A52jXWE zY&7~&>C#!s-MUw8M>cvp_7D#kKtN)vWtbeS)9yIrZlK+T!~ruGrm|Ob{cV(_-TnAa%!xBdxRgzI=o6L({ z*$=shG#iC>3f2!tBQ~n zJJih7ET__GdS_uj9p_r~e3PTmohlGGGXc#F6ldh7y0^X8of5)&sU8^`z%Hf3IJ4b%KMq`S!Sr^Q7 zx?*AuCTPf0Zf6KUqKhjo&p7FC-iXudKBNcO%FXJMai75J!j*n$z1NqU<7tYI<5MZV zUyEh053+ANORtx<{w@kgv$iz(Y^>ZHrtP2*b{Y5Be3T!bg&{pKAGrM;~altX4tPgPgq#mZoGQL#Jtr?G6C8M zlZJD0GG30;>)*0>-!$9}>UwpcA`HN7v*^{P*thr72b2YoPPwBksw8)YQATu zsU69MCd@j^<9yY^vzn({6poTX?HA)xzl+y<1`pWde@1_Efo|2&)?BCV+InxdCagT? zy7Aa{yfRlrbB-GcOZ_4J~ z@-RUO$&o?w+gw;->u~Od4NJxs5iCwhjeL5T>0-5u7SjqHdTxl^_vt5p_Fg<5p74FO zcc3%o=KhopgfEFIpNZ7s`U5o~YJvhrF4$+K4hBKGw(?IO(`waWElAf?e?$A*yN;n2 z(m;hWbB;y@d9!vxlw2uYaL)5*12cP$%b9)(H_7*an;sJEXalt^>>?5NyB|WL}-E`N>7;RftA?a;etBs zH$BR{4oJ)rV0m>gC}1qI)~ua=JO6b=7p@kJH9M0mm5+)d!3XpgXNltFW+F3wK7S`) z-&Ny+){iT%^fXgzPK6ICc+!j+ae*I6eKo>THxeOoZe~3W0;Tq=)Of z?1N@odr6j~3RfO_Y&Uh;IJ*>=GLaH+W+=+Iam`i_q~v|@EKS_Srq?%}E^u&$B-1~! 
zsqvCXpC^__7mT{>Z5z`c{ndO(Wb^1?jeZUvzG98sPhFEz7grh7^#(pP6Azcj5J|53|Qg4OF+r^7*s^0p1#kN-dC&KI@l9`B6 zAr5f3E!ODnM(ua~py*1&dZ)EBxSLqW3w_|`{BFJyk>{FAL)|6W>%OrxS0!7BON z4dq>>7P4M1E09mmKq8PB6=Xs-VUaU(gxd}jO)r9ypu+r^nT~|Ro8=F0k`$WmlvzW= zHkpo1rl}YoD~WNIqs#k!bLcR1NMgx*Hw4kOUHPe4^VRb}hJM;rp1-DESiR2dt-Kw_ z73>!u%6gUV2=$)-fk$*+rEO*(n0q||O?=D;a4`y}l%ADln=-{J*lH_>c3$;0vf?Np zf^~{1IH(BO??qrs4yn3Wmaf0GE&QC@@}~0ao-E)|P|F^_Evj{ZK98^z3)Cn;{Xr=x@Q< zT-f*~ROV8&?kNV~1Qc}4@+@FuE-KxNWqQblO%o>a%Cp|aigs(N@3HDTGLHZ2p~5QT z%mO-L@WwpTF`M$`gGn|s+K_xkL`2|^weOkTd1*-?I~I+|?{Ekp62lQC6v*A{%KCXa zLOT9gV%m;tx{8fC2LB&ov8y<41wb-*8!*}{(uk}L$1;~S`(gc;14|vVN(fDoNrykQ ziLTYYjd*T%h*5{Rg}~Y2uslwDrV=RuD4o#`?ijWIb7^tIw-zu{NI3H$VOZ~rXKRRk&RV`#)bZ8hg?!8>#p$A`x1ebvX}uI=T{7MENX$Y zzLKBx-m>$_27~{G<-)QG|2^PyZcixSFE?Xa=#N_6m{(euu6JZclBFp9VhPjUnmWQC z_E$N!ZaSY;P(le!Q5n~%f4(9nb}(Ko|aHdh&-O?nW$q`Zbhjs z!YmP0pGsd???}CgW{CTZFTMt1%eEftu9nXR@7+q@hgwzu@SgRy3V0p7=(@Pim<2_3 za1F|U;*5Fi9i^pvS=$p&=8I+o&w?s!dhhXe|8Z6vP=cYszImDPJB5*lAxh9uen|!O z^be%6vnaO*5AyuWbvpvi95?fK=4|LIlLTJ$_brH9Q&foSuwuixS$+uPo&!1ZE`4$n%!Kx z7I&0kQPV2@n*HG{4La2IJslIEETy z*2icT-#&@rKSwlE`!RlhTY=nPu+JwmXFcx+AeGK1Vw`X|GX^SH=sYtD#;G#CFnU9L z+N>y;Ng|%FH|zI%|4r%S10aI(UqqcpON@MTYPl}%nI)0NV2XiM^(#YThvbd7`CC-- zB?87uO~;0;<&u8{1rSFdej+v${yFyPh$Tpgs`ECRNf#U|IJn2bsaObZ|Jjj1P0x(Z zqOV{{&$H5y$?IRybGV@#K5G7Q+AEFWijCk7YwQUa*(Xcw9#fWQS#L^5n63PC4JrA` zZ9p1SaDnYC<%mOTEsM;iD`tE~I^q4s8bdQ7I1%DtXo1Rd{~W~}tMHIbA-J_pBkH0i zk=T(2?7Uw%HjjF(ckfV=F7Z4lvfLE_!RC+b>>7TZPY{!hQM>mQ&1yWp|IzCdR`-2s z2rZ#!ws41RluFA7A^Q+jh7_*J8ACA3zEvTBjX&&tq`14nnVe&8$LBLR}C4%CYpSz5FUm_z9+8sB1LUU2Y;$v#lK2djIH@_szJx_x*QVT&&w zxQ6Qqt!`f*v5rm5havfeuaiUOR&C!{2GGAXH$Y#Iv;lwtSCECj3PZj0*htQS9+uf* z;^W7(%8u03a0MA=-~1|U-h51a-Yn5IFak;ZNXrc#MU8`cRAmVjdEiwyg`?xNyGeN+ zBs}z2i@G+m5ij+gQe`EWsGt?Up5!aE*p)JWaSQyG)rj}MRaR0&g(RAj~}0;N|VuQTn5|%gcrq)`7*t zEC8PL_xAObIexNhJI9Q=Sc?@v@?&^Z-ku=koUa_7k9m zt5#N$GR49tb|t|?OC?{eVP^kn+_VW7xbMuap`-+GEtK%iKTwp-?8hVcI=E8qp2!*) zB1oAQLT9b5kvQW^kAXz>q@Ok$n|tp#eg5~}IC;3mH9)0sj6l>w=fFcfoLBDa?BOT0 
z^wE(=w3jh{BcnvFn8x@i4;x#6tIo~XuC0-@p@4=7yZ##$7&XVglyhyM#!JA4Z?E`3 zdl50Ls9`6BgqSY(InmB~*K zSMHa}**#nBpp!s8+TR!HCl6V*FW5+u?wzo*OYgg01-P@(V*)v%khpL-GbG|7*jCp> z#3T=X8b0gi1E`#Kx)*B1d8%P|q7m5C$jo{X*GU=KdGlL#=k8&{`g&4s?4X7&zJqbB z{7-oTy3x^x0@VviZ(~K)EThyj0I?0mK6*KC;QX2>kA3ca8rF&o?bUTxV(uBdfZXLJ z9e&#tx>fiX9!x;^XUW*TusUAkoc;T9bW>Txr|24+ZUe^00~YQMs|pn;XscV9e8A zYh}u`X-9(h%1>o1YCuo@-}|s%4|JP92AuHwv$WGh@n$3593mP5xv0>%@F1zS7XQKg zH!gqN2Hd@o%vfH(4U(8d_G@MT`Za|pf)0O&j)l!qqBhVH^)gr9oF8?YgW-iwZeWj* zJM#-&x=$(z3yt;b64@HZy);7iT?~)x0L6(3zjYaw2%JKSa!L`HWnd-`2PRaJx%GNK zHs-e{X;p3Zet_xA33DN>?!jvF^#n9e6l;%Nb5!Bu_ zcD#C5YmY%woy&F9hb{>zZU(GXfAd` zMK$K%BmYgD=Rai>>4PY#V)U4Hbq!a#B*!D$%nO)o+ekhY8bpVWg3WATpGh z8o((3%6XIJYOSa3Zb7{*M+zL>!**ZCYysX^qU#~Cg~N(tGh#I#G$FwX#w~fBRiIp5 z9VVkU9Q&IihOtf^02#%G-2ZTh7ua|vIq-$5>gvpCyG1GqF!3J~{f2`Ysffair0Ue1 zE^mLIn+lB(*A15#4SYEtZbD<@?JjJS6c=!5Y6@tg|Iv2PelCxL{w>Sr<0dUH^l48g z1fTE_JSa&-Z!6E<_)tO# zf?jKb&0RY8gw%T$*pyCmyL@-ZvU4%hvLnO(Qjk0Jp_HTg%WSs+n=6}1rH@Rec~^z;K+fOK27Yz@cKoBa9~ zJ8szD1;~0XHbDWKuH`-D6Uu!{%|??bkYRwH;1Pgnai#jfPgP%Axv#i=A^)0Y-IuDN z007LQ1bL$rSfU=I$T&(!ETn8d&z8kvlko}0r?xNMy#w-{Ae5o;p& z>5CUvs(nkYTBbecSohRBAssh{+RD`>MDC(oNT+7=n{^! z^hEdrm?1w+cr%Tzh1&l-ExSC-#UWI8xB7cR8C;h61uwlw%V*_zu{=kJ*<)1Vt3!bB zKJ2FBJ6u`6b!cK!-dUCNobdG8=t=7;diSFue-+9fL{bT$>XXS4v;=#RNLM! zzrHAqTF&JPJnw8gdic)q8}gntsC)jQzpcxMJuv$i3k)S5hzV(~m2+uO`gk_ip#Ah! 
zi1WN42WSPBd;Bcb$?5KN8Br{(a^ia8tDdQs-mDWz^TigiUb7{IO?nDUO#w!&T!Tjo zSaQ}sqwB$7h=6`|omShJ#yR}fVd2(0o6bnS6Zynuh$Jp9xFcP0Ztj<#vk_oO?(#Ry zGUG#!NL4c!^de`^dJV4kyvpw?=ETAIXS&i{*OE-zi6ns@ghVG9iV-grlWL{j4?d8fFfL5($?TDilGD5=9YX&ELNHHI6{U*w~8I-)*s>5|TIn|2p5qV&7 zfm|Dyk-F^mYk?g9IOYS%kS)vKn*`vqAy*adG)O+&uXB8@1*jO#TMtF2SJXK4nvE&G z8c7`-shqWq{qn1+bzpjNS4L?^H)U2seSaosuY>JsK8kXoiguu93KPxcPhp#jxh2}1 zjsT5#T0FNJXeUctaFM1VcX3z2_4<9NC*tx9`^J7dTyG$5QrqPFvv*d3FAF4Qt^nhS zPsx#gwoz0l-M5!I$oW?DZ>&rr@#nJ%oA-5g^S9~`Cx`Gn4%%EblTz;^@7#~9u85LB zAds0kJr%q!AJx)%#wFp?ebkEQ|I^QXem=tbhZCavr=*Jp1q+ED>HAvD1R6rQ--dVZ zBBePEuH#mim+omVEde`!w5_eGIUf3e;dS_=c?>Libo;5ZZ7-W)LTrLDg0kfXHPr*KHhEl3PiT=P+HH2)T>&`fbg{17?tDScA@k*t z7bmRlF%sEkp&2;U9K=SQEde;TKpFP+VCOgO-PJVC*jg+!B+07c>3T;^TpYfl6v+J~ ziSvhNGGoSSuQ~tfy7Z#+;sEF8ML9mXGq)r#bYHeSI*{Gt5OLmhX>jQw>&o?LV1jyz z|9i}F{~QSzqk4sh{94SuTr*aOKQA30sZ0jDHTd8#Q_ZZ6nfS?DJoM}=xV&(K=(glth#ys!97;_!A$9>lgGkNka8Jssfj8!9b`D1{lbdRmb*6#w^7nklN6ZOde0Gk$j{1oKh@M3Z00ZB<{-U)J`A;v^A@kg_ckKZT| z!B;e;{JPY3@V#iL#zEnp-_;-e5;xF4-8guPg>HqPgBig2?5`cY+hVM+IyDq5kuCyM zrHZ%H9U8nsp7-kfs9GFecYe=S#!2>yuNnRCgJ#gslVFmXoAGI4%M(SPw( zD-7g)2my+{|8#rkB>BDBcWdd000E=pEKIAxsiQ%YBFG73KxIxz3%pLPbCYNl`S=jx zmC_ERfbVGpvPo8YN+9R6bs3h>@&*VZm6s!Ce|*`0xcu?yQy2R~Or_AX#<$-?%*;^m zjgXXmE8_g93WoF(*rS{kv4@~bO^m0WTz3fg<4~1Lid9*R6nBEj{x1sBd#{CtMhsix zuO<={SP=w75?}SgzC9#VYyAe~FUC?S^yPIvieix<--1Es+@!YOgQ!<93D(Zk7ixza zeuwge<9g2Auq@6NRC4KcoTSxs&q4Q|_ojC5AwUq}p){MHV#D*FE+)JTYU(ko${e){ z9I}%BJc^b)j0FhM3eo4eK2mIfV=8E#KGc68Coj1q7VPmSKr#D#v3fo}RAILH)kxBf zU@w&ikCTn9bs+5HIUAryk>$kfP9cy|g7MgacW}2|)*J$~AozSn&GRZ78+swJUsT2@`kI6uG(*l8={8Tmc9E&_W^0!39*Gl9OPy!;Dwa;QZa z$Wy?8=8KYvNz}ezh=G*K4nbRJYZj!z!zcm1(h`-B&;cDoKtPS18{r#>W*zr0Tzfpg<3K&JOeD8-*J}#0H!h2 z6rdXJ%A_|Qc>Mf2p$#a%N_!aFH-9J-jNLbWhmNJ zI~G64lilG&zG`BQrfC2hpgN!!N(X#&0=iisVud@r zdh|(2gWvh};EZ`7P*DSkYe#|oj{C{`#LIKVLD0q(N4KbONNZ*=!2GX>DK>&_BN#}I zecOJgF~hQYP*Jn897+4cFC~RGE_L~2C-606l+i$d<7QC3IT{duvQi?hqaDDR!aPd%>l zO&*s=Cyz=?OFOx$XV4>LF&g!v-1lm4(b3U;DJp9Jz1ovGY5}F93US|Wq}<)zeRbuE 
zwy^Ruw&v!~HuTONoMMfzA7PZsBl$0`*BYhNLF+Bo)D{C`5)zUC z1OjI<`CSuKNLV;!^PG>bco^6T3;h? zy7zvQ&fewe=jOA=;`)^q@wQtd9GQ)cy62lJ-uEEHAGvk!6={}dncfUa-^PUm2w09C zyQU~!G`xNLR?614jF^s&%WA$!7rD;Uy;;uQlqVGf;TR_uAr0ink{%N!tncG>oXr5P zZN6H4(5UBdyfNg3KiHotd?~Vraa)8DLOkbrrWyz`7Q)!x-u`b%g&M^q5F7@t^UTgG zkI5^W4&Ogl@Hqt-j(?2@9#X67E*D8FsjN(tm7T4suOC%mIsoJ0Idnf0_MVKNv1gz8M^=H`e} zmB%@kB|N=g?@)8hmF0d)##$Ys&%+^mdp2K2xAtfEFF$5!+$FOWO1<9O-`~GoF-fp6 z^Wez)YMJ5i2c>>SRTYJdmVnkl7zXKs0Wx;bFpZM)dz=3?D$+PXTk zprBw~!2i0*s5WK5{pN4Nej3*0UUT{*5^%3XRCd9E*v>kQqd}Uke)s526VX2Q;dqg! zGj?cGx5)s2G?@Dgd8{tGxjG-k$$rt z*a^Qi?;9L0{XmB-jyK;_4vvkneXFX%ixNH6Y~JUwp5jOIDSN-Kw-~D;2K~sT?JTqJ z&U4*(Ceiy~{pe#t!r5yqn}IaZ)(@|@LpWe&(`jt1cqT+?ipUuyKL)mip3b|^7 z+b6AAGGPH=Cq{Yp|6RJ?Ep56JA-h=KNl*IW!w1YjDJI|(oJTXZ{(W1r$1O@R5 zUzJj{wzXm9s&TwAF~Ljs`eWYQDeiGz79AhYdVYC{Kb-fBR5@MDQ)YiU{YGqBqVK`k zQQBiWDb25MLWqfp8O`{q4JW_8mD18mf=EOwFMjkg7hXoQVExbGozFW&>@_I z;q)7_)z#I9$$b+|p2BG!zn_{_z7{#^z*gBJ91V`5jVG~~1ueEz?xd0m~o>EaB? z%D~CaehU))>C+ZZ^Xf-!xUvGA{{R)-9!f?=us6JuGWyK1Kcf>9AM^WyBU8lx(wSDH zn}81YxQza?eQlY=XaSP~XfYjG{1|bJf{{IQ=CX4=GXJlwt$FFke)<&p>(?(PB^fwB zzY8|9a&^k*suk%qQY++s9oWSSLPT3VYtvGOck-QBI_#+~iB;O<>0k|ZXo?sBi*T^AdqAltLq z_hE4WFdiHdpc3&t4h~6K*`Q9Pd+}Kyf2gXe0x1QIT1Nx}#NkRdsGK~`JDNb60a@=J zDAKCX^g?tMq9FD~8M4IL*V2Bkz4Wg6Lj!dc)Fk_jtK4EjpsdZBn<_YnB&@3J7Nkbc1j-_yh}psEYMb+oWg}1 zyz1t;2mA8*TTYNAG*1;MW|6`=rW=cMj!W@EQu)(N}k6HBt4%RK`U&- z0^Aqi6u**6@c@ljg&H%Dm>{M-N9kAgti=!%aL3Fy<1u`pN(e@t#%D>`0JRPl#3#m( z_;NfoL}KD1O;ONen<$;L}53qoAw#Gt9em)pAV)3cLxVJwD0-E7#p2OL8inYL^l_EPh|75oFEo&0h zIwgz_?cY#~V}Xf!0M-|1i7FFdT3H$?z{#jI8DegBa74%e=<+@6YiQHg657@r4Sxi% zh&*J+0B7>_f7<~Zk4j!KT|lb>gn)iPGAi}flAR)_bKz!^s)mR2@I(JGP-}s{W8#&~ z_r(Ge+5!uir;SkO&-joGwZe=s3noGwAEn$No#G&!+#sd=jk84VdOD&)a_&Mx?qqTs z2g;&UwuvHVh|g<|&IvP7w$5!HRt^H>yIuSi1T!Wltd~fQB{;#v#3zK=tSLhOUP(Dn zO7LpH3g}uokj*3uNpMfZ`?GlDISY z?dUuWmbE}hf&CIe? 
zG9u#4M>u|v!XY37LDMBX*(fRwY}=Da(eu}4XEi+e#t*u!2X5gc(!w|A&(OqrIb~i4 zP$2mpF49;Y1pXvIj{jC5H66NrupxeXLFt9?UUE|8=Z{M8BJPa&@mDnU2WQu&2Xnq^ z>eY_l^$UI3cyJ|g4{^f71K_+@+*G7G+(6tgsH%na;w5X5E*Lv)+5QbztpPeySmn+2 zo;EmtQ$Z8Clr5c`NX8J_JR^hw{1x<26y!Hl$TB@2d(%pqzPecGr+{Q`7~Um?!ae6r zJ%HvzpT4v`*R{5{SbK(y4xp@9G+O^`k+oNrx`B z%0eICbez;jO9451RB2z^ID4U8=ZboR%h5;}g$yi95d{VL5uiN<=Z%Q*%gd7b7XOM-X*qaa zPaF-56_2%Sz?n`DH})RUGIdw_W8_9q5zyatbNvb zpidKJBq_^(W4A8?H}m}bkYs4|A92E%tgLQ4S}zQx+lh+W*B)Gd`#Sd-oy$UYxJnXwyauP#)M9>pG2w~ptwFsPVXLGhhg-XRmpWGq5;DwtZtrgaJ;~TyGjYW* zOh#Vue`@Zbm*BKlbF)~`d~r`F$fS#-!ltb%OlK|;+QbLryX*%e3Bj2Jc(NnmR5;-) z4|cD4Mn^IhzyCGeZ_3X8a>?y8&$ZbYd@QACh9-5pQBgHV3r(^B&rie$FO-ffSCJG> zMuw>>6p%I;dk2RJ4Lv7zZknxLc%PvGb1Z@NYSh<5egX2N|pYQ#IpG%fz z#g*H>Phs!P*0oGby$;B!D`w%uf7%%(si6^i^3J(qP-E_;p2LO$uWX5MIE9V#xuqJz z#3!nd=s1KU8j5{>Q4^Qq@5@Q_eMU2~asM>9x^YeV8Gd*7;aErP1eGhkGS}oE?js7=t5$1Nbo5GZdszVSLO+_2AkC z=Q_F0xThxV<=&%qm>KS-x z?EbVC(EBbplq9@m`;9lFdY|OsPAFmEkg<>#7dgf&T8U=1(S-EXPHy9=F}^a(3CNyg zDpUMVdRrDZje<}-7JBg@za03LNCD@Z*|>!9 zl>03{CiLe{R`0v^>(k|keVzypEZ0lN*qRSh^rNA{*ovhw(SEKEFfad-x$ja-XL4em zS+13-ZrD{IT|=B6Ldtimr8zA^UNS^J8ATI`O2LgN+^))QU4N=EKB}RQe!4FjEz5YwxC^TR?F2M@lS;*Ki&TcOp{3 zkvCOc6y8224tXN8QFid|{K>SzK>anp27Tf4{KG93x$;YUl7`EZb#UY@E19z3kHH{zZH9U3@*?fzfN4( z=kVn3j#A)SI3esTLn zA)^9k+!7WmYzVa_UP=S240dDOz028olYeb!^X7RiIPrGa9Asen1|6)?KM+5mb_{zg zP48fDlxADXn8$tnNrd(3S_h7oH)M$y@1aMZ;oVV-7&VfZ2~>C%@GKATfz8^gAN%L~ zEm6~vdtL*myv%uB+EJk9m_awb3&x#)@%1EODk>14U&%xjRXVqqK2z`+-g#8ceM5OT^zT< zYk?T`foX}k)%IK$7N$-bex#&`!JZ^}*l6^S_Xk*M{yK=0Qu9x}q&a5ISwO!1u zrX5eIO|G3N=ymJ9cT8)i_#A8QTvKOf6=51bO7)4zhhkSB6wL@WIT#1D&vpdpOAmIp z(Tkmagt4(^O?A9P5{ftal7Li|ng6WFMD)jJ7;1es$Z1P8dj~(h!&j8&iZNik1X=o;ZPT$>Woi2v#O|@*f!u z|9-fi{tzlnqH2vQH0TGW=g+JjJCR$?Z4k)e$9B8cszvy6-p<6~bvfC<`sq24z_bfa zyDiLC-&PB~chIdOm(jG}O3C1^c%Oajx!zq11KCGb7(rjs858P{d`06{e*c z4Ggdr(f`=>TCghc5G^16Cmh-CZRPAN@p^paBp7xV@UL!^S#8kC?^u~vY3m*(uM%s5 zvE{7)XqjiobFNBkSpC*fV2ifr>{4jV+&kk%!MfTVOzVqqMCoBipU%@YU}7YZRa|15 
z^(N3qv=$ek*Enqv*#59=WQmbqq!^)Ef0t`KoU~0nEk;9zm?6Q^NUizp(-r%Bd>6c{ zimPeMsAeoyq2llb3vK?EJ_$HdBdUU|OzNLm)+vnJ@(a&1-aFjs+Vi9d@45SZ;B!e! zrW0chZ!H2hzp5)d+<;+_BrHlt)=koUb>`Ou@xIf$&|JCf=vQhtNH23Edx9*Xw?AX< zg4%89VcKVU**i;)&8JN>G5U@X9Cq{1_9m5&h&UX*@fKLyKeNKFq}C*Db@xRa6QW*$ z%9V9hlZM3KT=Y|mJa=J=zCwRXRT+(~+2|*Hh7lf;B(l>!^s+y12mrv~Tp!B|N*o!z zEsiOdStP}3Bn2x#3YFpNrbXi9_>vGd$J61&2Y5}G4#G5=?rtHZ;qbTPHB2kAvYPfR z3dUlUaxYfm?;Ugo(jaA&>!<1?O9Ix{*&@Sys*qkDM>mdBj+36)Zvq->RVZA{1wETZzLkKW2XXzLmw z6BH8z&#*Sw-@jF<9$Lw64Bp=Pogb)a0jzI;uo7W`5~7acf^?jf9mPleMC(;Qs3V$` zZscQL5lk_^wT*Py_#XN^^Ox21y24Yp$h4pPIZkzRVpS$~T!7d_d|^9mK83?`&c25A zlx-WK&uxu7H*VtEVGHevw>>!W{{DXu^_5XstLXXTu8t^H0rNlfTzMEQx?T2pF{ z0MkU~mCBF_S?gdekg{sOLNbf$=6YB))*J1Bc>hMa9pTKEOPEd@$i_; zYZ2Qj2=fJcmHA*$BqTn(s;rpv_P;-Tda2I8<@!w|y@}VwKYCFn+Gp}RBR=e#>Tg}q zCt;_(;a|CHt7zn!ocMA@RI65MwJUv_zAf*wt>a)Znm5VKeQkH( zn$dRkKFsl|NRH|7RRbdubSWEBNT+-1k@!)?WIWn)RZP<(o>4nzx4ync}3K;C`q z?W)Ay+BLxtrwYpdF_?I386Twd$`*R)vpU?)aMj~Wb!rMLjG`9cAr_re1@T+SM@8Lz z^pW<@ziQu|W^}N5CaMq7q>_URC1VWk$kk{WirX%m;^^OCy>RK4-a`w-CORq*jDaMqpDgH{04Yy!3DsI)9g@ZNA>Y~^6yp^vr z=k1Lxark6%@%i7Qr@AAfe`h}IYAEZx<`)+2gdY=LVDL_LJmMe#&%a294bI~1)X-pz z9gEp?-RsD03yWm>WzO&VOkUNXw&P=9EVT3qsdHke)b|96+vN+t+K^MZp{B(dx> zJ|)Ym&n6p?31cl6{si@l_GLCYj8@6s!-26k#8Y7G?H#E8!ESORzl}awupwCXxz=EK zw_7e)3+uiL4(Y4-PyEjv73K2Lw?~5yER~-PxRtl(T71G($tk?BR6fJoKb4=0>PJ7a z&=MgeBs8(JX4m5GW9O)>j8kQ+PR@Dcno>75qor1f%$YqxLUg=QZR*R&_q-`sOIpu} zy6m39;F~88*jQH)PeDT3`%fa(sV}en-!r*=j`$v@pCxNZ&*|t~NwC?&*k@sRdJ#1|7dNEn-?=W(?X14R;{RMp!AiEwg`s|}{XO-Q zuKG2e4DG`BRDB-gA{T>8Ic{X1BbgIbr>$)@#i8hpfBle<^?79I+O3>#U&qn$4z35F zP?P&^jUo1rDRrs{NUgH3FX_DUfkPtCmf`=W^VhMgB0gwagLtaaE`J*T^B77s-QrEQzhZnn}dj zQ?R}c4barEgGbbcMKZ$_-Q?9Gjn;OJ_X5{+xULBElU(g>OPq{S{{Hejb8G4qjM?(r zPNHqxpknn@eD!^Jyy4-3TqO2QI{~!Ecd@mGy(8-5CNvf-WbNlrg&Am$Z+68A*HW20 zqEf_M6ML>Ewq)HQuI@27a`#hZc}Y)~k#{>^;D&HQ8H#k73jtjEB=K|cLj=3gwLfXe zu?#j6Fj0IHb#yTx$?15XcizUEENmV5a+NxMYcL2dZY%-kLKpl;b0;Fzxjn`nM@|#F zxJR_5-a{q9;FP*b+JiiJk*Lk#Ii>~GP|xYe`DyC=J7XC_`^M2d`9(0rN1>M3(WPPJ 
zqO4decic!0{v(wQZ58%;&F}|<{Gsp1epRg!J_fIaxkU>Uig)jMzH1%JKjos!$8zaA zP3Iq@G3743(cU4GP1LL25mr)^2t}&G-I&b!6^FG;j|FkL*N>wc#>W^b#g{b}9=9~l z7{yoX(-g4R++Vl0Y&q;%}i$V;VWPpcaXv5RRbwTNND7yPdwj7PToCq$FN_ z9pDy@`jc{ya5bTT`vyzjr7^eb5KVe-2)_KG@u|SGzcyp2(MyEq_W>G;!ayubfUnkl z#s7HR(6ESQ)ph>kpOvP;#-ZBZ>Zlj3TXPR8DNpGGMg-Pv=SPT-Qom?g6-rf#kPbZf zK?L!4;{uIsxIb}er2|_q@4?u^7b|yCiQfbZYrXD~n+eX=NTSH(5~O$}Ids#J>4$3r z8fy7%V_*Rn)8p;ZuSZrs6~!)Z4r!xq(lI;DNPd<2%<%ohE>8_Vp6N5rc6v<6>Oe3Y zhmFi)Zhv*d_FK-Q9kRdzc`S1TJ@jcobRVjlx^L-Ns;UjlEv+o0{`P&wS-Zl>`jAOT zk0~O7{aj%@ss(W>hY3o?=e~*qCYv?~PH)eoFcZndTlC{Aj;(adm(gNJuf>;bilW@! z{eJGYY*&-Wa3b=qZGF#}9M|J1DU9M!y~2l=f2jttMxfrJXP{%lFpN{Rp*dx27k)2{426mQyQno2_Mq zG)sY0JtnMT*^Nni193>WXMZGxPJsk7j{-klE1p;D^~68Vf19)!Ygq*_%AhKWU?B06 zr8h+7b>c123alREqw=_TVa{_oug;jyt;u)eJ2!V=T1`AneAp80MbeqvE)iDIvL_ul zFq~h1>_8NbsX;*%k-WA&(QvZfbe?XLlvRS_2*GyzER>%*pY)owN)hdzJ^?c9-haU; z=eR-M(DGqa#EKPmaQYoF7N)~-y zp=1r_E5#^Rr0itRm{K!l)5Vu-@$e04{KkI?V_>|SD(U6(g0AmsKQuJ8Sddix_}@Q` z{DBAG?`o?dwomk5?XjDI&K8eiAAJNl!B8tD5euV+FgmU1niY4&i=_brFjR*BGHrhF zTZTS-wYU_wO}kh)KJT=TnhCEt%mn)=m8`9w%+H`$Yd$`sdi6RI>JhzaBxZA8#BBBT zhBW3RHK&yj%h6?>DcXo#XXLOoEpoDLF*u_iJu#qzblE#TcsEB|arfhY<;EM@Z@usg zR`xcp)AprfxW0DvY_gn*VOHJo_uGHZU-}u{GVJd~#O4)yrV{mwi0p-Xq z@yJ}dBwX^lmKKbwOJuPL)A9*9^(U`Ks|6*7U#S&>L+N|`Us?I_&ZXYTPSWFSa^VGx zw_X{{lJpU+=1;?~ z?>7ws`T38wZJs4nLP~eXFG1~A`8_;R0mF)3$gQqwz~xaeerIuRR&3L6g?%;_!~crt zr|z~)<3IW2o{Trlm*kS+DVOgm&)lJ6?qkD6$9I@)F7%(I+wn;v*VOvd9{1X47LzrL z?qWHrXHons;gsr;>7K2(bUT?v^_t0Pfqml!$Q^S7L1gWO7gD*~n|w~|OP=PPHe+Ya zjnv;)FP`=wq-wuqe@kB@f71|qxG0mL{_@8-N^+R7?Gq6%6?n3|oZ<9O-#VDgiyZPatUF1-*K0`2i&Cn@s%1^DZbXZ5ReDb z*><@Z-&VB7;`3weqyF!raJL!R#pF8a^;rUb**-qn2}!In(nhH!IS-!A+RI|&DPtem zbPb2JXOtDueHyYv+Sru;V5HXHa?#sv7B>a))vsA?h%C_b_0c~{;)Mdb+@Cowp23CG z!Nm^Ow%?0bCK@{Z-jYbM?QRR_wGq-f*YyC$sfYl@952Lz^X}42j^Ts|(u$-<_9ypj z^rK_FdBk=54&_x^>%e;bZLX_4tx=QRLJLi)HP|ZDZb9eAd@IAnv7i=c9Ui7uPvQlq zTE>V`Rvh~edBHS)gE?{gbLq**iv0MlKM0COcL-ASsD@p(j1o;1io_&ixBdE6yLZu^au2 zb#`_pR&vP7#=&Od6}0ejGo-h2PQc$A}+U*fv1cX)JU21>c5 
z$p%UL$!dQz27R;9-XCdmEE8||ebhYoP#L&k_7;bKE-oNU^?@p<9_Ud+f`7RJY2_A2^p!i@FmXO*W zm!2FJ(8bfmG@@pH1~8>T&)3c;@fcZLVMVoP4qq4Rr`+LR>BUGf}k4Aj1%9%GF?cL>4?UB7ZzBd+jhvi50<>2BU+@miU(3r*){da)c^CBnt2*AlqBD>jYmhbpC4B=w~Cwn0M{LP#61wdI(4(}OHH zIQYp#t#Cj{!hetdXMWyvtlWC*?_Wesv~|4MGzX_Z!*9}T&NO#CvrY<> z)?Q|c=tsXBIhS%M@pHDT_B_}3Yo3<~yfob)NI5v;k#NSN4bDBZRQ*loRh*H*d|CV% zCdg0u1qJ2dnXXwWY9C)*%ZiHvpU0q@q_MGaq}JuyYd+(cYY#t}9B$1(XME53W5>^- z~18r=F2aSOZPP8)bIt1JIvbhcuV+2@=~XM^0fqD z#3v7OqIiei3!gO}P+u`OH{V?uB#@Jn6Z1ZFjBcVPmAaTJw3d=1DY5S75V9K~G4|P` zwCqj^uC5lloy?Dhjg1{E-Hg#i{|g_Qrbr^gzT~FCQc`p&hImg2ihyQH(%o>qT=}s# z_m`#yFo0RJwR_tndHFR5_F=|B+e2S39XIuRs;aoRZ{Pk^=jMQkg=Gq6-LvNtzC?79 z*KOkCQ&ias?dujpyo9J#qZYs{-@ppj}Q9fn1gN|^sBL%^0}W< zMV(4p{>C#nN#4BtWqDcn=p9?kNsyw2K};)OcAYhkx5h^ev)vAJ_p8kW#pT;Ydj@Xm zJ!hxKnL336?pr3i%l$Z3KU4GfcobbOm{jY;)>iqhH#cykrKQEZ_IZdHZ-&iv#%8;$ zavHxpbW&w&{`*kkipX{1AF+l*BO=mYiG5Bc;+$v2v?=F_#n6`nB0%?+DiJO!DTHKL zy5EgsO#UCU1bJvjixXM^WnYcK(IiOZ^vqOx*pPY*GoftTR>#^!l3Y0Z|hyXzdqAV~$$H?XEg{O3EV zNGt-X=Aog=p)!EFPIfIH=KOg*ot;K4R9Uh8thx2d)A)S{%)jJhHd6Fb4QqwNaf}Sn z4~kvnF;uQaKixca!dI)H`*HqUNA)de1PPg>FZcHL>Xw>exo%EIAMLH0|4bF<;^Co? 
zWQ@Oifzs=J(B>|+8x<6?AE)VzW$FZ>uH(s}Q@P!!)_}p}K9%l2Jx(4ex?l#}bKuLg4Lp_Uo$l?=NKF|9*gCF$ zSA8yClj&r=wB5#tM2j8lax%U@H$b!BQZ?lp3F#fgyBNm4|JcFi_S=auISL{#WS=%l z^781|m*|4dPT%rRXZ(*A;5ZxXC@eg@rQEu|{L=}lxk}-8W@(wQ(IYoZgcN)Zw2vgm zUO;gLp8du|B)A3X{k}jhd;+KE=SG;GBW80;_H^@{WhI0T_F^HtmsV!7&t~5_c~ezZ zR_emaR1ji^*iY2X3_dhgf~pHiZKc_y)>24-@ccx9j|CMDv7evcc)bTXXyaQTWlyiH zaJ!b{2#Um+9=A_^<>LAB)g?4jYpPDS73dgF85&%s=NRK76$F&I>?|C)=iGce?a>o; zB-3{O3iprOMr3triayE*C+RR?qU=2Ca4fVYWb|h@Hez6MV0vw|tmN}&Bqs>ijqn>Z zcs^KrqNsPVl;+*r)8m6|D2EQujyJdWhK*ST1<9m5cS$6N%(p+6HTzqil0K8VMD8n@ zU)nu3Kyo~}PC);LZX1V4_K^;UmRhZ`hm(N)`V_I-a|Ohk-iQ4C{?taUd380Gg*O|e zVoCTb{Mh%MqKOG}KwzNL*0kJstqVbSif|juV#KGVU3V2?DY_`*g?S|<>9bX0J6@n+ z{K}9+)Ui+2=k1C6`z9FQPOij6e<+@*Q=k^EtQk$7`GbRDTBI|*@NOcHYvv8Jz*T&F z$L9yU)o)ZCX9LknL`8>2&Srk@jF2b3km)!GifzbMy_()M+2F+rvTI4#wPv9CkvdTP z`9elso+|P>E@pge1{S*C&MYp5vZ}uw2|d|bVyGB^kf^(D!`XGVdU+@{ zs&{1Jse}Lhb6B8J^Mc;XRZdfvS?hakk&q;g=>A$jaU!X_B~d_GZhptU!@i1HZ>IX; zMT|i;e6eujd_SQVKP0-#-^V}>YJ-@C;9yB zSWaH9nTciWce`kG=ev4<%{n|x-_hOkBr&p{yQZb)L<1EQ)F3dI^99~I|Kslz<+j5_ zoOe@6B)j&~E@x;EFpHTp7$60LRqxPH^o;s^y`l8v3Q@0#G`@f-zJ z<2Vr#SrfFp_f`hmQiN?CXWQ{Ws_ncv$za|QB`1+?rR0)e2iM>Eg3Qy`E|1qdrxAKW z0z)s64vqh?VcZ}%GV;vGY~V)^r82f){c-12-8VE@4YBv9aWzQf2N z(-j-WaQ4d-C}PF6$w`$89oSjq|6q*LX(Yp}xxM37`}|BI_MU6-Oy_U_f04?K_?I@+ z#Kgo=^rA#CK7!Q8RD4E+Q0QP{W6N{TZ?@-OU>RAQI=Z@H`S(A}rnybZm05Np;|#l- zQ%!eINGP?ZXy1oC>l$++T2RJbTJkE!yv0sNXt#jIsZMSu9>pW3-W|Dru7E)#(2I$cC>lyoKqDT2X=o)c{qmUw1nX4L)nxBb;} zTdmT4LX7=RDWST1GE$N`vK?jTT+Uftnn`FT7$Ifoq|-nt#T1`<7f^xOvHq1k-~A zX$=q?UhX?@NB{qz_^I(bDphTs-H@wiZU?G zMcy^fBvT9!5C)k?QpKsd-x+^JrfoA$M656dbu#p>wnm!<^S})^Hqop^o+aq&s0(IZluV0(%J$Cqgj-1o zgWS#=;^tzxUd?m5ql%vTQ*hjac=n(rGgmd>BJ=t@Ym)I8Ns9xmjR*IR#?o)EN4 zu&9~(DsWrQ_wetRW^Q5;tQVkYs;@l5wFgz-I=x}_NXv6=J-rC1?#x;Ou(oz~+K|ZH z=;H;G6;kBL9ThmQ4CH^uVMAu7psrrLbA0d0f-9rZ=`!xD;7+NDs^snE1!8MqznDe) z$0i;7Wnv6T*!pg6VindH_r1JG$s!B6u3w@xU5tO55+Z$Pd~4#zIzWh(-&YfCgc~K@ 
z@zU4vRG|N~qa7v(#*l^WlNSpstANK}i=g0OQ!wUa89!-gcG_c%2O2X?3_b^uvb_Bc;xI0I{X7;kV7UX+Bw84@*~ekuzUh*}Nz2!^%*0-uU8| zZGMTfYlu((`;z_rq1C@v^9H}xkcNKD`@sd%Epa;1$ub!aD!$;LSjfuw?fdcC$A7kU z@|CxTo?^Iv)4zVda8S3-^}1!xyGvG9Rwujt_YBzlosq5HnheyGl)=MBUYDxuCt4BT z0x%$);a2fw{K$^#e=^KJu9K8um$$4nqVWqSJoHFoMji8Vn$2WJ;?}s*LjhItv=6g# zk6zYBV{flk?SWCun(c6$7dzdTfB%-&H)F(o^fSCzlWdIBjrC%B_Jhp-^*$*-aB3eD zt#!=Ah<|Khfs8&KLLnmnZ75oG}mF) z*(W*Wdwjj|ctf)5Y>%YzrX#5iM~JS_$5RE^xxkd}uN0cKb@0UaR&<@FOe1+O>YOz4KHE8X<8;Z6yh<@UEc-jk!;OJKIMmA%hOxy_Tt(^ z9W}sE%1TODIv;iY0ZWTu2wy6_xHq{+j~*eNBU0GcJYO{17_W)@vA4(*C}Q20gIDXk z9Ppq}=Sp@7#m*UC+tK~fC!6a*Jvca7DswBasX-QqS6@jthC+o;vn%VDll339j{l$^ znw*44{z9Y*O^!q(^>xVjp*zKwDfwHbq<;)zh5SrV4#XQ%F~MO7vyCNJ*170L8E z*f3M+)2HS$LZ*VDIVPc?aCUf?8eMa4(2VFZhJIn~;8Z9NhY%ZM@p7}{{BE_Mttz1p z>-icQo{!gc^QOuC2Hx`|&p3Kosm<6O)$e^;6z|`&$Oxb*-TJ1HkeC@Cd42N9Tp($Y zPoW{{Ca)ZdNRiX@Y{St&^=tFiko2LqBQu@&_A^mKWVG*giVz;m%jv@87&b8@k1Jai z4w8xrSfKz%P>A^-Npls$Ms42{ubDpHn`Tm79_>yQCzo{FP&6~2t#7n4bU=B4V08r_-AI{=(@~cVrGj&Zu@o- z7DN#){Yn`v!+1Wt|&_?7EnhY=p_Tr(D z2UZfVVe7$^Uu-z+W!S?vACt}1YMGc|fYjecZS1}MYdEAM*L0>u6|^=`4zq)ltTQ?0 zlBt!>9?4jV+&bxDayGsKA(ekSASyhW*F1QQHZ1z# zUV2BefH@3c2D7Wbb~L5zlR&U@Py5Zu1{z@-it+JrqzmVD)=-DWqW8l+CK#%`xu5=T ze&8~X^yT2ErlOhJg#z^z#FIoeQSizKrB#;XKr=CG_DlqfoFqx}#p)xh>NgLOjXb0k5^WyaDu>_V?2te(9bt&=TKZ6*scg zXX^J7QSnjv!TD2ZtS~1B6Got~AlRB|Tgdh#b07D`{T9F={AR-e|(- z@Dy?V0!J;7lS_Sj_er=z0k@bK4N?+5dPM7cvaLc$FLHZL^NZWwlbac=AQA3eyKFaSx}-G8ny#bLj+z8C`3 zp>RU&0^0(`%&$a?%e60~+61nCy{?&2@j(iCYpfVPGd8D(R?(^lFcmukMRN;a6#Vw% zVmyyNW5NKZS!b36LMf>Bto^Pc&Q^{Nh>Rpa5MAi*BN+03);P|fk5xX6gainv#^;V0 zFL7ZUs3J@Gxbc8+Q^?oJ_Rpr27<7|gJlgv;^cyWY^`j~CtDzAuX*JMz{rL6<#8JWb zMQ@OhxzXc|D)A?%_ad0L!%B=?w-i$i4f=A^l;1f6QeOuKV)!1fin0~BLuYn31rqd7v2iSz;$r29qJ21eXYr&m(1&-YZY#LU zQ-P<`e9uLy7RyhSPuA=%8RMuAxRuC*5rs;7EowfCpbY!b)fwG|-u{?^8EOkKR7YVc zV#?!(@|G4+M03)L8a`Iee_fQTl=nNT0K|G`CojYHk(Tpf5;fH(}QOFz4!_QChbgaAu3LKktDfMd!Aajb$OSo(Rax0)- z!AV%IR^iU838R`dc!NR9ZPy)TVrxT4t}u!b`|zU%OO>L3su|WZb&5QV3GtA418idb 
z=kCAtGk}0ah`5Qyry2&@z>(o%Vqy+_Uu2(P$3+Lr3Z=ba**rDH1oVV*Aoe8)VlK!6 zqWBotJHQh{%tVy09Rbi?reU3{nMy%Fv<%aLU`*7xnK!>cGwbx%BZ+%$9B~LszNr#f zvNH&mgW13G@F+m7lS`=vr7E|vgBf#ZsXrk{h@e>V+vd^lm_!R4j^+IRAJRM$y<(tA znVy+Jg9%@RZ36qwUyoIl;O5}KcBtRRHZwEZ>K2_cfri*jB}*bey_ke_xdib7Bx6H4 zQ{u5>IWoFE6L0n#V1e6r?!d@7acMRRl|zPS@bS)!*{L^`I`Ay$uu%h34jIRH++TZC z?l5&$Q%}82Eh8fn6cS>F?BJ00kpRSMdoEVynLnoIhRr7LEl5lPkgbyWP0=TPkDq$) zk6Mj1`N{lg+!n9(sX z(ruRAvp8E2;gmVdK>up+azkhpfT3Ce21@@Oy}$eC?z@}efKP=pWDliEc~QY-QK($4 zaHz&-54Y!LLs-)Ki;ZOzRaDx5=tB$z+HU}zzr-kcB3si{LsMn^82$bI5diMr&SJ$Z zBY7>j+A55gt7vG%jcD53(bc8dXiC%NO+xTV77iA}#|{p$QeOKey;(|!N2|r#vypr( zl9Kvsx;ddpDeK%0Cfu8=PF-$4LAUrbZEnY>{1?Pa6C0a|=4RQ_?16|ws9h*1o^&Os zvBMUNP=!#~0~_J|Mv(=9@sRx)aKo(776StUHHs7hD&@>`b93`M%_<_0?DK`L7I5oJ z>gwvH@O8?&X%fNk%QQ3t(Th_-p`j1G{^j20XoHv35^T9_U18mi4d}XmdODNm-byfR z57}7_w;@E#mFgPoq5GAoPztn$-AD=A&SKBvsk9!CkWl$u!!y@&blijc`PyMoQSBQO zbxNvAN3Gw!;o%TnZidw+0guWoP5*q2=i=rjlcf5u)486QkPrm?1R!41?cwA!H!Pz@ z;+L)n3JN;ztvrCHHOflEN3@VoNqml(xZfN!u-E(^3`0nFOI3oCir+<*pBf;ks| z(atHBa9Ih8q~=4wR0av>W-$G$u55j#NFLK$fZ{?0axWLEW za?h64-mVC|))fd*VAeb`65Q~Lu&}VU4i2a#)r#W3SE^QTf?iu8Qd3mz1fCoVeOIhQo zE^2xu1k=rs3k3EQg@ik0Wo39cI(RGSu+65_JnUz8_a&4&?d9*GegyDj|apd!30!T;@p;-_ZCu2+7aqUnW&jvKN$E>KQ|AVmYj97OK`@uodSkp+35ojCGqhCq5S-%qlXDoh|k59$g`sznmxfGK{X<%pwUlAzR0@9U~jxGY2FYqcfa3Odp z;X#lnmc$QXDHKv4Az^Z}@%Yn-(~>y%_3KzDBdwkD)YR0&-!6T6K>4>o+hSs87sV*$ zISu`onFt)ZM5YkaE3Xe`@^!4@s{P`2c5V+)jQ%VYM z0sr2;d(P`)l#nLj^z{%`n0Da%d+9NW*B+WdOrjC7OCE8`qYBe}!A5p#{=8k=kcgh1 z{?EdK&M0rZD&S9DoW8Mnd0gIL_qe#YFMz>=ZL%|)o3Y?fS9p0peP3PddUJo?=ga%+ zkKS>pKn1<NC5@vRGECfmfqX-1nXcpoIU9oYAiQN3LHNn$we}%-QPj~No?+rdgI0JlOMYg6} zK6q|I!psKbuP5~b2Q4k_41lBUC8llsKtr4WRFbujIo9^LBs|RKB1J*JHT@yow-p}a zON>IrSFqORh8uD&X+=fXQQGgY3h?|+wsBHKp6dPU;RDVtDmuCsLZ0_l3l1c`kEI7J zcl4|5u~4_)-K2g0{{2$_ho1YC)D?P${k=9pkYeHLgx9WJLp%r&t3|)IRTSbuR=cob zY5E6WQ+ut;>ei6HLpVdI&70Cv0TjHIuvA6KaUVvlI}S}7} zj>RL=MfX449aKzRlxK-I`Sy~XUqqy11hTx5QOx#SCxotdtQVKqf~TG9zbEqrs0O6t 
zD4mPL1;0dri@Q{z2LI4ztiS>wbd+BClWl{k51;yn;Ij~S;5T7+7r&5Dd(N#_8Cv=L z)_rWH=Dp8by`XY=*9QhG;I?7l4VmoZsVRe{g#~7fK@9A%`aT+Hzu}wu56lnA3Gh|f z*75Pn7}$iew{LfuFWypDZv)?(Sy;eUP*A9@sWI)#xdk;;@!zqwP-1g9xsuXSe|P~1 zSSxdOm_0V9tfDdp2?8-pa6*2IuOvvy^_a-U|K&%`Rec`X1s>7aC~yWiJeK1kW1`iA zr1M6`g|i-?gPosmZY+lHJo&*w5<7=bvhe>zgYL)#s23!{D8U(U!v3=+JhE`b!;_t! zFER3Z&Aq*vz%~bW5Wdius2Wx2hCs^&^#(o&TUr4&#{&GX1@>%JyQ~he#y&sZe+sCB zp`()%DeT%~fqG4wv8})VK5UFCd=`~t>2(0>VyUR8%mflkyHDG0oO}1wp_bdGhTCoyprr1?-K*#m#4+Qxf*=SB z#UtmbD%ms}7FG_F=?6@5a&ptPku^w);Ul)6EVWKNP*YPwxAW(Z1=8T*gYae!I2)U03 z>=}Xb>%XhETM76w0JcWfY=JTjlA>Pwj~@iv z+uL+yby)w-7BS`*7iIqH5+9ikwY1E{A4^C`*sQ@*j871uSe2uiMbA4YeL___LM_?b}oTpiGv@??gc7lfk3awOGBP4uDqxbxNR{ zUo^G(*)x3lMD^YKX(*REBsc2=VRIp)%<1kcxJ$WP9%l@s@r{sH$gznobBT$I?lE8s+xKMbaLUt%s6bHVR>lJPNd36sTys#B8>)aRxPafD_iekPj z;hg8mbK}PSWO$2+n6U-)Ur6@|%hPlh2@VQMp=CA&{XhWlL_9n^L8Pq8xDP)W0`cOh_m243|P*=s_9r0*WapZ%iLQW_MvL z!1)P%|7mt!9xn8`FJfbf0g3GSt8j@7w$4CD2OyTnVQH((fd*;H*9PsCI>O3K8=D^L z2^mRANd#zxk!W}(;4pbJ=l%OxhzI_VDF&RrHbbY6$lhQvWTHgG zANNZn!knKzeL|or0D1x;EUPnwt{?-oxXn;QMt~>_tLfw~0W@?u@Xl=DSVzjNVh04+ ztf1TnAT{5dL^#EDeQXB2xdZk;Zb5{GpV&ZtBBcT9gXzSFMhH^L?>H@+KNp8qs7G2* zV@y)izwC~GRag=#rOvSm+fwIcEjw6ExgzF_XzT|*PEApp36-#LW+!P?i@xKJIS&v%*sNCt&a-O<86;xrQc!* zO+g{j4;&oPWAvOsn=45>?AXHh5P}+E8j;R#xY#&Jf@m;6CM_&)RPVC|UTbSBtk!e| zlqG*ogB&p`a9Ly(q?Ss2luqG1A&Ia)7#%dTP#chg4m;@OOR{{{UIuvid5^|Q&2e;! 
z^nx$ZivVb9ydpLbgoNNPU%nhdiIE$_LQBPLvjJt`7Q8wZzy;uO=btTmh9GY)4P3t~ zMVNrEehdAb`z#)E19KpSM;a_R&3K6j3%Cdhj2i?N4y@psTD5+O<@(0|J3 zYpcOa%R@RRFxOCGrq>b?Y;aL~kM)XS1eCjZy4w%ZqeKa!i^Gm@IzOXl^4TSY?WXE* zY|6+Jdw6-VLMm%IJ1QK|WCK{!ITDN>u*R9$*#JCp4o6K)DP(@6Ti(@G1Yny8FgiLo zw^tHG!WdAlwL3JOV#D@QmjvYx{ma#aBE3&}u3z8Y>5*ZD=te-r6DM)EtTX+t;s6h# zoQ3>WKSeB@0>8|B?HU1y^Wr7cvgZg%H1D9Hp&?LCiSMcBe}>kLO!fFVo)>89WT(hJ#^9-5mZ`0HzYe4!vh9S{iH{RvA>!;z*QfUmHKh?j6}WG4HL-m_35@j(_$0cdq*T3SN8dkC2>E-8rym1T zY&uNgkt5+Z?;xzMt26%+L5cjv(l^fZAb`jq2Ia)`<$-+5#NM9Qg^;De0&rslYy!{G zt#!^Ha7tl^{>bJtq9lMcNCIIxBaeAX&`2T2UkZ>v0%~@zc%RSCnn8$~Xagh@5jB8R z9|NEZtxjtl#NNNbGz7%CU>E>_Fu>t**=7NB^ay-&0&KmOinY*9te8As1X^KbWrf31 zS>fu&M~*-o38BbpZ@4x-2L}i8=yx+UOdz8nP%|LcxiS4*5O_6Dp~W^iJ=zKung*4@60+A$KL&* z0I3%ywdtCnkwm~9*R>IUC|T7RK74zH7r?B_5D>N`XI-I(2{_=?BIqFO#>(+f8NE-C@B}=h+-y*ybHIynGT~nut@`Z)(t`;0Q*jb{Pjkz;(++aH1H&AqSf=T$vRtkT{ecZn;icH3w zyZcT--?jg#o=+%-bc~JBfq3x?pcDfC5fnNrs&~ll`LnVFRzbo-P)yhrTfZN=`yEN7 z&^zP7^s8P`<&X~u7!Xto62r~%SbgB`F&@4w6=lKmVtwg&qX|>_nYcvF@66a@a*5i} z%vW!v#>9WOjvw|mQm3Rm4A??=zHc4>f_N@R)`Y`aH?BW|&U;V%;~B;U1e-Aor5KnO z*RGSIOvbBWgJ+nmg#{ZRi{L?j0FjHA1J+d*#JmWd{a>Au)Dvkta6C=59x@Ty&7lXUJ`PJM1{@qw`$L87zSAxGzjs3qtitEdpAq)w z>%76p%iVhq@M=AF-Y*SQ{jLBe3AFN;q$~BZzw8riAD&qV5h>+1>t;6R5iutJ`a+;^ zIx%gMf4+H{0G;G@YIFnXt;2b59$xmx*}>l_Ko)Q2YAoaIT9qWh63>PZ`v;R=#85x` zCnY68#H4_TDX0sgmoIsm*@(m*fO;qs^6uWcbqf~;L_9q`i`ZdVFg`4q-;*|S_LOdv z@Wgs6U(8yp9Zib1kvx`7+2Oovx5aZ$%}5}te}^>vLD=C(`SGv3iaPOhv|k>7<<(&= z*1crNz(;vI0e|FCa%)Q~=Jf|0j+KI;Re=S6V;d+>(w@?JM;OA7SH6q<^8S8r*f%a& zw_>W~-#twC4(zaQu=5$dU&sa#0_3lyfqZ0dH-}3q2eJW*@Fm0((2e{8)!~^Lb0D-0 zVL(GcFFwE}C-{aCFpGnm!{@O-@N{eV2szFAr`I!D>(9=TN9(R%F6vrIyaF;!PV;^z zZe9#4@-oubslnq}-<9wwl<{?S^D|1y1IF7al;3k`z#^5e?KwXaDP8C;3E2n>tq4B( zJ|Jr?Fn|@zb(I$zQw)^jAgPK5yTpF~#S2aMZM6no#jq^}T6U9OAh{!pxN zC23|fqtgAUyH)jwqmsA{BP{3-hj4Qliv3o={&QmbEiOz{4=~nuerF!Aj(mFnB}fCi z<&pESx3}LD9C^N;4AeW$ zu3Z(Qa3Q}nilxEzr^MOuMc+?G%gRdrvWmd`=t~I;3(X3(Ysw!6v+6TQay`UB<`XO- 
zO1<*&HU%|>e7;6TnH9m!h0TD>Gf+kRCR08T^AQ%WqDg1O?-W%jmgB^A_m!1A@p9)3 z6#EjpBo_^6C>TyP)UkVNFLzBKLwuc1NJz*4U=2})NuQrQ&z~a^R&{n3I@(bwV=3h!B&mCoA{w!Zv)C6G|(=iL%V zm*Cu2cZz3fF20}s^4_BJskBSXPl+J@|4h5nmVwtD0M5nbb`Mm zT0O1vTG6lR;hpWn4GW!nvP_RKRlGBfsdRy7>IVQFZLwP<$cqGd{AjW5wW!N z1WE!`hR$gqP2=Uh{rnkHT+Bzsqx%AU{T^iGgLN3QLPiwfBNr$Wm|JkS-|+VKW+r4> zCk_lvV;{P`5#2dfH|!Br^2g5GWtUqdJx1=0G66jk(}?@A?EC_DO_uNQ=u+h0S(ZOv zCS>K-T>e;Dv`!5Y?HnJOT57zNuyT(WpwunhB-1%{p)Jzan|t+54tKw>j*E=80KQcM z1F5rsi~red3`IVeOZfd-aeV-k*DO$L0lNr2vL`br?%F}W51&d1!|Rcb)d*#VGUYXN9UUON_V>HA z)tNO- ze(~t-S%~Ic!Piog1UnsA!?jcKOWG&LRb^uINksbhh{ar%@pmkpPv2$?vPeqM(Ngot z-`9!oyYwVfR0T6DxL~2jzYqV?`JGW_f|MgqjoleFb5Tg&?eBBI6kjC z4&c%jMY03#)vH&Z3>HvIA5LTN7}e{7K@sQ37q-1^O8??_j^7p}>)?H(NM?=4hF2RY zb{_gl1sj@*n#|1-c5X9wx-TK^2|ktwxlPQU5s60a^lSH@7{9FdWF%ME%W z1#DW89y;e?CAKg^C7`2V>@XS>txl8!I2TShgY>l!^Tm!8u=&`FX~>(72uSnA4=tXN zSRU*Zmfo`t#y%hQK`T7N4=qi^|0(6!GS;sUP=0hvUo)mWGAWLhl!#bT-Fr`)iO}}p z&cqh1*Xx>ertmYk+x%I;aC;$)_a=J`;Ur*c{>T?rprL?S$JY`uatM#ukd%^w5a~b9 z;#wpj;@&b2ZT{JcDfx~mqB2G}sZ5IWZHQIlIsH|v+$W|$Fl~Bma9??Qp_ROAHS^bW8_vicj?|yVYuFLg4-{&~a<9NNEM|kJBykLDk?#*v> zSTBmTo>!rXm7hHmgPST+4e^HHK&x92>3aO?5V=voJ<0hXlyFR6x%~d@cTT~#iIg|L z(4lUV?L=PwMLM+m@XT;r3yz=7u@ZwHl4+b+Gz5+!=fN`3PimByM~U-)S6 z?xV*~o}2I!hB^@NTudO9qC;XgBsBwCg4%|Wk2=yrNZY|ZL%4=QF>(j$D2TWeuA!QN zD_9ui=brfRce+Dk4_h9$ON@wK;*n&>O>CA3s$I9JJ=h?SFf)79Mk7#GPuG*n8Sf)S zL-1VU{^AjXfNsQAuqy<0WKH7!sZp4x2d4si_KR$qhIzR8dZn{)rMJ#{h68rqB=MM5 zq|ErS296NKO$3=)0gj>hX#hZtyONJm7J27*Q^O!1{uoOR8{xxBf$`) zB3Z9q4L|IB%#@|0hW0Fl#1jZju}wW@v6C5vM1g1MvF_(L=b=U;Z_1#Xf0~r^Sio)4 zocHJt8=bgY$oJ)bUK!$m%yRxQwbZaz_}KNn+Y-JF9tu^HYL9~Ke}~Nc!PpS6KV$ZO zwh(bcE}WqcQ6c?UK`!9M`BZeBVC7G~1^1nzn~3udl#9_w{K00rqE)o{U z`ruY3y{`fJJU9RyD{gc?&%P^5;vZmNr*@N?;`;ULP>7ugW%O-`Y>iJ~~ee5ZuGi5ouY^!X50!GVO- zkzd7F3P6yc!Vdzii(qS3f|RO$Epwpeo8kM*oL+GW`cchh^cVQOUf7)EJ}#zeI6x&G z5N*>45#lGN-@f^yT|X`(DkzY7d1Df@A&U{8M|)?w6y12~2waQqa+$m{TT+FlCrF}&vs#3+N>@rd%JKl9F* z9KQ6Jw9|TyTO+ZD2al`@>cmt=n~Cl1zLLDNVO!@_LytGZ9K@E%ZqbJ>r4zpA@Yif> 
zA)m(z`qbRq+~uKC2(t+hSMN$@3F9wBD+|#oq!C^3ip#YmIS|ch=-|gR3gOTjZ@`zy zP)FaBIla`a9^kk#btf-LN7FFkNXn~kKqZtlQn~f(uzb~7S*CTRvl*59M*%ukOwr$J zA#x;GAh5cKC_QSOkh2VP+85T)d+_NH(uu|`h7xu)f5`6I+{QZHX|ojW{r=tBQgAJ_ zxnC&f!spnIq=yf;83Zv`7F%+Jz2Y&e1OIN)(fsa(|5sALsqCkanBP zeHd>gg%$a^uJO)Xe|Y0b@HGI?M2GHowq^!;HHc5@OKaa?O785NK{O&f#lS9Yavkdl4h)Q}nEu zBZ(ESjOX_W0L@*%nK?-u*=|<2oM&$-@ywzpOqO@dlN8QiQg*fyB=3MoTtJQuU95nA ze;o;4JZHCO+iTr7Xdn<}awE@c7$eIH6UODYJd47xS>xhcd+#S2QuIaMgV>J{pDa;4 zM)tUCxEuGINrU-Ld7rU3$M&GU193q&__ANcA26_q}{cO)$pmKITbdLwzCu7a&-W0b}N($1E6N?wKbuJMDGhX?Z>bnM=H5Ls15VH z*@Es(9F)VALIC+xdqNkNs5doX1=^Uq`47XDcBf7q+sKXBR%1zS!MrF zgEne&IUsGCF~G3Bpz@X*m*tDI zHQHh;6cCsu41)Fx(eVAt(gqjliM%7I^qy8cwYC$f9W&60r7Y$V@M)i}*x!Ag>A-W5 zY(RlCvDK%A32nV#+1YeE?+#p;@un&CJoZPlNrPGIKS0dMEGT$6h2IRa{H93k*F%2g z%D=N}vu^1X91?{hz7iLZkC;T`G)B0 z*A_MZrvPp6*m%zl zYoz`h&3Pb;`{=o5e%#ds$Bl8u6IQYvYjmE?xH6g7oU1p^d2I%hrZ~I>DbE{x#W z-qrbe<_BVmGiTK*CV5T~FYtj!vNva6o&;vR85E`zigge{7)&DxjkI%hYaq< zQ7K&1nv+oLhYpHi5Cz<#SKRvds}a`-;;w>rYw)XuGwR|4+BJ&}#0|YIe@ydDwRCo< zi9X%BswtH4Jp9F#^|RBR;kUo@Ugo$vR<|F?mecTbUS5gxJv**M4IwBknn^eU5vj9U zwzLT{496aSAM9=0hQnJLvV@~PA#>^e)*1nDe|$>}7X0;l@waCR7Jbn}h@(>KU| z%v6(yCtMyX1MLTrvvhZ_fl2bOI8YBafVS~*S}Ctx6W0M5r+e5(8(NYSmxxHBx1v?# zbqx6oce8ZACR7Gvua8xA?Vg$Utv)gN{bT3{#+i|X<-XZ0>1);!6A;)lLvyd)g&uf! 
zQ7{Uc`5^BF^#K{uh|N@1-E-w-{>Y5YgAmU8qp&WS(We({va$r`*YCbx92FzED%J`6 zsNRhx0a4TmOCrh3dm7!W;jhfCtwW*qxpSjd;@?+_Kig$PoYYY6M?wOeYNbdw&bQ{K z&F%s>>-Twfb&PT3EeB@$0zR9HSaWb+v1|OeJ=-MEf93}XQ;2Xdrx_YJ?%m9*-xkkF zhaJ91(|)r43e*Rbp^Sk^ECKnnc}{IdL>PBMr5E#!vQG=tB?80GG1vi(b4L%ZvWBsk7G<=VT3V&b+6L^ z`lMV4oB!K3ra)?vr{uD`a0_bAh%Xq~#hHP!;b(HI^}XEO&uqr;9kOvf<|8F9&^;>C zqg4m({7Z8HDL0k+HQt5~?`yvMx*+_`k{)>DpcTR+UXrKRMFyh(6e>Z8q%%7^dr&jX zcGOpp=vS8#)kx3n8%>a3dh_uk^O98QGGk4dcW$u%2IV|@UfZjx4aSSSRG5^Vwd59G zNQ%X&kI}5b-W_l}>&J>2q+7_+TA%u^gt)c>ccsb4 z>l(3i(Qlp}9BOzo>8+`U4M8X~&>MyZ4ZNAg$+ zotL~CyH{7IPw0`6+jA z_mB3Bx~8VTY#Z!V%G0sp;?1CQViz`YrH_jZykc1L#gUs&q0I_y0U<80>|bQrDceF# zZh54f5%dJeX{`X=DlD!O^tInM_E+yHXn#|{+A>tW!oZuEk@MrCD7|0l`X?({;w=i% z>^OFFA!rc&^eL1}^P!dQDpvxcpiq7d3JN%(=T{tFeP~N~&x}W6^7jJD`iWKR8%2U} zt$1e5^t5*42b%M)AJm^5;NAWurLW>wTd&8rmyq6zEe@-8IbIBSQ=p$jq!Y+4v+AsR zbN!my?8C>JQpx&SF$)=(spCJV7vs?%QpbL>@`@?n7;YURg2iWy7qA!dL+17|_~CD7 zpgobn`a~z{hys!ac>Dhy2i0kjbr1J#o%-ATXwBY zPUh6)W)Z~2#RbommDTpx4dew{L9PKz7QAkRdC=YXld_TWjyx^|^a3{pLn?y{(8Ztn zASz(Kd7b!*p~=1ht@2|iu>qqmMS!~h8oVr+puDpG2K^PF9}v?BwC6;2=R%06x2q^A zGi$%dry3bKVPKVrkkRtk+zrc5S6RS z@EQTh%C$0eS?4E$jqAF3JK&W2bWI0lb`b~J=U!dDEnW#zqGVp#SNialYw_ujk6so- zd}ElHeUMJ^+InMcC3J6}(PuQ98>Ey99EqSEUCgE1dgyzRyQ-e%x~$;6U=71}D9tlu zz8C8Zb4g^mbF*1d4()K|brotA8~7t>$5<5=m9W-t=XlR^^iS4a!cm_KCv)_<)15na z6#x2YzJy zK|w=K;jEW{tnqrtGXr~z#ty@Fb5mHN;KxeNkCoMla{;N7_4m-EnHiSX{f7^6Py*Ml zE9mJ_AU+B}q#QKGW&)!e{ql-z$TKf&vvwX_4BEhYpXl&fJt< z5rax7B0`j>2OfoZ5>(FSR#rh67jRVQgBb!KI7b8x(Yuw%H-lgY0il5!0t1Cuj9|l1 zf^;OsHAC0w?r(g2fm+O=FN&S^4b1OhbKel>GVtFl-Z&<+X=rk4A|ed<(ov=Kv+{$0 zqIT@8@RM65Q1+N?~3M$9A z@(K#Go15pd-QTY2wGU5VeYoqc`_W%mNcdTfrS|W90YSws`mHzP_fm5-Oo*b@aaqNt zT{u>*zEA~QjS9Vj^XU_|ywFY9)D!mhK7eBk@yP+i3XE8akZM5^Y9sXyPyq%`&wu#Qlh5Mt&2>6c+0!?(d{mGU?A-VCEqFs%z%kLf8_zvE8 z6rAhTp3gZZb($3awp`|^t0cvKK+~UER|#qsuF^i$qzIKL?Eels9S{M*$R$;wE`jV1 z_5sj~DMK|67V`F>_FZ$Rjpdy|xuXArgBb$+fEytKMgc=a6yP9+Jtsmor!_#+i(J?d zMg$-im;spum%9$Icl5uvzN;Az@>O)R@7Xrkw&tJ~Ckvz(SPt8uOPqy|!9X7R_^3ZI 
z%Yn2Pq%;sm&hW&PGH2uBbJ)37fxl@<`iC?of|kRtbYtDWnFADQWvoxeCc z(a6I+@(r&uGX>^E9+K^GH>~xrkof2eb19O+&-;wN`;53?+68iAqB0&f5yi5b={j70 zvVxPcG#z-hOug)>UNqHw^FwT*_c@L+MNdrerG?I)eYYjic%6T;ldo{j&+VN+Zksv- zWK5ve%j+W8?y3Pak*KI>OM83%vEnW+qc_u-H84X!F^r%hF4NHwg5Vrfz6ceon*Lqu ziO3+pPUDl3*u>9aBd>(=7=nHUU;E_;Vox{ero6VKzGO+!!o04is_H=KNRYWcInTJE zbTIxEL}w6nfc>EhhSLEB{};F9g9D_rV9TP0si9$LSeLE`svoeV1bml10o|=#?ba8F zAfuuegu76(ulu&lVxddG1ys^C6K0Y3{%Vg1wZGSF9~~}LZ*DbtT#B-zyT87Garn2? zd(+`9rOaW>b+Zd7tMht_u{op93#)?#pC*pQGP3?W4HOcO%D$lO;K#*VR?2APxax0{ zJ?M}uXOxQj0h=dxm4UDhIxR@-2mE*=Lc3SF7ohgm-;jq-T-VHO6`zkn?pQ8YqXLBl ziVW{yK=T}?n*?qqZ<4dfk<2VE>Yg2f+UWucyag;{Td?O~qTtdR*#$9z(V7!&y`RFy z)W?S5d17iIbQL|y_iSGuRn)}y%N>%l!Vhg%PsHlXF7@~}0JccyZ!;hPP&1(Muii9r zo84@$_y1sA8oV2mr=5FNh+6qW825Fi_h)+T2nVxw5-#b~%jk(_-+=gNg=p%;w@X-5 z4Ia9GCp+?Y>-ZzdWDY4d>J^5hsK)ZVVOVNBCA~&)46ycsitjeyJh*Y#us^|Vq$B;| z%DuUznr)rM!=#diqqVMgfmIGU6U1r8rY1S1I$s5nAB$Z;9|s&}m%f^onGOFmLnq^F zJTNp3DJsMp2=Wz$0xeJpw|**e{+KdeehtGP)JZ7+z`#J8({IhwTj!Tvq@_I@nYRWr zM6B)$EKbfsFNP1y*@<1Bd>h6odyBZgx@A6G0=p25=j!Tl%QK7?6~0hC&T|9&y7jlz zd%+{)YcnVP?xQ$&gKDbq?n#U=Z$@7Yqnf5+?@uW?{6X=Hc`=eb%)5<|@}$(U=_Mhh zr;hYtwMp#!jeKVcuKDh}Oi888%T+4Vjri)<%^Vz+g8!5)>AgN(x)d)VU7p_9yLFY7 zc4s77{=tu#SWaOKVM_77)Awqkl(I|%{HHN+mi&UF34jBNfcRqjEC4D$5XI3!vQ7?S zrhPZw>S9kPQ(e0f$b(BIoza%SLa1hFUft3i#{j3%W!c4Qk;8$w?>%R>D(*h!b~=X= z2hY{NPgkb&+$WR7kY+rK_{MzC=43V@96&bjL_bZTrj}Xl8;Fh?ZoV$8{0~`zZ>+9<%l&#yw;Vf7Psr-9?96Witgl~J; z;0N>R`s?BO(4e5OnhPx*;V%kh&p^Q}90Oky0b*O7zaN>i$xs3!BDg3xbHUt)1>qmD za6^-T9+z!Kx{lk|kes2d$2Y${P0W>Kt=r{y7UR8e(rW!j4<)-2T{?7fC0JMbFy@D8 zK)mjv3xCq|SvM0vKWk zggy?Mjz{nHfnzHVGB^+tCGsl30f9mx95z!^24`nyn4AfQhbA?4)R5ZVhHLUXm=(aE z-Uh<{O7B0rq3`?@K0<%(!S>A%tZ9!LEwV8kd>8XP-BmoL#9JMe1AiIif8F}q3@BG+H8lDZzP{xLd5^H(l90Au7Q^f7{*P}W zOLU-C1#i(-XWrOh=)?d=Px|l?ZL$JvLU-hdw3>=e>TppnneVoAe5G7{ zMTIAR?afJ)x7A;qrDvaWG=4HIz8}%_I3RIlSLCX9rMoxY>}$pNTeJUBm4L|Ge3TOo zVCw(xJ#+tAb%1nr8%R7VpG=C2ii=wSXkz*2@8NeXAIwmzK{xanL8i= 
zwg6i+@q@qh6VJ*sxnWO7khq}L4Fpp{`?qgIKML=5gXzT)QYNcErcfMQmm^q~V&7X|sFyE@Og(dsslr(}L@`(}(SC+*oKhrfNFlC3MN zD-VBj!=wELCfp16FFUT)m2WUe2-RW6{NPC`eVcx%{>MhFSKMC_r+v9`w{G*PdNK=& z*v`dn6S1T@0%H+r)h)zt9ML{0UG4eZva^aI^KK!|Fyf5 zBLBbgrC`|V*U4d0A?mm|6E+%$NqKBH^IUXsdiG7T5LxrL@`Eluj*oIK%}sgZQ_y?1 zI?O*x_55>-zTC8GagmGXTHsxR7dEF#?#zmbULEG!rFN{Ut>$U?-zLzHXX)zzGd1ggNV=S{*m;m5w~-$4s8`w-11BMwa_x8j!lqg z#5w%x*9+OQ{^8UsEG#QftLeY+Ba}9WbTSsDZvf97`VEji7)AMe;#130k zPJ9`u{^Tin&H`QSPlL9u34=Txv<@Y5YiFW~#(_Me6687Hu0434esY}&Pu3%%cHPyv zTKSQCGThtambtk#l3vC_J|b@igtD%#^XtU%(ooLG!*$*m6HZD_W)Tq3fQfSH*F(#T z5(H$%YNRQd7-g9n$ypql+(K-M)YN?!#rGaxGu_y*(9(<6LPJP^1Vk!G(5$95v$e$! znJ%2f9)Ia3bx{UZJ_*R~aP{)bDn1HF_k7YIFDz*NYKqNSY9u1}m6x}@x=M2I(IOY5 zSW1vanNo3p@j*ff#9a}*+ys*1j&K7fjMdp8QjlzRKB6WqU{Qd~N{nyQT%HTJG`-L# zl7)6J=yifPpgoz)&CNjunh0)$O(Xv7WRb9=c<6uh7lu#3TQ>0jl(WD0u)k?RU;xale0OId|z-+kIj-TfBiY<#~gBNG9iI?fen!LSbl;`geLHyVW6!2Kg~!D zPtoJPWy=QzHpANH5at2*^@Yh=W)pz{Gfa&nj|=k>o)IC3X1AVK$_LEUyd}kE&GSO6 z3McL>1MJ|GX#W;-SkKgcv+*M>es8Z6_vUaOE|j|Oy8V_jw~K<2FuU^LRlaC5$zAuu zE74n#DoSzXth{Pbki148w!vCBj3!{x(C#1Z$JsEk+xh&*p#SLATWPLOy>j=SyoDsS z*2;S%Zo(^y*Xc%rpIjoM9Veo1ufbFqs0`2N1c!nU)>}s`NPMH=0S=%KcJ1*>9g4UPSaVSARQSJChJe&gj-e<^iK3s%r4ao6% zWe2UQTxx=w-(T_-=V|(74eIBlE0p<6;fGjxs&$&0RDE68rUxafm2L0v*PAlJIO{=L zr814IXP=A2#G%l9!rRAZ>*MwK07QN)lTjbj_AX*okP(+>Z_i0yT_*_{P#y9VCjP4z z-yc_9j&=}pp@;eo6a@x%R9OCXM{zX+b2Jbai0d+Bl!{hTr%Huyh?_t|5mRbX{|VY1 zuFvyQ0qG4z9bVc&wDrDoIg$0X2u*C2FTNj((^ZQ_O;1v#Cb{4(72@qt*EHp(;Uem!TPeX#Yto4vMmIH zY>(gCEEf=+%q2bG4}cL1z99jp%1P<&R`QXJU#g9_x|yE=S9F~3$~%}Ms^ZZ&Kh0AF z%bOYDhn99HI!9J4x)Po{7;t{FBE@0L^F-3AIFXT&flB%27+j(A)na<<4Qw>o)6Mj6)S}~k)85N|4X??={CRoSqp4Yjj@RMq`2u`ZX1hl2stEVq1Z&^6=q9sMdJ0L0LaJdL?2j#!Nxyxa`F=tQBZS z&-4yd^YuBhZ9#*n1Nz*Iv zN-2gt{JtypA4LX^CNxF)eC9=F75d2!Ya^9Vg zbRvAN$EhjqIf$Q-5Km7V%AZ9@I(f(m?iDk-@DAMAr%P~rU?9f_4oHiYKt)ABB1{0G zO12tN8=^g>j^`RX`1<3_e}HtN(NPY{`CKd9+;PsQT(9?SqrobH9ycBL#mSu9*fC~2 zr(+XBFnxneP&Pq9p7krT8+EzuwmlyIo-1>)NF7f*v0|xB=zOXcEC2kxfcC!|J793cfOY1 
zv$+SAlsLFK?_WwK#L|W2%ojV}QSASE!z#$EEB)5T89RGZDO0Vtt*6(zp+qGTQtbkN zFia&Iwgb&T<7Qua(89&-WZtz^YNXdOkZ!`A8h62fEcwtp_*2}!Fk+gRV`zQPdLh6p z#YzX<87o&xR?9l}B=FKRl^pz({TQ?D_ zMgkE7`k7ND`r?Y*gCJ!lem;irUs&zm+Fp4!8kDtG?S~2!mEDfM$IK!m`S0h#cA5cU zO}k(Aa_|Gc3}qYRdYkbNKaTUOKVm;wpT^BpV%3zdH^aWwqauAJVkprw*MWz#zlPFJ zI_7l18B}0|S!cJcj*ulW1>$qaw8CV*cQVrAPSrPd@p{wGP!>1E{n}!!fU@_2x!1U4 z2X^M+$wRa#DOgsv+Nma6$p~<;!)nfaS8$p@8wcX+Knm`xW2HfIWZ`}UcZXczk4lv0 z`{dL(|96>x729g=3V8j{?7u0)NxObefDXw1DpFqRc|n6D=>tL?TR#JdVz})xd9r51 zzw)-cr;WW2Cxm2w@Q5_2@D+Gw9S4w{{+pKx0E>e|r5}vXNfcKsHaJTw&g3PfrAau( z9PZOndf#Nndz8Y*#L1zQSEa-FTradk0XH|{@y_^Zu7Ea;t^<$Wt_!gqr;%z*E!KjT8qZ40<9bBVB%yu4gYt0&O`yut4@VW9& zom}eFd*Qir@s{ArYud0v}uv)Z6rzx&ZV8bdWF8#pjCU=lGk1zg7@K| z*6E03p#41i;|WpZ+{84FFTec?{XOw8rqNc?D-t*DiqM*JzkiTNbBL`ZFVNi)d7WnC znb+^nt`u#7k&h!m!_yja9|P@a_KH_xi?NABUmWv2!QoQ9bLMC3ht`|)-}rtnUUeys zVbyfeo@kDJ>2)akskbFJrQ0$noRH7Jk_4q&4`=385QKQ%dxmblMSQa@AWxWTZNpw+1-o*8z z_ht^US?lI#H6>c^id_~~0ednE@^h70QsFlc2P%M9o+C)9sw#5u!?i+=9967yY|u@= z&fMNV!Z};MRt#E5Y4R7zrFMRg&?4&w0{GY>b51VPzFexR3~Syx4X3Zzpq&Ys$+b&6 zhY3B~Rl?doeOUxADM(L72W-}eZtse|Sb}*o-iZmag9V@k0T1RMTMob966jClh$IC` zI3YTGsL%oOtKY7BYr`V?Q%cutc7s^z?gj>VauJ1GfRxGQ_o5^m?AR(tmYfmq-=%L% zV9Aoixt~jt=I04C+*PJ~W-s|BdSBN)Jmzlak#IcK)K7+&**l>+%0{m$(1T+J$cYAh zY+45fNWs)+q0DUi?)h`fFWEATzP`SY7VCgWV|&8oyW9Y#i?nBh!ThH+vdh_7v8~zb zU6Yb^y$1|2$+g?Vt5;j13r)*Bg0-V0mJ%rIWUFwyg6Pd3# z2B<4Ti>058-t=ROWXy99!G>@~#nV$9inC`Dq{YQh^=}6v4dOjStmgo6quAgZEN3!(=s`^3p4D7mZ~Z~hk8aB zB3cFSR#;>tg0M2$I6e3kB6@tx=34#1f4anEboXgque<%Vi^@pkCu6!86 zxaWF)CSk=$NqhQxnyK;Qn!_7u^60jg;T5!^c%B?xc~-;Zb>SlM6fUH{>J3Lv4tdEGoe zKXK@f$W|r2SmB|@e=6=E+NM73|CiFsk+VVN0i%Kxy|=VIDyCWRA_Y5kZM{9uW|mlB z#I8`hR%!X%7d5uc@RqW6T*1#H)cSgo_bhjiDG;c)5mJyV9n*aI(_Cm6G6ma_DF}_E z&gsL^NT&kCjS6k16yS}-Fv=i}epu!GL2gmKH*IOk;RM8l8CaDq%h$bV+9OOLCzm(_<;yMA+zc6E6qBm z;5FHVm#E>~)&uSBUWY<~--QaJzrWMJ$&%K1Q2bDe{#v8>JKTp*Cb{DEZLWL1Fpi9p z;PLcS#p*)9!Oehz##*XJl;2oS#)L;-QqO*_YZojvE2QCX&wc!KXT4lm(?BamCW|rf 
zmee1VHOp}d&<^?PZ=v0KdwK$1yts0H`ulDaotW0TD4H9gQBazCU;uwY$;O5YlCTAu zoyXq@K2+NLcGBU(_NJC3xqe(fQfsC(>*IZ}BxB&S%xJT6%Y1etnrO0KZb^`FqGWSn zg<7S-gW*q44|s1|Ikk_OKaE^66xNY`)(Mx3L#j6t&o0f^i_M?(wK@t|mlJCiK$2t* zig}_>H#0^x(VYxTEG7Do{Xp3a-eIryf#A!a&Y2xa2?OI2VenA_%xAQc#y%pJC*)~O zH|@Bkz}k`mo`lbZ(n4Z%A<=8@6E^|DmTv8n*)+M#os|HCQ&N}9Vkz_Yr+yOLG-+fO z7A#vNq){P>%T9<||C|z^l0itxqwOu)PV9o~9@6w=-}(R#lf?HI&eVi2jjCMB>%z0g z*Vw|tMlYbz`{Le50?;WkJ}nK959&-DJBY3b>6|$5lPXAN!(0q0Q>$ZbKs<^m(URfIP0dL~x z_{2E>#!`JIelE1%Q)<)wtq|Da`i4uMeVPtl;BBn^!)EcMU_MRG^vKa!#(<`J15DAc zEr|OrtRo>&K|z6>zCNX%o*sb4+&AI$@t6&!tg!yh0L5kopz9)R(axA5yAY4j*;6CA zqn#t&)2Ac%>_o4`7GQkbp1ALz;-(WxW2Akdk#UrghvT4Y$}eHNy>cz=`1f6#wbho6 z9_n9Do(Q(Vi+trK`Ym%`DK(-epow@BasK`6{45u`Y&-|W0@vlE_Z}sq2Y#uRJ6!Ktt)q_3- zOM-7g{%AT})OVQ)G{pBUtcl62DPe?VZ69(BG#sbA%fsA_a$VqL&lmkegzvI@|H-S^ zm8K_nSwpz(k{gdVHZc4M+Y+?FF*J1FdiTP&B;3s<3)lAVcu|#(>JEC>U0SZO{ z1=)5`znK5BIuoZM?&^73{;E*>i%U^JQHFD%G8~?YADj zHYuK=&mK@DGwnJH9dxmq`x)coZ>F@h^r3KSgs1=<)Y6NOLqX9ee8aAC{kc|n!l`%C z!h(F(i|~P{8{`kIAgh0oU*fjf>THnHWX9}B4~6*to6)Dg+;OC(NFwPepR|4*!{XuC zYyIN8Da;olix!?Ism1*_fsvIDQ)E=ID6`IOjbh6C_nlLy!y#>4(ym+2tgZ35ztOXP zZ0pY-h-<@v zOyunP2z5p%S#!G++RnorV;daJ_lHV!`}+G^!7&Q-%!!+o^+qm2B7^M%{-y>(HIy+E zDxLLYBbAOnoOtmhV*H<|<898z>`Z2wvz49bH`e7cgghIwOfB@Ow%^slez(Z}DL#b- zPCR_M@W#1!B^#dNh5`dWf}7snd=s^2{)jF3Mde!9}f$1+aK_--2_kW~e1-+FZycUiXDBdGbAIaL~>V_u9Emhjjpt|LDpG`RAeSuTt zB0u*v<-L8cFzAR-^!aLSd^~YlG4t~icl1il<83mv3SR$d3H0FUzwr{qiZN6L^$vp4 zL1bVEiVBU!l9ZI}h$(Q2DyYYMmPaO^wH)*&k97Tv^Ww=|!nWoDHt&=ZO|p^}9QU0w z>h4s_Pa+}o6c`$LJQVfeJ%Q7vIzg$Cx(qlQ^4JxpF(zUf+1_^2XtD^5O>3en<=G)-pl*;Z(<8^ zJm!iqvNX6}gw>@5Sc(id?mnbJ78l*KtEd)OvXjtEqw^bPrg&;zVl?%Hy&B`=+Iz)$ znBi&IDxbJy4dwx^2BN`apzWr~nhZ4Lrj+fOiGQ5c$zpW6@u` zM!3q>o}(^=@@JCt@V9xONt%)#C{=k&rT415Tu-x$H*fT-3W21y0Tuk~fn#0~y*x?C zwEA$=YMeVKgpHED%@XkC3+jp9>tzdvn3*Q}{;yM{QuJl3CQY;`5D7qaCl=~6FkXY} zN3@|~t2!DQK-TYJVDjVz>=)7yS^9Ka0cb;TcA2(-QA{d7G`~1Nu*v$@m%3L~wPNi|aaq`h`Hh5IU`#TF+MZi({S)xeQMwJ5>c;P%MB8 
zSph&X3K<%eTn~Y>{_95h0;A!0H0>Xv%hpE$<)zB5j?*Ud%8l*+CLuI39tOy3)KI z2gvU;z~}DHQ;fX-_%VM>ckX3%3E4);+j$yljX#;}qxE`y~#%C_2 zSt(&-H<9UI2#EL8v^2Rj!K`g{``G`WS_p3Ila){-xGfQgInq`ES{uU{W`B2hq|iZy zi7Uxf3(%+?z&Od11nEhMT&^~0%xBIMzd}LLHd?!wg>yHZ6>L)o(iwU;m_uj)7MAKz2QK5VL~@zK;qg1t={bGWc*@7hYsPM+b*NqA_1+yyg$aD3$cv zd@1wb9R#+Ryp9gS*##wR2w)!(mOBD;TMDTrv$zy+!5=8MZE%BxYqPfj-(_I}hiR?H4ht-KC^aFhcA&SC zBPhh`>grL=TVKP(u!vB`(5nV$fgqwpQYYPTZw*`RU|{;Hr>PkLN_nLDIRY#s1(L3X z1tv6Nl?3T9fHMh!Xxb)IgdIOCtsppb^P|VK*56 zjR^tMeoAI$G{CnpA^!o~hkHYTtaVlb!jx{vr1Rj=fbP^t`;_HnE?Wwg0b@EasJqR% z_z&u6QgR(Rh7}?iII_i2jq=g1!?$W@@^S#6PseK%?DY zJB7(6tPO1(ukh%#{QOA`jPFi3yg~SgsDT2BISMFpi4V(NG11`SXxfEn2b^Fe~9+I&?Dg+Z(r7#SU{XlM6kR5MIP z0eO4kl_)J@z-S=aUqq%3jh+B`Z2@c<0WhJ%2QR=b&^J6BlAL_>;S%&K3Meh*0k&6X zj69k~_0&O#Mau}fMSz0?8fI81KN^7@3qevCLo;x%gY{OpyMhag)1B8=88o?ud*>*B zG)pVsQO z;}jBFESK70k=S;M$(C`7h)@D63J+*K5O4q$ve@0vsqo-7y_f%gC*1d;8SqmfCvTZ3 zZjl@GtEB$ub=U=W8mG+(F}RJHLN_Vy!AcaSKO8?G)o0=1xxLPXeg!EUhVir9>nZF)O4=&bgtRWM) z+5%(no%H#UrCOR0Hh})+V1I-N+HhmyIaAeG-D)16*jPkzeb(yO1pr_L0kv|dfhL0q zj|2-N+xPfU;x`v)DTnZFAmvi)b@(Ln1az$LR=ro{P2SyL@7`VfyaRdA`%{h2xVX4v zVFOJ3CIwB}2^l1hDt~K?!&&=pc&{mtU?E!;NZULAo*&9UoTYcofiIl9_%Q~Sq`@UV z==1@Fu50cw2=EPwV`3Pj#w?a{(sd6urT(7&Mkw^K@4VEh1(Y|}i{4)`$~3A{&0oV* zt~nBE{00g~3#j&E_t!=&VV8yMRLy97*lK|W*WiKIeeks+CtKAcGH43X&$0yRJMD4I zin814>NqIC(@PIK0p_8f&Z#K+J~6D?2!)yS5s35)zuYg zG*VGfK|wq4>h0D?sr%)Zs`nso4up+=4k{O-uFE9Q;mLIAy!y^J zdzBMl9tyax8z9aAFb=?;+6|U|+6-an#{z1v(ENPXy}doyzK|9e(EsD(SQ9{`5&S;V zti{L22e3#MfMykDIE0a>0)I0!Auxxt4tVxVSFaYg*7@U(>;O4d4zdEd<>kZxh6bJk z38;?6Vb9T?-}f*uNz8)o0Ts@Rs_m-1*Tk)cnBy2 z;*yi~HmCsgOCfaqdN2Tz@t}PgDGDz>KA!u8osA6-n2_AvQqBuu0rOVs1(~j_>er|#sNXA#@+q?nFKrX}@&l?&V;GK*XjojQ1u>s>Q_XZG%BT+eH zcTDbdKZB4D9vCKq;k+R9VK>{LBA&J}0oXH&2^OotgXX@zKAPADD5@w;`osJLpf!NX zimRq!ldHtf0m5^2NrmSi^u5wOaQM;$vy~EsC-e98eXlR{Mj%1q!L)ol(DcrL-c&kJNIS1h`$VrEp9p7gKVP+=;u*@%1otFEUp`T3d&^P-hPu}i20E!kR3o?_B zep1UD_zk!&CZg{^lSw8;OEsCMy>g z9!OP=;0A#>I6#LB0YIf@wBh1_UB83>9aOfYSv_0o~u=}VT`a(kr4kOv(DA)nwh 
zXEP5st@}sYvwb5YesAUqckA`}lrSU>uyUlUC!>HtH+VAE`{qUYvpp)}<`lT!&gaKU z%R>|g5L@k~c19itZw4u3)qG52;DA;&ZmT*-ehV&kZIGf9eE5Zmjh!7~xDOp*)3~46 z+1q2}bampSTTW+WWR$G^-u82@Yy9txak-gdZ<+(469h8)fzq7N@gW3g;UF-8h8s8t zcNDt3QVHCbb-XDr#FdNIx@{rz$IN4@>182C#xu}04}m19qqBOdL? ztFWoux`lzVdGyE}sM5N`s@|@PrCr9&z@kL80OUi{ejIFL&_Sh{V?zQ?B9tr~KGi&A|j8bf7NGQD%dP-(Zwes3Sq(nW1Mo}LHRoNF{E3OuH=zpC+*+8=5KHiYYLZ1 zF2sc0Pyx#|N*0=MSt$48K}VeDfb>KxD)tP@+6Y=0#nianMj&r$ngv{fp^~0XVILp; z8eA+aJUCCt@CgWlq5a%NXf1rn^h${{`@2wmhZ4!I{8D+{x64()-(nUK(TDrMTSKOq z{&_$Wm%PE(H89}H5OtEL60%asU%Ca|m6YK4C4-112po;(fX&17w{;PraXE8#aS4Xr z85iMmZO|f0qxzkZmDQ`6*;!`D1XbKRdHCSLMFs|j=b4$A6~Gi?h7}110133^!h&J~ zv$%Nu|CS~~=7+3_?XYD{67t_*GUP8&zw)SS^K z!hTcLv80&kMAUa-V+op)M?*iiD@gM%#SMtFFG@>G%fkqi-Z`}|0n(N^aPOW&_lElj zBsN1-6YkO_$H8aI{2RSBa6JHuwi*d6ODtHb%uGzOwQ$x(pA5Te-+(((>(rDf^^uY%j@k}LM`OXktT(~U@*XgLMuY>|JQuLa{{i_$QK9zSr`ubV3_g*$X@C8HyttA zc&ru|VnK@wC=lp_#eobwf>)7lL__%dU8c>@6k!%X+7#kXH+h#=RxUyb0cmX%4D(*^ zbAPIji?eeO?CQD@jgq;#xnaXOhLkqjV9qf-p8i#vbxGK~4a*$%>#y#O$$LO@CV&l+ zTV|~uA;G}5$jkfY+Zn^zmm3SC(SO9Yf194{m#zp$_h^p`()@mJ$76t!V!Dh)l**Qu z4uS7|X!29*_g8vvf4?&(zXs+Ka*c(Ua|Y(EqpK@9yYixjqHmeD3oV!qzyXH1FeB*; zOd-7u!5iSZ9}SU+R7#Uw`;)e?l$mY;Ge^{M1_f77PIq$6;=lf;ETotGDu=gVao`aW zqP4UViaLokZx00N&T!u<(1Pn~S!-$QWzCaz#s5duS3m`|ZCwMBic$gs5|YviA|)X$ z-6)bGC@3Hxol3W;fJk=}ao@edIs5Fr*IIMUITyZP zhMo?3q6z%$Z!jp2L$AsY=wSaw4$4TdgZtr!Y(Nn{!u^!xuEoI&*>_&)QF$cWGgVt* zT2-ZgrN%mHRxAy+!^QEE)!woSax1~=`ery%)%>0dCgtH(c!rTX@ac{?;J?VYGBso7 z9bhgLHTPCq|LHK3`^L-bHsw!+hZIWT5x+e1uv1l;tqJ!FVfNVDi82Bh#8&S|K-Uy9 zILE6qRAV}T&o}c}1(_8O^?f3=4A%L&L*ng|TK`or8povi-ePy)&+j6l$MeJKR!BKO zhkDoUb`nXtDb%OPL^Wh4Y8ag~v^XTpknZ|$0EV(Hgzq}UT@!J(qovIh zypUg!wd<2!Ri(2)KNlDpDh(dW2$ljOOIhR>$*NsTfJOyiFLboDVo0?l_1|h~^8=(e z^05NMa4RF@T#AZ|VUpue7LY-B%m}<>;Sz#rfJ@vUV8TGwkl43Q$YBd9zyY%(AS1(3 zQ&Y>QBPbS7*g!?2!u1Rc3X;&&j6e!(1qAHYhDvLD>`fjr1`dKDFW7~{Ye*!vV<jmXbn`3@(7YwE;Ln zF!7DH-_!~9f!WYqfZD*f9EM@SfBxWbJjn06E}$83Q(}0FeSlLA5f%jnZb1Yq0=+&Y zTJ_Mftgo-9O*bcljw|GuFueo=Y@(Wor~MLsi_dsqxp(2fcvawFBb)+%30gaPT0#P3 
z8M#2#ygpW(t$m7Ex0{=rL-h&sBHqJk>n0HeC#yyZ? zuoP>550(iKi_G8-N#zHQ*gC0Dfy1-{G60{EDxUz!Mp$6T0VtHDVtEB%OF{8r-mK2f z2`1jKQV2$fxbMTN7psA~{mnOJk;TLZWq^hVNizt(coz4YE`JVIkk4791+O_0iuS$L9tmi&#=b{+khw zA26045CU-s7EK}IDG@9H7|NpFge31V{!gRU8_4gLL3S4pxVNbnqsc%Q8OGHNltm{1 zyaQO0=9^lZr5ngZBuIcOx;a)9hHj6+ErSBJttd#_zzW5z_1_PG$N>{lK6hM4UU;GX zi=_PU14Bc@?d|P4NNs$e$%}$F)d>*{8BPHQ#4qNPOm%Vbf5AlfO#cRKdizQILj(4f zUtj}B0n)M`e*WJXX=%|2{m~ICOQ*f%4=_^*nP`eQ`@+`1go-zxjxe*h2@gbhWabWvhoMo+tdx{6 zG&WS;)!rtSMa5va6jsocmb(Vjoxc8vx(4N|*Tk8j`*JTL;81HGwwg@pKvua(IKYnh zq@=!}#x#O{A##x%olz%g7d6A8B8IPkV?zAJ1&b9z!5aj{NhSN{QW6Sj$}Cnw?9^77 z{9xhX>lK7XZ$cv4n#kQD8!)@yf5AwprS<$8hi1$2OCwBs3bpsE1_8MTB+HuyI8m8~ z+_l#&#{JR;*(?5b`0-!7z;c%at zr9+DmX@DV(wyqp`9feNGC@~zMLh0nl_houAL%a3<;+Nu|_Xi}G6e+WvY`bvx%!Di) zy$ShuRToGiHP0v_jl;$9ILYmlgQv?zcg z-wG}yfy4g6K~H(c!M0{fKW(|^lz;(0y}sFqCn5p<0>JBN4iT5gN!f6XCQu26{@S}4 z*{cVw3`XucT$_JL#o;pu#j@QW8oa?7vK$Z>qPhIy6{>i3>#5nY!3*W31878@+3;fJ z+ZQ4YL^lDq>sy=rc<@F}g5w4{_Dn%*i$PNyzxvGP`ik1xWFU`+u$(mZ&|d=WF+ReJ zGlWlf?_P!=r;770O=S808&IjJdeHnbC{3NGO(fJA{=;x+D6!$SqQ9VG`HF_DRBs-) zkJFf;sPuD{#pTgxiiW8Pe4`2h&Em-pmZp3!XTJA5#M6}J0mKALNdHUCMdyBwy)^FM zBHt3t#=u0tu15Isq~XXzGZSgQ!HAeKwk_Wb_BYwkz3}$(`bS0u7~AmmXhEa3zl;#_ z4UJ@piAUWoNZk0zLK{c6#FU;>%vTB;HBK1b`m=%k#%SMVGdQ{!XX1y#z4Tfpbkwr1 zpM%e>+K)DLXlq2Ah#xYZe484l?8V&`<7CPf2u)RI$p?aF7DcJ-*x1-Ij3Fv{`x_19 z-4QN=66R9RH&M|%Ao$P0^l|8MphL&#-v>)DcZ(({7T;MpKH6L+e*4JwisPXiL0>mc zZ4X6=zmN|3WTy?UQpnMs%LFV&4onW{fyn_X%v=i$Z4r^QN_tmQX@-upGjkuZ1@9S} z(@SCzF|W&C&kmsAWmR(+j~WqdQlv$7;(wMH`Gq(Fb5G0#nIfRj)bh>2h0>MGRo5J0 z&3HZP$HA-^4z-0?2M#}4=atshf8qM~0n29sZEY7c6cure9#pezA+TLTQW0s0rq^ha z>^@@Phxr8hReOfgan4G-Acxi3Zuv%aRErvG_-5>xTR}4m_=iF!ZRh$=sQ>06qFK2y?w07ci zn08i6)V>M+67Y=l_?o$Xiif>%Gx2ygZc=T_R&75+rW z%1fnL?a;klD;i zS^Ss4feD?jCdOiV%GkQ*=6lxp^-nFGgLdtjP!4bOWu`|mGH8Wx;$3=Dhv<2#;%}ez zaX8q>D?O!HLoyFUlnhho5vC^052)|R+BCG0P%nguG1s|Gx;&6!!?J;4oqwR9I9g5) zg<6->4@OrsBhWN5sT@F^rVv^dh%Drb6R~V(_J`A>)R956u^agh5=f;S1%DlXuXMD2 
zEBwW8yO!u3Gd~5PjDZ=GXPD=eAY-muc0WHvl<-kD{ITW_7#L9gW{0=C@Dj!%G{|K6 zT1`?CjP+hIsuXyvlz#9cBOwKMnz}lm_adnIyx=O^?PVV&v9rYFWKU$E3*^-~M;3l# zagaoG#R`(b2io8IQ4h8^VSu^gSIC+TA_&t!Ywjdy}6!Bz#MRYIY3$As%p&5!5yg=uS!`g=6lo5c6FMhP~Pr5l_23AD+#Q*2{m8v$AV;Ar6a}j+^M=l>D;>Udctd%j0sb zU)1Pb>S@O22AKo?Q8y~?^7(LkV-BOwq zZ7nUGDHJhMNmJI%9TC;SVog5~A98P*HPhxeaV;Y1-^rw9U>acC$b?=V4DvF78_?Ws zSTN}z2<~?BvS_B(86~^(+6zURCYZ zBi|;1;pA#!QN-OG2yc*7AO$-+RMTN?kX|MBVhJKlU>i$?W^T`_tPL9w5KK-@hHxh3 ziTyAjit#Rw zH*U&yu&N*I*hJ1wJXbR`O9;GRy{v=C6*fSanRoCMMIm085~hv=x(AhF5($(j8R6}6 z#NSRf;*Sp6h~GMz&_@JqcuQ-QUI?6Uow$?{nSQ((>-o`OWs^hk>GgE>Vb9K=hVLk6 zLqYs0y%lOV=Txz(|9+zj*Xn2(=hmE|ZrhmS994|N>?R`bSWi;D?PUAtk$JaziN0*RRBh;`=)n0^2l+TO?pL|n zzX;)|%yPZI8-sZ=JKrXU{mi{=jN6ms`dR=Y=c#n=H8ft341;lzp$FP1|3USF8)y8+ zXyBKy%j;#;fsh5Vz?o$VqS^k(#Yg-u?8I*I5(!gX@afjeHR1u@=)$U zZt9;qFZ!SIJQNmZ@?u#A(! zk{P24KP_58KWMe0eNe2lfnZet=O7FSAV6inUzZpXDFhC!l0#w$JKp)C+|y6esDH9v z)p$IjmS|@$Pr5Jhx&7$j2A!%H8j9|fD`gnyG}XAbE}HdT-kj3N?ugnsFO z4+L6RGy1p#oqr%#YFZktfPgks>O=AnT8lk;@ml5rJuhj_RtSKgnO(q6dvHT+5p%t6l$&$>Lus4Ld1`!sl8HPz2XzDpOcPV zCcl+5f3#B;r5|wr4Xuh!TEs}iVq(U+PXa)d) ze_Qwp?5^%*6HU~(vkN823O-zh9BTYN=$*VRf3k=oKRGe;=rw+w^jQX_kde{*+q_iy zhr9RBTg5Y!FHRF^Y{Z4u-K}nawhp*w*+@X3Mp&GCAdG&)K^bW={1^8e!i7cR`JyU` zVRGSci;MfpkEq%7f(N3X$c)2~t%TN##|5uG6nQv|KxmtQ{ zd|q_^M$g5S-oU&@Q}^o^3A8+Pu7?1f0Tq@%V&jYg#tQ!Yd)$Ao|MJiz5wbA<>D!|D z5mY)p*L}U3L&vy_3Va6p5|92Y@VMCWDH@Nb#Bn?&nen`QE974`bUTa?@U{bCtPE8d z8Eh~r2#bs30BqDr9$ET&L{QM2`1FUf*AGq?)`W*h9rC=FR=woE}obtzi^==mI z6G_W+u75l{9bx ziVPV}whxUSA7~kpBT`U_c-mb3+1fF$<*~qWYGl45ter`-C+^)7#f(PvmweiS` zzHfy2&~hpWAY}kcRfv!!NGY`E%HMxWEBtMOR6zUE&CBBD!u%HzBg64@-o2n;NqHIe zC%=+O-zx;~%>DYpvT>nzPh;E4$xFepJj*mn;Iwshy9 z$N^qA{^+1a>UnBTx`Ba32&k+AiGsAfC7xfq;ps!OYE5|1%#iu^Iq@F4;&g|o#5yH> zEglgP(c$v4vO4e){cl`gM9wrePTPEsQ81c z_c8WI6mPM5sswVdwEzFIDk;G5heTglyqW=p?9nv(Fjn+t)jJB(&s>Hhhd5!M!)~Rd zrTyI9Z5glZ&3uGjTc2uVMoH;~>2#x`Y0jl~HAOXQh-VkWAm?E+M`rZ?Dwv%!@ zkJ?Cajc8T;pKnK=JuR;kpKcIqdQXozu2=m@?;Z7;qK=lSzQ8Xm1O(|TuB2t4GowQ2 
z(@#-`8R_ZKfxZe}qyMgWG&s4KWlA{UeDV~4E_^G@0v0!K%FUxS)bUcE$@ zQ^Iq(X^TVL;l6aXXP4iCk0nlo z4h(gNfzo8zQ>-vj!`V5sD+d=v&r7OZ-tkI@3zaAF(CG6WX(=MYs`hF{<6Y$Sf6DN)62#JrE6wChlin?0QSfXXV{6xu` z20p~>;$llPzTX3ESU{&AB94@F)s!=@4b_Ju=s24#Us*O3$Y~LL39GnQQtWMt^~S}1 zFH80mlqYR>t3um4V`vn<0}p1E3CJBtZ`0Q|8S0%2Qv9%G=AH{OdH?(;sfJKTiiJ+vJFm z-2y|p-`ujav;^WEEgPFWa;2fbm9~mpCP7=9xURYzStS5P>d1F^rTY5%waZK{%cS13 zwX1I#%&HcGR75#V`NicQ`moRNT-OqOu3f+0{J*r-EFgO)X)m-rk^&xy*`JbVLe z$j_n_c<)hLD*^#>0C`2Td=U;Z5cU|*pC59@Uebd;~j#K$! zE2AiMWzGw>PfU3@vnp3o+AMxq3;1C?25JO|DA%T={3M`5oq;L)Sajjf%b=CjjDZ?J zEmU2FZgsvyzT>Oag&-rGL*=Kcdc%vRZ+t?2rRm&=egFiimM9oE1AGBwf)sK?QzAQJ z#@dVn!9xbdAE&zN>s$?5r=$tnP8R3kwtX)q>Fu$6q#k}ipm%Vz=48g62hDnTH9jdR zDWC)_n7xxQ3$eG7;Ni2QyKaK%w7l|KV^~u!v8kWp&|rP$X)9}H^&sFD{>2P8bStw5 zgqs>~xKH;;JmIJQD)Zgb#)F*m!GoLWO7ShgGeypKG2F%Ko@%^naM`d7!oRv^07wr;iT^j&sb)c*Nb7M6D${(n7WY~Eq(%mK(o}5gf+B{l|vkQDktSxB%(|fX^jb<>|kb}Nd@8It1N1|<@xh< z8G2C>UW%B?FAOiZl3JKKIbZDzqQj;F!)6J2`5@%l!Gyg)WyQ=u@ew8bSOn$Wq8$nD z+qV+3lHVmp>hOISrG|Yf_um&aq<(!Hrb@sQCzfb{I)wh;czrBSL(2nR2FnD{MlpnL zj`X$UtdgoZu9StFb$tU_AzFmza?=A>BUP={$0`J$(h3X3U%T_E zAovG<3(A;%ioz2M#=Dr;$(sye3&E|~PiOIpw4qtHxUW3w8MZjL8i$AIbw(auZ?QxP zRYetz=-d*L_3of|f|bfmFD0(G@{5KtLA(K0qLdz$v#b9Q4xD>m&|#-6bj+F=Ck?o< zR^n?*F&0+^WC&#DGoz@p+Uev3KSK#?F~IAaj}mqjK!02pLMe!ZYdipB_C7fuHvowDb9`aBGTH8tL(ozu$XHm{3k1^{?`5Wr?gnaw@HnxQ2kNGwC9V}f*xq)wX(Y1r88D(Q`>(PK%2pdohjS!1K|FM$U*{Wt_T_6-y0WC$ zBMKZ*&YppR&p`jGAM*%(uy_Y6ms0LB|oar zP`bL<11nM_WJQW2{p_8sYCMTvjX$;FPatb|{zc-L%P10A_Q$6DQ3~^t%Ldyt8bsg1ge=|1b%?i0hU!t-k(aV+%S=xN_=Oqq93# z@!}7L^CqeW>7vK6hC00iFbsepPn380Y1H)T@riRKK0_GWhe13dA|gyYs;(@JY~}1f zm#&Fb(E0_IRQbDEG@$-52nc)!E}2;BR3uFy@sM95tS0O2V*NKe!O;R_v9ZumtS5BoQUuS|EM>^8v4G%JsUYuCfdJ%!^I5E=txH zE83o`)Mum~f}O&H{7XG}g>5$k#fNv|mGgV=vaysIm_5JY9Iv%=q%Jb8UV$5bRdmjW zA)|2M+BlCRKmEnQ_1s2b7vHaPWNzCI92eZhu8HCKlp-n$j_1<(`11g6Kr;Vaj@#JrlIHKJI2%!oWq zqn*-(WYtSf6taHYPw@F7G~SS06lw$upe*R&4f1muPh+Co)?Yc7p)T3~PQsu4_s2SU zNnMSk^iY&3y~R`2(YT_0(slCUh6>HbhLRz=ag@8$i?*@C9XxOFM8{^$Idzn*xbE|? 
zHoELx4Z zJDmbcQG(Oit?mIP~mu4Xw0qY5p^H3XD_hSs{MB%k~bZ-P6K_BpH1RCN_!5 zf7M3*oGey5e}Fyw`RLEzca*{#{Z4utS43O%v8eeY95=DGd`jI`cVy9Ww9jSw%4?55 zgnOTz5HrN1)0}7NlyiZ`?DOP|*Ib3&#??AdtGypn0&efn%HE32)_Hk`^C-wd#nyf) zxw*Uj65j0noTbhJdPXZdx9zBT$+a+hgF93&j4ck@Hgz7797edm4qAu{wpJKN#Xsy^ z%HURImQi9{94C{Edv4ps_Vy74!RdMH#N-)K)`G)>-%%;l)7`<(ELF|tmIRi@hfHxr zf2FFWP)_;ietD>*8)>w}`_uX_JtqrVj+DZ2iCgF|Zo7w?Jge`Vr_fao%oAwXacu1{ zO{Fhp7Q1*e4|=N7sq*OiZf4{cyU3$_CZjP#U!bBnzlM!%03Zs0Rjj%JcN^gd!z6P$ z(AXP+Hs_^@iS_#EePIw=(e}oJy^rnckRgatVb16WASZ%F-|Foh zlb&xm6t)3Xd;lm#`qLDmmp0e7keG_kBq*m^uxmRO(>dvkk|n@fnjt59JM{NszSHY} zHY4K8`-Z)oqg}yJ%I_IDW$NoS8H|Vt_YL+gN2f8oi7d?;%z%Qq z^57<+x1uURrkSnD4!u|qZs1fq?I+2Gv%n~eXa+f|x0-v~g$^KF;)j`aFzjIw*hVSv zuR%HN3!MEh=-&T@S@ky^C;uLx{&d)R@%@3R7_9j-n9u`SkbRrOeS1c2qw>P%elLCs z?L8E#`7QZtE^Ji$!g5hMyLoOSb!fehfJsTooAqYP!pVWit>u-r&mLHnt6kF+4Iir$ zCAzOG|Kz$8rwARZOs%}Q9$(s>Ip76$fEgE6<%A5jIFc-a#B&z3KB^TrCEIZ!4YRtK zF5Q3f49~<_UP1od&-^5s(&f|FUJkbv^f-vuf4m?2%=P!`zx)dpqY(zi%>d62igDN{ z_{N{nY3@*2yO%$I;j?MYON#Ye{dY_8l_+IBJDkV(PN zly|na#-Ndwjii-`qDPhS^HsNJ@%6`U%~NZSs|v#P&LV7)iH%pXP~<3P`Mp*& zE#HwKFJpWFfuN89lS2mwqP(;Ellb{-UGGmor{YL=;B_0jOX8L(7|ieX0E*L z-ItjBIhbLFQBYXG+;%@C=co6s_2(~{n)OtI*;t*vFVqU#S(Qb&Sm`C6XecYw$j#us z27L!89wR{S%oNY&)dvZr*qK7InW9${{Yp`_dJY;2KXbe6SNb4j3u~^a%hU35PI^Wh zqAyq?O8>IgmrsE+3q0SSK>UPAl_Za(o6`@hY!>aT88d(5)r4iMPLZTr|5~7bf9vvK3FuGb%&9Xz$2! zJwtot04<8w#oEA1MiS_+;BpMP;N!=rr-xg~+Fhm}>EHuXD3Njv4ygQj$*LZEkK~z^ zRT=;I{Ob11oeY1navguURC9Au3|6AK-!dp%kC>R4D(ts7D(i7nf?!i{aS94X1GV_? 
zVzlMl+??1_PwbQGci)l;@&%TPe>@|DJg#7l@Mf0!t-zQjhcU-h%vd7cx%>5A4~uoE zZNl*xl-%5eBO@nJ>yI)?sc!BspfUX;6=G=STM+42(XFm;4 zA^ZI?S`au|q?4ppa5c}WsJJQJgkd20&vn^HacK!2L8F52-z=vMx}7;aveoVA#KdmM zXk)}~22}@lm=WC5N^@+{BKH1jOMa!#Tw?-h@=@*h_$nz@p)O~KyhR>B1wKg(~^=TA-uT-ik zY8zqM3qr^epz}b=p`#3yn5+!G7;v8j0A(F6WhMp&Z}^Jb zu)?4TxyX;VxSpNQw1>%3oc1$EvRjsKeEgHs-?R_<-uGm++p1i@)C5o3ez*6PG&D7l zYIPdE$s+If%3cJe9ankh^++xy2#5;EKH@iv!bju*=Zbc4T>vr=Cio*TF*Eyvy!ta_ zFUVL7Wa3tEa5vFuIb^L6Bj*S2=*ox`dlHRMzjZP&F>9DvEN=4)y0;)lAjQIcC+6bC&Y$eB-4?V90zLNZ>dAs`DNMNoa;`DV=@`n^3WX%vyWg@} za7iCiPx%!WvoJT_sEMyP#jDv2Q&ef`GUr#lv^3HikV>*dr|ik43Ds3i#->6@ z)0#O!tpL1bm@$!noINu*d;L$8l$Kz^Ws2k9MIaaNfP!RxYfA|}6I7^xEqQ+Dc_XZ$ z|K!ONm;GTa9r*|w+s zl%p^FYOstU$9)I2Q_8 zt^B?n7{poit82)szsB~-_1ce`*{&1#NkLC4ZKEgFZ%^&!qz~3y5fw*J;}xdh*b7FFgeP1-Ko-0K9x@49d3wEj zi!N|OAm*IF@de2?DDZ^u(9(W{|3(!?oJjXrK5h2L)VTf3M*A=>UZrNgy}N@}mr9%I zf{Ec#jMl1VDnu>A=Tf-=B4QSQeL}k~u;=LxQJCpyXgp$KZi5yj3;9yoa6dV3!J(O{G%IvvTy6bS}8wiU& z#kr^7SiSF-LvCQKo~sk37%R|j@lNd4Cv+N>7trhj(MVOpsTDYHAntxZoCnTgWQA?~ zhBzdZjG%u`0ZwVv<;CfPH@||QT14(HkfjA>t_+?Z;hF3Dq6II8b>mO1P56C{=5135 ze}xuqi$f}&@!B%DHgt2-QUuF)FfSA9kNLutlzYMj=!*Oh57(!vqoI03BC3N4Y~eZR zY&3ww#Lo7%0StXFdi(n`_>dr`h?uyylpRER24!Vr5Z-#j4dGN%E! z@?Pck{_go67d&~)|J1%DJ+BmtCx%j{%3&w!6qZFmBtE4x@>t7P>gjipqi}M<*siGO z{Vjf@K3$2*@FaT#_VghtJbC6+!B16#Se1AkePw(7&HFi6Tv^B5QX4hwv`@_^q z->0u{w3ypET@x?7cPc2{Ft~Ff?r*HE6>*M|ig~hx>~5^%~jcMwNpyV6|Pi+U`McF&D!q3psf56oR zs=N-kSCD|NDh+o|1{j=iy&56Gx$zW5-R;Sm+HFi10 zRA)OhIAG#u4YkD6dohU-)FPsy$UHHxBgy7&7yq3-d3Yp0So;C+ zeSt@khrS(j0^xik^Oi(LxOhE4JcP_X{LX6`BAbfLKS%7+z#%|LOaH+&ZZ(sAaDUq- zZkVTDKQnn{+|%l&P)0et$A8x$O;Za&|39F682NwrJeb*VUyQ9Ok&H)PAK^rPxYIF6&KDZQ7Dfnxlk zdV%}BQ=%{5n|ZM~cHBl};1Y@Z{uKd=A#w)Jfl4qv@ zKvXy~Nf06tiwhtc_rwWx0&_&~E@a9n_sqv00GmV{wB+Cw^at*1IG~5+n`8r6+-gK= z)r;NV2MKfn6#7l$FQr&NTTar>_fSfj5j8LML=6tTw=;NK=_9bAzh2`}U5qiWZ+Zkb z;5{mqaM_63a1BvON@$qPIWmbdg5e!A@;Zk`$RHV-Y8m~u+9~PDr@k`wSn;8KPn^b_gg5Y*|1%>XL%>jmJUrmrOe1

*6TZ(KuV1vlOJFSQJTJ9~0y%I!0on<065d2F6UVxf?azbdfb($g+B^M>o^ zI>o~Uuy;pFBA~ZaE7!VT95aDT3z%tN;GP6+4+D%AQ+qM(GVV~(SNTLC&Ek>wYU1xW z=n_N?CzJUnlC~)*ocmIF-LJ4YU^BrucInRxS3I5SyuV)N5Y6wZ{X+X=Lj^u3EtD$} zb>aB1pTWl^T-{NhK#UWq7_AW(G*{OZpcjK891!>qpt@fj$-5Lgmy(d^1G7IsfdV2U zNfD2*Kj4kL>xh+dKOGdZezx67O=I+PBOBilbEgPWyt9TnH@ebRq zzVApkW4!S1wOggD*yquXeZ`66B6~DmR$*ibpN$}g^X}~N#^+Q-`*b9Y+%j#S?jWAZ zkdz;6PIkhs(Q{~Fx_>_m?%{!i$5#O5>xBFY5wX3qw;xgaI| zQ40&Rjx?*-{+B64&L>45iLbgsjF><8;CjS+xR6s)k?7vCl`hlUy>#kfcU6PQ%TNY| zg&!OemSOTF@`03KO=z2&n~_KatDqz=j|=_dOWyu7pH zkIikzl&0ku#;+K5tqiIz8D@xm#VkP2eF-|z8JNEJ--eGApv`@WFjN^Z6R>`O@zoyS zy|ug%B?v>KQ-KMEq!NHeTZ20 zSIwT|`SD|hMm7mJ3j7Ptkg&qYjRafnJv8 zhrFrR-9OCTt@Y%49Z{pn$yE!8(%8IoP<7(4!*jXrTi%HV~mc)+vy%Tj)Sv@QuaPf zT=!!MSZ{E3lnPru$WGM>6T9_o;)?n%5HjXwXX~N;gmfd48BsHEJ9>Lxfir`Hlauva z6Ao}96yWk_*Di9Htr<4sLDEpLY?P3eE(9Mdm{jeRoSaO>!udOke=Ofkp7dc{b$z7s z``TFNVhartytm>L$A$FL5_i=T9mutdea24@4K1@w{+4l!KK({Y)!D(C&};AfdS|2h7VAUC{z|luzdih3vB2zO66#>>W1J;!Cgp(1j?~e%U-Yx>_JQyA&9R`lo#rq z)Yxs1L-n)-Y57YqNbvIVTF$XF+L4`PIZJouh6W=5A#?9Tg5OMk!mD(&S(|x0U@@r) zyGIEEPU($x@$i_oID5VH@5!+}JN|Dx{bk$fUrS)IbuEmf-<^a{Bo9{~=u@r1#S7U% zuiFJ3$bh}s14r9*tcaPIMSTYDe`Ne3m>pETn})y+X<~nwl@WwjK1dS~YZtwfhf>tG zmK|%sS%dcNRJ4QxwjtdcvoSb%> z6Pe1xVz=VhKCqTUCfx@09r*tMFzKc88AV-|UAN*Zqr&%h@SIT9*MZ68UHzIW*k4ei z!mtL!gu4o6nZn@V$m3`6No4|~1#6Td9IdI_EqLyq^|uXL>L!N1hSLe&E7G9My)2i0 zD5dNtrBN3wk6l*!P-i6pG0!~97>;oHq0PK$- zJp{|MPhxqZ*o|Qf?-;Zop+$B&1erPhCnzk&kWh<s zNcIHaixs4u!smNxj-F>&`?{3B_d}>#Y3Ff{97W86OSZp(96Drz55GW|Ckc#I<*2q;tE>~dQ@CM05>g5g-RYktxaNYlKd_~VB z(VgOjQatU+vPzM5d1+Ih_27WtL$6%Op2^L?>fey}HgpB!ui78mn?BY+yhZU{k7 zP^EeJkOUg^i0x&Xd=xU)c4v2Y;Q1wRKOr&v2-i*_SY09)A()6xop4?o8@BqheOk?j zPbF6>DcIJfwsPA&-H7gC@S503akkEM{+EvvI^Hbo=K~hgi(?Vp4jMoI>=g+Q+4k#_ zJ3ZhEInXyx4Nkx2eDNp!?`;|n_=Tk9s2^ohVJ_nCxMi&hWW~@V0)N-6$NGWzKJ=&= zFAEC`lR@THfUD@Y1ekjpbjERjZ3lE6e(+&MtdA!q&e}Eg2 zpEeE-b(XZNo|k?t&*d7qeuoYeD*G0yUx?J4jpCyfo3^6cYn-(bW#yQ2BFYM1`A<|w zCw9NFTCUzyXG^D8z|#v(J)P5ipP?1blPq9pe6gGclZ*Efz+^L&60pj!$jDYO$dQDI 
z`b1WC;1nt$K2QR85-t&P0DuNzmyn7o7i#@m#5kf-EOd09F#C7Ag~GBgSsG6*tZwmm`H__KLAHi<`O`*49*m(GH)B#!Jg9Os^2{a`Oon^IFzsRT?E~UW?!4 z>+I7%;65)-JA1TF+@)}NtOcTs`OBR$u(tqz3dHqt_j<)WK)Pgw!Kv-PT#G)lKjYB! zia@8KLmmhj3h(QS1<>T)=H&CdbIaca-T)35Sy(7Sp#yPb;LYV+TvkZQZMRxdJ&oAc zr_Ch4;zEY_EdaL-8i}@cF|+JgO0%*(clw;CtCpcXul^~rb;A=2_DG;;{CAzB43X|h z)|HqqQRy%G2CC1dsb4)4*dvCjy1r?l-Bl|~-5)GH9vw8!#_-!5fj!WD$M3*iN~A|2 z^92AB*}raQWk{a%0RAt3aJqtItrf-yv;yw(JKqSar>94$W}Dgc0XcFcz~PSUo&gFB z>uBlj-e@NcSTeCRukU%{hGYR~Zd1LGT)&SaM(>-bCQQ6s>2;VEn0Vyy>3OAnSbSKf za&rqE!>+){!ym1*cOo{3^q11>4o07?Oi_?R<7QBa*Lqh9jVW#4@a4Eu?}HX_le}v0 z;pto`oR*fxCm^7jDob2lb>q%gdT8EZK~S!$sse`3owyIM+W3ToVi0KHZ6FyqWU~y2T;pp)42gEROO5A|n4YBMRPwEn}HNa<6VD&k+g@_k}7ACc5qO7p%l@ zb8s#ac)QIWxv6=!BJem=j!hsfgN@PE-Te$^;=n27sIpYVHe%!!Z87?vKAVf9KK`63 z+}*Q?lQdMFlF%mMvsr{|^L^i&O9Ti7o1{Nr{j)xrj~v_OV)vc*K1DyecQRdNdO8|U z2d{^+`CoZGztYegI&GjgP4d#)*UEUql3uUEOOw6#dPis5%;JYDI>+bst4}leg^Te- zFiO&^f;US?R?3fYb?rP7Yu8_mcG~joxhYM=tj9S|xSy}-L(Tw)2?}*mfV+@#7a1rH zB|2c+CrO+N*v-(%qJoqS`0xe)>Gd_-h1W;~qCT8#TA)(F0La<(@zPMU;j9(gAaLA4 z5NWV21|w-)U@#yX|MvsC?79^)*PX+im)irbQ0Mec%11_iqWx>x;V;(?1mg=0^x9`S^?RavrgWSZ~&_-gj^>2-A&B zf=q>%1kWt6Sh8QRS||s$I$*1!kY_2VC|H;-3lETf!Wn;kJ%;|8m8+zu?@?UsAen^W!t0LYipZb6A^z_#(7gsjx`1Xq;;~?x5(YgxMw>!O|c?!+UhrX zVm6KoEagsk|JD`xr6y+nKCY^rMehvJt~oSdvNa!APQ`$+ z(TfwW*gsx(O`+BF6qEEC1-)Xh>0Tt@rxD!eB{F;w{C+H&QPhTLW9ZiV79-YxZ9L;sHGi2 z*r$s&$M6mFZzFjOq_1k~?@`--7VEM3Q(7z$|LBQcOY#y0PfkQp#<5Ug>)T;%E z$sg3!cV8zeBz?Y_`t|Gw+h;mjnd;k8nogKGLb{HnYIG+&`*mGK%iW?ql9R>*-;H9* z2zu{Mp8zuO`Knz9`Z>@g!ofK03skjpTU&WD*To*;!NCXUyd9Pos=i05P#}Hz{J8=C z2LJd~nd{2?Q1k@CIY>5)cNf~h*L0Mo@~833mq=882apdG&XRD~?vK^pV=I>GRH~0W z?v3Dn`Ynp<+}5(C=#59XV}D|rXtJU6vN^c+_iW0ke0%oINWU60go=ul8wh!WW9m|H&X%nt*_UfaOmf2|iqYca zoOad`yaK}j$w2&V0h3F4&oA~UOe(n^hz@YC^#L51+DV`UmP87G$9VuXT($ZnS z9n@ti9MP+TuOEPkvv+eK>0dB+^zE`ERw6YTS+DIuG zrere(>9vT6h|~1PyYx}c3eaE`0b$4RbQcW{65V<_E%bY+dVt@bfshKM8rDG4!@AD^ z%wQuRHYEjdc~u0%YDCTQKFxIOlq%<>t6-JiZdE)`>?t+;ICk${%QJ4m%(?+?0hTn_ 
zpuPp}iAeG(CcjILj4LG+;Vt`CUuV9&6&sgKN>NjTU2IZ3n)v3WqRV4EgD0eYs!=Q~ zOxLOn<z^^AEf?1R@FO5oI*iL zDh~zI-r=DQl!id$cFOek_O`aQg}^4b3$*~W{XW>xK`ukP2rAioASl9R{qGsxQ&-Oi zuUGKS`v?>>V2mG*1>k-m8I)Y9&!IR3Q}D85fv}IDA~C4o3uCxyrU*@BiI+)twX3pK zEuKbWjchIxtHkTvBzH`uPExwpvQ8@4D80$aZ(YNev;tdNvOPF zprMh63=v^S1Nh~O>W&p4G~-3uH(-l`3EbrB*7wE^-nUh+hF1!m;7#r1k9UuUS8mge zOwD5WZx$?caSBUj?vFkZXGrawI1yqd6^yETb4TYf_6PJskBJp~JGH^*M_-GqHF)f< z(CB&~C^a;dnVc1%AF=scS$y{RW(H`=q5WQd8z1L0SS-Qj*1lTV{BMIBM7(X;@{vA& zf2F}!j$-cW=~)Ka2>uh;D>uO@Uqyp0XxslNkTBq zh1NZ~xVU(}r!MeZ0&Y_d&}mzct^@xxB`~S5mr`6ciLsd;brX5p5Tb}tF8X1pb>*FB zP!YDb-f+v`J9?&Q_Y6L>+Wkrb#hPI;%u2TG*1iAC>HU&I-{{p3Cp$GzmS=o1Xc{TG zuOKI}dGXwO?)9c|lzNW(uRjE?+G!T0n@cM?QnLn|Z}f`ao3#WYSOw5WTG{Q%cAH@w zE+rTf1mA8HOoM+Xw@}CZjZpC+h?ZjM|Df4t*Qv{}f=Aj3#yWox2 zzFb!$7A&6VlQr<&_26M5e0aqOyQZ)!@n!WRgDTH6se-(`krD$zQ@?dK@Qlhij?)P9 z(V8kZ>lq(UMrs}iDS^Fmcnt~)A^VYFQ0^uJSON==;|?ooPW$6=JI`HkE=An&?B^TB z;Y@2EQomd(|-NaQY1lh(|t%pp}CEtYF%9cCA6xDSuV7e$Uq#=R$Kk zHRS!DC_|P3O@nT0+ssFO$HvzwZDzH}xu>4^kGcoh^$4h~81 z^ENof)+dT7!I_zzgQJ&md=%6c$V&vQGTd+_V2uuMNAR1He=*o!`QjBVU6;h26siBW zLkgDU^0z~=mF?RakM-zL^yJ&VMTS^3dhR)Zx;Z@LO0Z*#q%?GS`7nze(A20fPiB(w zI*Lqn=$o?z`tnv+tSr%CQ9RmQ?+<&}w5D0k1@gFq!!gwoxbFU_5`%zEKN%9q1cX{H z{1CGK;g;IQiT0)S9VRk1RhX(qUK(&&D!tR}N#6eI9_m&F{E9K4QINI=07gQRKp?2r zzE!foHV5sn^2xOXF91bhH`puPe(t*Mf1LTK`WOO(WKxV{)m9P`XTPftu}UAeN#6#2 zB1~1}JbO$2%bEVNv_hqn2}snGE&pRYn^j5_Ks(PIeQBjKuG{NRB!%!ejDj{Vy-r-l z{q*uNh*zj$zwTr`ZAQD$>*>!H(sl9(JqEl1b*42hBdPDMqWNOD) zqqVEH$#q zydrUkEvZY{WzdALuP^YNX#mGP20;om3Ga^u6xNeozwU!F%6=@bPupzS+TV4uAo|UP z=oay@kB?6UG}QBcn>=tDsQ~Mf!g~(~5Ly*XOiX^uPMoFXWg8`ZCVFPG9-7wwaBIQ2 zdaq@Wp$jUXfRYKaNW&0KN5H8!Qq&-qBY_MV#oVMT%c-)!5*NUCM4%72kFaLtfR7`A zgRX04CLKJwkO3d$wo4I5g8&<2>=BMZ$)sJ3}wnBzzh%k zS}0w$!XTj{8Z>ej=Q}|NR0~W}3S{LXfD*W4J^+w|Lkq@iM$erK=jP_7Yn*AJ7Qv>z z0Xj2?ka_0X{5Rruz&|Q$0S97ePA}4%nER&yS2K_)G_Y%S8?=_gfe`ehA^T z@s$(Yh@jStA_2|gJMgs}hrrABD~U6+v$WP3KzJigr1Sne(%tdb9TVYJAn%I7gRdB$ZR z zgB2kF^l<-k6cSMf&){$cA>RtDOvJcYr&w>0aT#(}5kAAEH&drI* 
z$?{OmAO$xPGK0vi@F{LGf>rSPV+HE>4~ndh3dn%LW4v|i1teM^PlD~))8~PAz`y8! zbkxkJKJzazLNtVM$JwK!qhZBOn19(+SO*c;bNGJ1@d1$it{p@dh_(QXKWm;^ag!xw zX7ZsGk^#+#s4k#M``^3r3^R*z7cztd{EwVo@|viCx{5$#2rNwdL4JJ)NH9?NX*A15 zwcz68=fY~yH!^~6j5ex>e#Eo=k`72Vh<1r&*#J4x`n zai~C%=}htwBVg1H%iF5L9fYY=LiWUf1t63r^1*@d!5_~C zaRScvdH~=>qPE~m*?>lbI{>DpLjGga(c!~x%E&MPa)7j|4c?_~-n}qIM8I&H4&<7U zQ*83)?!|)TI3m*nIADnDAcoxny2FEwi7)2MhygjI*THQq zIi>Ui?=?heTUlKV^DBE9a5_SI? z%a`g|+1e_CZSUAGTrDSS>^07xzkUgTQ!64ULe}g^WxhtH`s2s9;Pxj+zQt=wyuPRs z38)PsqC8#*D);2fA?Sr0=^v~T4gL{e%shm+a=&>)3m60U>({SQg%b{o^Y^P1bG1I+ zXL_Fcv^@|H(lzLzd@C`k9m-mW!n$&$%DRuF`SiCWyek&S&105qO}z^Hr8UqW$Or?} z<**67&lo_Z(nIu!1w>T>a&v8}Jo8S^zpbiahy$?NTk68Tu9U(9h73P{{-p98zpC|=OZNj3 zyowBtSyL!erN9!9hPVA&!xKJm+)N<6V_2#OpR3J^IuDH|yXAxd>+p10jG?-})w&`zL`leVMae47;HW z(BcKQLNrR2RrJRjFt%VDq>>G$#h8ld0kr}4e2c@5|t z&|_dARPCLfdmwULxV#XBGZ`9|9^|ut!d%yZ?+1s{7l@Mdg>=Hosw(YzR;_-ZV@pa) zQ+eg7TH#8FhbdGQh)|oa8B@JD{MFZ+%%tPds>AGTk_kUcq#k_SS02{Np0vL=9I%_g zg@9+1WBNJ1Tm!h%OSWq?{SAWX z6RAOhL_Pc|k};gPd+b8&uVX6vpq*ob;?{T~6y+r|pi@zm)2KEo7#;muyzpy!dsa$D zs*-o$ogaJsMSfvt5GDMf&{C5Qz=zz(mR2N=eUIbAFGxX2= zC~{#8ovcx}B|mhfvM%>6KRTc>?+x{1wC3hW^XV4L4tM{CuOhpx@@uc;XYiZx{bA)* z^1|r*_ir#Wy^ZP=D(fmBrAZ!eEhtZZ_a-44_v;g{ywU=0XK9o5|4Q_C{!&`ruwVBg zF_wm2#5|+;Z>+I_23NIhQ^%IiA|>Lhe>Ec{v=J z`k9)rSZuW7`8Z3D3%d=XzM+e*>llsldCAbA5~3Ii_xOxvqIKu}#;0NvEWz;{QD5RE z3zFcARk*`94L*h;Vc}5ij8uYFV-UQqr)ym^ot`ohDAu&{o+DbZgailz*$u^H%zg+Z zm)!w1!nEbW>s7*I0H|zziw=fl@#F!ZYAjQ`&d{$C{-6Pm=g%Ju^*^JJa~#QaCq#kp!r=HJ>*kR0YGg+mqBp(f zys`JvRoX#UXiIvo{X`js4%tq_Q0hZu!m`zI&r}*rj8Mk2g)z-F`X+_21C>Sn#q6BkqIv50kmq|H|Su2$ZV1 zCHa>R^9OvII3F%OFbLe@QVlKVe=_AOAzIkoBGJ_8XgWcj*+;@A|C&e^61$LN3P4b; z0`=>m@u^8kZ7rAQu8Of#K(iAPNW3J5o*oq3ChNDtLZ+cYf)$92r@#ty1(x$5T5Ztw zN3$k^4_51SR7`qNPc<{e&2dTX&7j9c0KFcs*zL~B{4O^Mw=>@w9LAmF;&NH?kB;

4wa|HB1 zMWw?*03|CJCe1Bc1;A{@&y|%wU{z1^^d>TKL_oiBw~rqZ^^ijy#4vKOks>V#I`E?j zCkJ#ZTT(qlk$lV6j$N=m`7cru()o77^1NL?({v!ZVdjNwud#e8* z0Gr~ZWuUj_u;NzG*@~*@Z`+FB<;r@Q_3Fw8Cg3o@`LL90h{y1URyCJi*V(tLq>vDh zkx}9PxV^860!34uUJ`~mmYa)PPX^jzJ_{Qc#e{BQ1kRo_`w$Y>mCN92G+8oGFYp%j z-PNk=t5nTrj2=(kt|re9t}mNZ>6@i#n!YBKg(yx0e1}X~!~-ETK@`p~q(FgV!F9JX zY{a1&#EVG50kuF^>(_(J>4u%U{GoHA#mvO8>qj{9iKXaQW$%7BD)FzKII4+v*_m*0 zbd9Votv#+enlv>EnlLSPo-_Jhn3coK#mwAKGVM=_-uL(X2bMH5i`*a9pQnLMXC12< zKcDuBhdtewTJ=AbF=j#KE`?~mF&RU>Zp^76T=nNv!gNP zo3x}+wHJ-Sixd78KFS&;?dr^O@i948vhNi;p zha8-Xax7SVakQcpfWn*s2m5J_bMzIAQpz(Dw@yr?dXeQY$z>Bmmzs|r<%!1??AbH# zNbR#c?4ijaepcz`8N={BdH+kr<4f|b=5%aTf62N8dJI)jQO`c#Y^>nV$)p2sGc+0W zVseaXcWX`3DlHet8)ceBU&iAt&fQ=oMhz1kbPI;K_}ER|m8#t%t1+#Q$y_B0kI_9X zz`jv0|Ky*TWM9z5J_8^o$h$!!J`WJX4`{r=4#V-M4}_Azpb*Tgm^(QQNZ^fT!T!$# z@+C<4Ey?}g>v2?^B)XVdI{IpF+&y;VM{K8tolNBkXGzMBLz!Ww#eEa87lzM%vnaJW zZg}d~H#`|!S{1M)B^ATE(&0Kp7bP7~@uyYyYjYE`~DOCh%kUw_q zW7W|o6E1X7Ej{dFTHH-s!4O5Z2fdHg^hX;SgUB09OlUC1)Bc>C+dNBt`BC)mTwLOV zaoDlo>;7+^DPGP~KOjI_EIt=qIfl(TE$cGPzmkQQe3l}idooR_Z7Ab1JgxCgw5!H^ z@Kn$2?(D>UPg)7Oz$&|r?pY%~yv&|xwfm#aBa8>>qC|v4E z(eu__?A^b=yA$zfGvP<6^D0wuP<@w4wD(xC#xG>uIXOA)_T9*||LFt3Uj*nS0ba;W zTD~?Gg~xbqZE5)o>On0u1qlV-Gz=8Etl`G+H?pw6CZd`Z#k%Il6e|%zJBObW(-VS+ zpZo|7c*wX;j^wbIHdMujcc)bOi8jeT-*H{*mrN-?2rvhM!Di6>?J_r?XMmenhM z%PR#{ts(N6x(#k;6odl5@eXoOAIi&n-nceLBM$qJjLOmjY36)CV8#UGuaDr3l;VPj z#Ro+xUIgu}P_9DEa4g*0kMN|)$Yo~=8ujA)Waztg(sq^-?&j`ZoHP$AR+@Z%7}cuV zus8m>VePghrc{qcaCYZepN4X}zQ13)>yZ-&UB=!(J&S&$Fy4e0VPinO!5fyxe_3PK z*Pq?nj9*IgsHJ|-_VzAcGl_n!%XL6xAV1@F9NiAU@bDe3Aa%~hIn#YIGBSj2gs&zG z*gGH}ZlHGt?QliByeBbyJGR_9lH|$jcYRCgRu|sPI8~~Lzv!6iDO*ksbqAOB@ST$% z#N+9qTo9Ah=G=*+@8@T!`jt9EWvDCuZZ7eX%fhFG&q-9X6me}EEDtNK9m#%Eq%0S& zW0=c!;CVer+d=Dx%)@Vp?<@$*-lakAs4cIqQtDid!&pmLU^FKQZCi!K-CrVsahD z6|5g~?@Q0x&Q+$7F6_}A^+z@IgYOT}4=yZ;cvp4WpKR!eVPd1cQjc0#YHRyMM0RAG z5nt4>U=LrAOgkxm-^XZj-zq0{zr{(bed$3^eMrb7MLVizQjdBb8`}xEWC)TR#7^^1 z@h3_6CtFdIi;DagTB{MTi0K5%W!POjk#-sR!{`OZpSFM~h 
z9x61FUyZJOReOG{%1QNaI5hFKLHm<`p08%61ypA4;$pf%#@7@$Jtikchd3Uw6qH2N ze!(%bGxiDMart}MiEM7P|4BquXFNm-@add)!BO;?qfCV#A#LNbw zm>guWh~2%tk$nU>v<}#h@93Z^y?EJ(#-|v1V4FX&8s&%zKTB{}piAg^X2xQ%Jw^JC zw1X)Jt9+t+Y>=>7yMQIP9_@T;Oa6fiwW~kh4M#c?iEEtUzh3=eImBFl!E1za{#Jno z{1)USF$j4N0H{n28ND#^k=-4CuXh=GFTnZGqZQgE@GR^>^7Iu$;T;~i?3QtZSr{x8 zOUq!|3uB@5hi7hd6`nmi*M2Ges@xO%gsC!>l zop-h!LVJHDyK&c&RRu~!`(g3i$-=My>8h@!b?x@QHI~Nnqge=c8|@XaxkZhLes3Cn z!hz^jXPIDP;cV(f9287#A<5^rUdO_TYE_7`WD##6AZ?a;#?Pm8*6c0+7s#lmUgb^=>*}1$<@WtVF?luga^lMoVzDs}7+D2mt;FhomtWx!zsh&! zM*{`pxBU&Vc_-JnuHmxc<3-^;YUPZ#)a-0(nCNwc)Yy(@GWSiN(AoXojqFe(aOpy1Y8eTvqPo{o#CJE@yTV=dkUWT0v5V*<^b%@yq)AXgUucMnd1ZNN+5sQ%TJ^D=m_mLVT-2;v`GoR$?3$y_ez^~{``fEE@ zwS)57!@V!heo!s({Bl0!+@QE{-;(&Q6dMdd0=Z#%!ssC=D4gW&tr99PZ$I0*b%2N`MPvY04{ zMklbcvO?H2SRnF0ABz(PX;*#9!Miz46Ha;MujL(%pqd%!sju$BnB2du@i=Zw{5NB7bxS~l)LPu#aT;2`^eB$mySq*`^q{ttVvm?CwpSv-1wRF_3g7FWog{= zvc8h%iWVp0j33yTu;et#3}{CWo3(1tLu zmtvwE#R6kwq>l=cLw`8CMu)%2&C(oWE}D^D4eCHJqJzyTB4GVS*4#HYHg@6hFNaf_ z2Fo(qpQ44?%fjPF!L3h+`4V}E@`TYvc>FAds0Rst;UqCN8C`B>H*Q`hZ=F5zIN+gc zOhT(2&9$(%?dD)w=ZI`YL3OnBS*<`zpVQGnlmxfZ>xXjZJa_BAs#+>`);Lc%Mgez2 z<`-a0%HDIv5hVt4WEE9-h!;J45CytMv69@HMb1)p>Doa zm;IV1(Pkp!(!t#!-t<=y!f$zkLHY75FOL;iUB%^|veHr{VH%-5;~3r~PNt-#VPv64 z?Sf2Gi-`DF1Nm_~M$DROSFs6P@71$dJ_l>3nNf{ zcY>O-BjggAo13FNJoamzu>0jA%M9F6Ha0g6l$a!f@v?@zZVKR^E(`9C@5?h$+&=r3 znqm}@aAX!DR3{Vrh%=72M?6mkGvC(dZ!L%EDKs{(XqUp@&+=xOW?QGkufYKbk2niS z6bF_$d3opiXHwYN5>?(i@7#~V*45LKV@?|BqzeNM9|!Dnp@2Zn9$75S)km6Kl}@Wz z16!-na2U_mh}N0}SEiEFp<@{O2#wmHXCr?8-O)zJl{Muf>Zsn42UM8x)bs691Mt`IVbn2r43m8bm# zu!_RTPagXPnVw#p5WoX{_9Zb}=%5roSxmfRa*9&*iLru7Xiibnn`-fS8tUMQYg3xK zO49o`*S@>X0at(vdnPiTbw8fr`t>*@;`EJ3T;Cq&=ip^L31x#`mbB&jDL!)PgeJ68;^)OPIK-Q zb=KY>p<)|%mC!{Yr7^VNC*bDxdT!b6WY^+gYdYCBh{Z5EznqN+BpyUA-|I$?;uUtZ z;7WnwPiVtaZ#@-Cl4+RLuuD#SsD=MX87J3TAz6W)y<&5Y){G%$wUp5Mk!4g%+*P=c zYgS%5#fM3uw_L%u|NbO}7*`9hEE>2)cl#;Tu+%<5)iUWA4IG&~>vIjF z*e4w0yO;San%zDMIXje>Ikv7|n=+>)R>ps~ajj>*DVr?15*D-RsSyhXBQa`IeOzuL 
zJzo$@Q%(*YNv+ek{TGutDe7KCY%Ia|<^WpA;5L8!RBlF2S~iajEL}Kwc<#a|4lK_v z@Uz@!%k!9P8?krdb~+p{e}5&^5#{*ttv~}mfz>1_L=&?i@XpB(Pv(y*4C*n?02NF{ zo?6Ylep6WLYW_;|a_#>AP6F5y6V|_uLuV5Vj3VN}b`yiCG#fqY6DvPIF$x*R>%JFN zlZ#$xC3aQcLx|o(PHJ>cZ|tinXK{~=#`mz&AXM1Fzm4mXW@b^bYP*BcKBlK;N>{Q} zCW9|c>pD1I2aZLfb8vCtA=z|OWU=j=!z(uXeDU(OPe(v2i|X#oM(Ygvvk-Crm>qnLaq9 zQ^U`7cs@xf3r(0ncLGBKY@vdKU%?~#wN^%#0XkA-qkvZ35G>r2LR(6D`O-)_ha1cYm`SgkE5GCCNupobV z3+4SzBD-R>0nV=XpQOGWZA|pDL^l(^NpJt)|K#ijSM=0v?q1ReIi?zudlo78TkM_D zI^F3IfyDiHQU=_9CZHci^3f&*a31NxsaN%ADf$SU9^r7#vT)1O)j>(WEq;9$x#wo> zPkp!F4BhA$7(=HRDvNz_&Dm;sHS$%8)b}B7M~7sp7y-dvj3DEDbY;>{HK-Y2+RMyZ z;t{cw7nzQp-V$^I$Zy)@2>YbMK?YIHQ09C}QD6=Ml>)3}-N&Elz-qn3YAxff`=i~D zwXMOwiEhXs7^62Q7RCH+RKq=|=$DmlULnz+A5~e}sYl)5eNV}lTlYfi)_Ha+R^o%A zPT;Pe03uWMXdCY(T>UBtjL4<6!agaol4NIxP-wI8Y))6*a9L;`l)xUf{ECq$5$|7d zvOd6g(Ik`9H_cJ@A*8gN2=*shTukhW-9OzyeEGZi&GfV5B2Nc{ARP^rPE@ z{|MveCRz;z>o*T+43d9; zQlvtP-bxSLWcx(c~I$|*Y#ZMe|I+VvgROcQ(T-n^XVVlO%64X=Z2dK z`QwkGF(={Rg+KEm$ms$tnWmmz1f;10u0ZjCgG`fyOb1o8_O0}kYW52j_OnRQe}FT} z8dMYL%{5fvD}G8t@juy&4-mcOJ80!sFN;EBPOhTH8YeQ;MRBI8JpH#`@JiH`9{jZ1 zB-E3kKtawAF9Nw41=x>_;Bu&7zmQ(izF&_~S?J1G!hf*gEBAbRcV;lpO!B+q`xtun zOFiV>v->eg*!P*O_hylULzRfITt&lmb#@>K)Zs$g3oQ&_Vukog!S7O0QPI@bFVw}N zX5z_aa?9g^@wr50Rw%u009@#mHipkw{LtK!h|=^lyYrOaeY=~3d=!hMh_usEAfC;3 zZ$|0Rt$SMXbL@llE2W&lKvRI<7{Z^FZaCjl_2(yzOiQC&W)1rk2RqFXU;yy+rLMt4 zkxYAW<%y9;o#7kd>ePweB)1kSiXq-osTXUKA(}WIMJ(zIeLt#+4!z|b2U6p4+r+-` z3NX}0U6%=@L)6NMKbHjFFlb?sstE*1lUg{BNC`t&wxPU&s@<2FD_Dmz`DFsX250%U zoFP350=dU-$V4Sx3M5JVICv{%I?Je`gu^I%-|%*>j!EVunJl1?-7v~L0txApT0X#< zUn$a22_lG6&w?8xZO<+?Mgkw&;Z79M8tx~tJHwZ^0{0vb00?+#%lOkIn9ED>j#U}v zuUOkS;K&rQ@xC9o7T#dCypBQv;YbV>2w2ihYNhuL4CKu-N#72dr|GRH^}1Mt@{tm(%f2C;{K>U)wB z6_Lv*>H&txIMNj{MW1GXsqKIIzpG$d3Jf@xq?1dA@QTzWnGA5~PW3dRp2x*?fe*zV zh?Xnk|0uw8dPRY#Kn@F}!b0t;-@|A)1acmf+wpG`d$fU|0h zOJ{Pjt+V^3#jPCGP7#s1z?QJ7$=NOnaT{Td4>{4qUqgJ+4uH)Bl^Fptj(7c6^z)b) 
z3p=|W32dfq<|JiJ1A{)~SAfSB)UBWevbY{@PYsVGImHM&$j;H!=RoyLw^ip9^LKR0(&U05QXGqXami4ed`dXJ|hxHc54eB0ZU`u@NGW8PHar{w5V;Xhx%$n)3@ry(Yl`hF*K z9>rMURWEZxQ~(U$!#myvp0x(u_PUQAaXLAz8PVm?0+`V1PIri&Bmy$xQ7D{{ukBHu zNAg>Q#MH>o9V_-GE05H(#1#S__=B>3qa7`C=xB>Bj`6XU|NP6Y4%mi#864~aR|LdB zk&23+A^KQ>t_Ntpwy$4P`u4EU(fII-S4Tb#^_L`+XNGr$x-vuOZee5dzH)hEW>WzL zE>6acK1r}|+01C8)|dS^lfU-TYHAw9{PtdHJUYnXqI$J%ibh8HXEy`W%=e$i*x0`3 zal&jU*`_(uo5FocbdPX%|p2Qb~AnY*NucQcyUQlT9?dMG310y4#$= z2YXG2_db64(B>{H>KKTYqGyaH@3Buo|Gw#6zMHbC5LsW$NsBg*bb8R zzI9R^KQH##b`e?W~i+MyTgf#9wKWu8EN^K zMs#TC7vqm0#oqG5kZ)gO@3IbRb1|H;T(`BhjrjhD#`H~7Csnv^bu5XO^Zkfc=jo4F zF!=8SeUAZM^BfI6qm`9m-|MvqrAXKVCBc|s6NhH?*DoxTvZ`umLxbe9P^=K<+s@ioo>f(hmEBstQcV~XCt>Hlbj!eEneFWIa_CsTy(4uS{ z_NB-LwzP>!C`F?V@=(lgmCUVU+j}O@Dg|9l$&!Y$_t4>y$t$a=bpeJg9AKY{8!m?$ z>81J$Kq`#fT?1gb;?t+jgY|LH!A~z;K+Ej2T|EUBaCq|a@^y7}=Fp$?e)-~fETHjD zDjMXgtur$;-QC?_FV+sdh0m$bRX(4Ch1^rmo%yC|VocqSMgKf%!@4%7=2v|VsvkWq z1s8>{>%OSg)>ae%#tz$P4^|!L-Z_3uy3gJLg?k+MAHtX)0emJ{q+LObC*}vwotrKa zOlLJ5NO3TIK=|>d24bG2_XM^F%lbC6c|a86JcN0?Hk_WG{v6;}_)himEytekbb|(k ze(j-0%^Ml%>9Itna<=H-EDGFpZW__|k324Besl+CNe)x(a!hZ)apuV0Ur9KHjB&qs z%FQZ=yDWlcNwE(9cd6;Ke@iQ?MIt=cI8|>f&1iZUGq^;5eUdN#uHfG)wlj;+EjcaW zmfD{Fuf!kyur_E8R!!B^Mt#r@%7f1F!4zwEe}DWSTX#}S@@+kS7Krd+3;+|P+G1{t zsE;2%R>0^4D7=0~p8_m{I9?&DO%OYVy?8<5G+RRm5;R0>*xx@>Yd;Ch2NM~dJ8a3| z(sl*3ojw3{xr2@_KLAc@zYz~g^6Qo8&JWr}klpPK2o4J!zASp7Hj!!c##Bc4 z-SX(00P%Q?9`_U2n2= zV;tDePnr4??lkUS1x-KkwQGA{U$09!9!VAF018)x$~m5#oDk+Pzqzz=wH~v>a)Nhdn2&+t^sVroY{0$SFHGrzKaTs4p6|RF5&eky&df!= zt7};~kdxqff8=TrFE@D~eq`2)i&SHabZ{$+f!k9T>ffb$+a94Jw}_u>vaANp3sK0_ z#nY5?k41|S*8A)+UkR3kX-sya)33lEh@*_Ht@8{M1OYj_imC-yRnyb?CVz3%_vM0h zz?FZX+yF%US-k_J%^0nv`jyY7F7j6GonT*f{TKA$0m+qMS_x14s({mj^$xT}$mSVv zB!aXSaJ$>nnbtY~Y}CGVxLM+{D>OXu(zMCX9kljEDhZGH)EXzZKeL1UdI)WCvdm=m z<}!$f-~6)Q^z$1#0zS$EC8nyXS_W$BqNe=(2oF!spU{qFa`_wiN}T%H4+3vC2%(&x zKATz1!vL6!yX0HD?c2j2kcshC(C@m_HPzPq+Lw1XPH1smt?sfVbJPDLNa9X_8nz_L4(EXnm$8n9`<*xJ@H`c#!r_VUC->zeq 
zzP-@CaGdFHB$Z!wmVB{FLRse^y5lF2oY7&?H*6euS1qz^LMZ^|hm1jGUm%@XHfB#W z0y^#lSn3uqPM0pVrR|u3RZynhB%1XWQcY3mO}z3E1inib;17$50(15KnMgv~?%8>Y z_~j8O5%VukPfk!MsNpA^7GUfe4F#I7hWwiUsPR~8!Ih%_ci%Bs&8o`8_ZiVnOHR%m zvzk$zEBEp*?=xxeMr$Rd)cOs@XTkl-Sr{osxK}vG^jiW0v`<0O`W!|Za=@}yBejX= zZGiNr?v4(O_4W1b`5;)O8cqfKC3C9=ysyUcKy3PM^{xK}75yqT7Z z!*Y$cusJla-85|(^113IMY)@W>K`1`T^HDXKQv{S>iBVQHCg5T+g!{*-K4|AL$xp3BmRz? zWEmvb32HC$wwRM|%=y}|(r~G0dM%@?#9w4jS_!8}iD`(ki|zmBe$H^dC^catEu}$d zLsWJziTsl0>DgtyG761Mqk><&c#-cuoN!TG=HfqE0s{q$P*b$SPU;SaRen!R=g3G! zkq=Otu1POnMnGvlkSTLnRP}!+81-;={3`laBUI6Twcf9Er>CY~3pa1Oe23idDIzEX zzcu;_!U!ua-0JzJ8y{?4Rqf1(N*`_Y)q$U5{n2}m21y?@~XQf_H$Ya`h1 z`q+DLaM!B3YpL6pzcD;VLQ$UiVR-ap&=)`y2L}gpN5|PBU;6h$=*n-)tGTAEm*X}; zQs7v_!_J;Rtywy0dRZfMboD6&V!$S(R%ME&TUDD{PzA>W7-*xd`JgGR{_XqpOR9Kj z^nZu79mg$unm?Bz34aHgHf2# z5y!y5;ALaGOB%VU1D2ZK0jQqvL!9J?`um@Iczdsad2<&4>5o^3GgrY&?WmJhD(3lf zG4OpEf~nP6FkpMAqtmIYqvMA7SOX+d2ZmR>M?qsAh#dq*YXDI8#?Ui7G&cV3+2QV( z)&&mK-Dryn%-7OVQzx7nxR7(8di_xQP+U%49*!1p%B_P@B6k>^@Y`5*_yX8|U$g?V zdbVcaLEVw&s$*%tuz{grKMW$dkwk~hR#;$w(Q6O#CBc#L7kG-*fpYE7k?Z#1YTcx3 zo{PQxA{-QAC}VT;Mbcxf!{d9&Q}RYm&d#4A>}^Ii3&RP7WfeEQ(UiCq@+ zb9MeA+Os5nOcb=7gn4A zb#iw_m)jZNhNAb4tCBZ8K16NO>bCg?JxLUNe6;vfG~>~XsCf|AjTC)%OMkneC+D1H zW;$%Ewd+!L^g<sDnkqkTwcIyXPX23V|pAtSBI_-wYDPO z5~eXSs^F16I9$pD@4bv2aO+cGHarB48f51Ef;fZBf1hU_uMALMMWtnCmjAJul{YD` z{xD^2W5ZSpJm?TOpMhuePvo|P5Z)cekOptn9{sEJj;Ad(Yk@b#d9MC2dRF(pau%^K z2J6GwrK29R%$KGt62mhA?`9pL^8OA&y7UW;ol+j7C;!-2caK~fYy7)X>0X`<9E!ad zKEtT~a2dApve9Gdv+XKr(53$c#TRZ{dOj%~XQJkI_W%}Ev7Xg555*z+3|ZEdPLcSG zUlo1E31(L5zqYEE+3lz)hNxe?a>veJ$+){FHWZ{}88ckUeYthv6+zn|^6ZK9c5`|+ zuIb;`#XFsRLPaP)*;aRljq$TgFrWYtXMWNlCqcYv&Q6z<}rd(_J}5W^B18*6ABm9mzWX6lCv$C(&7CBR>HHFDyj zaFQVW8$^-qJ`_shi!m@igSoSdTd}p$Ey6g;`KTx12ep#CldrF}_Uz@|g>Dd+<~VJe zNo3M;dU6E9F~ZfL@o!$;)N%p zmQX;WvT9C)m8W3ZY5s>l{L9`NbogE%0kqfT($YKKz0I#Je=SylS*X{VAo-d}=R}{m z<4}u6bD;{hk)V}gbiGJ5g)E$qFj~L@!?E9mbPLPeJw0_!2^413K@0$ZH;^n{c9{VY zFu_|9uVBcOq-+kBT)<9Kas~J17NQ(lx{30;bkCysZw|(eFlz7o3(4Em>fJE6>gV%* 
zUi^mgZk|VBHEISj()z!Asf73(FM7pNsKBA)o#4d(z5Wq~0NQ$buE18(p2DdO`deoJ zSpWz0c7O6j$<&nI`XEji$nOtsiBhkygj+sFOwA8=6a3*i@6I+wD~+8$6aI;`O_!R9lB`m8PzexMTPe4NtMx4L59r0l^B6d>c0P&QvFw zQFPAir!H+#=@kUp0hg2U^?8QHJ7}0FaMFrRCLRlf!J*#%2Mg_9d7I$zdu3#=8lDjF zny1t_m~K@9N{Rx|jE$?KV;!Qdk@s@b8}lVl%2IcwuDRG<>3%g@2I~v0Jm} zUAHDDCyohotGVUuQ0%D52oxE84Fy56mx%)>LWLu8&cFMs@*`rRdGWWPCIMU^;1#Px zS4T%^nZcL~^gTMLYbZeN`e)nFpWyfVIyad(18kU3-;bwQ6jfHl!x>Tj6^}VIlHrRs z9#_^+F`sE|w7jeE@z6trz~Cc-5M0+NcL&Z+Sy|cqdT65e{=tEQt}Z!VXlbmUSL2m2ijhOyDergBt-k5iMbs0meNRK=Qyd6^G9s-*{+|QZlrOPRk_m%E&E}JVIUH zzr@i5K3K#(Xldj4*S6HP&5Kpx5^zd%!@@AP81-_Q0q>sB%1QwU^y`+uW7?i9e#rN| z-YW)>It+N(v`kIo`&H8GA1(lXU(YoEAValpxjNieMe9bgyq+Ec$CFR^{)Iak9)0p- zECb#QEdceH=EwBt?@8on<7Xj;F)~5Ts)Rups7n1jvZZ#oQ zshrFtCxV<2rwuu`Bxy6`+Bm33y~z!ZEjY+5>dLjXX?xHRX_+zK!-&56W_9jh_z_51 z4{P$tq_#t9Vj?kUzYK|@)(F7*6NM0fCicvj?e27-(I`SaNRmax?o1Fu#eknxDQ%)< zwh;W`*;9ngDjw*UdsWM_bCLN*oDFbFmq~5~iH8N`va_|WkuZk`1E!LjoYl{P2tWkK zM_UF6mq7{eEo_ZUw{O2g-eqK`hWsE2Y>UF?W_r3F;B3j-G3tL<`rTIfzz|Qe=UpMo zM@gHKKjP<@X@mKL1UB^bqK2K2gHL-lKmA zm~8hw&xE+w*SZfp+m5)48jlR_yi1(u*Gx_R0uM#+*ZL!>C6@4}&blI=h7y|92Y>i) ze2cNTbw0w01tUTYi`(0ASy@>_J`$zEAQ4PSNnwP1A>@62k0^z}mlt{rSgeLa)6>-A z;^Lqsj3~TPXl0A#zi7V{pws(P&AB=L<=Uog0*PTtx>Qbf*;Sk4TLMABqU8Y(jU@)+ zI;nOv;KehFgrw{ru27_J#Y!2L!iQIU{D`={!akp80=}^L&^!SW3Om6|NMMu$vFm?& zomdc(2x$8~Q3`$LC_>%t*x{FTlZPBH{qD;%jlH*${1mu*PCEKcF~H6E>E`^Y<)2QQ_NA+I5)hI)771^h)nu~Q=l3#=UN z$8X5*DoDd+VU!#nJ`)EQcO8BxpOh3Gl$?bnCDzx&6GcFQ#V9651u9WXpj*U?bnsBU zxv86n6y&nkk2fkz#R9qUIkCvqoL3Yr2k`#L8ayUx6(CUhB&QpAn@d$?_wg%+@&I{H z5>Vt@0m-V^cc2RQ71Wl92Iy_l-5c;LbF;J2z-|IAkm!y00`QtO4Fl7(3o)?xMHu!k!DtwXuUMJpCRg8khgeG zz!&!KmP&q*gbwOD?}` zTl2cVIeXpoh{EaFdiGUvS+JIAhfqPLk*eSIFmwT>6y%J9OjK?LNI+~Py^Sd9(><=# zF;*PESWuGvaI$$W-e!7oq<~I;{_fzltSIJZNk%@AmnLHEu_cWJ{|`-H0hMLewT+U3 zDAFMz5+Wci(jkI$hop3aNQk6JNOy{Kmy}3@(kLxFbO{Pl64LeWd*1J#HLO`vJaM11 z&)L_$;>oMnoylA}9DEud2hpV1cdUoFm0osq?Jg%PMR=KKXI{Vrydx~!#Iv^=0A&%3 zPU#_#33)vrI0tN@)7t}Rn|S95Y1_RzVFnIjoEiPqwo$BFr 
zB&xtTHCGcPB%m8(itD8#zyYEY26}_g^V|#`Q8AZRBvafqkZ>n{v#eigcu`B z(b2u8o8^bhcN#G~%gq=3uX$7C|4>#oK%*9Ti2-IL{wXQXEPoGE0KrKddOn~*A{$*1 z{>@|9+JFWjMWt_eI0jx;OIsVeQm6D)`_$MkS{`hAK}ND}!fN-;_22lCuazEt8{wJ2 zxJ#Kt?R>*;)^7VJozLIFZ|9XbPPN^objq*?#??v0@$vByx+Rm+hl$D8E?vkYBn_){ zUQt5ozd9JMz-Sfb#>lINgN{KD1Qq)iF4+XHJdu;eNNU^D6YI60iF7pc?gImi0u@d< z8Q#3JIgxntU(Yu;ROX1sVF3HceThc&{X~?p;Q77=ri$s{J5e%mnA^DjPO(JHNs!ly zd&(dQGlT4$9OMS8a6citq2WEL2TC0(N{goY;}@cQH-UQSzG`SfBmLD+BL^s%^JGQXEFn96?>9^Dg| zLQD%0wdpK*iwArPAbmXMh`b7Xs3zjy@63fv0MDP8URrt{@~Znl`Xb>`k9EZ72dtM} zuA|yoi}TLy?wiQNGgEu$29)xkogtceC*I;*aKm2<$X0%(j#5-U&$NAkf>j$TNJ z3KXXBx9q=`mod;jkw&Kju|cHe!$Y)AP1mC>2H!v7%FKaZz7O1`#S3^iRUX6 z237aW-i59`0K+}>>MD5akUz9b7OYxjfbsy-B(*D2E^WY*1!h3Vf7FWL#O$JqeYk0? z@r}xal1X~+9DF2S)*R)np$Z9@*iE`s_~5?y+?Xw}gd^2@e)R}CQ|5R!EQ}d9yHBGs zngb`oB}+ZsI32k(+JfXTRX{p9^5ci~(X=TzpM*i$(IW7){`-!BB@QD?Y9vn&PDPgc zZ~h@KTDvv*wSt?g4dez{Ub(f*J^eQ>Q=qzPmuK>dL>No%euA-Ea~$*YyUP`(!&Y+} z4zNQJX=f?U-**z}kwAuLx=$hwUM-y5+>%ySg$=u_xnPwAY7jLJf?>nF$-1@-*N{0utgtDCG^8D_o#vh2%l*hjtThfUApg!m%EM z`R*-R+9z9Q*&icS zC9{i1KJhMh7<_HD2zLn8xmluy@t*u$I&SykG0R}eLKh}TmXdFu*F~JtKzp|U zzMja}TMX;B?b|nmoe9!5t2;#+p}?}bM;r>=Xan4RH0yyCbxBTm!YpDmk5v;&i)Kev z0@S}VwJDWKom85tjQq_)mGt#WNSiUt(M&f^#*e3(ZY`Xu^H=`qT-rPGo$QV|n$*t%fe~!V6AdpIkp0AQW6#nT&!zIK-*|Txw+Qv&UCy1znlfNJ^RhJvY z_c}T{&%uTDzvloe4toCu7_B3v1e8b@@Y;LQM}ns9XT}Im+Adh>r#;o5m2gmIKat(F zH#2JJxp(Jv?8$q|T$$TJ1$nn3oA|Ee2W98myW-<2TBFLfat+lUc=;?(46#N_pb!!7Tdw{`(gRwjeJc8g>qss^N6)XVpuWr--6i2D!^LW^Xv{56u3}zo9F$y%e z;Xx!g4)Urho}bmdR9Y&;CrmpTYzr(vQY*?0U_VtyurqAq&Dg<8kVY>f%H{f%lJZo32c)6`?wGbKH`@OB%uWU{+=mAOEB+n8Jd$(-pPG5TB5gkb&NgKHASh1> z$dV$UDI0oc&}IrJ+jszMh+q-tyIk~hxbM>Hxq`f%2@rs6TINLH?(cIJ&XN-8Z+_>| z2}!lv3c1z9Oic8f6HaiukGLz({G~0~7ZTUiuOXU!#M+^Wt1UxSF1y%`Oq}>NCt zDxZJ7ip((OC`;r&O~5tH7{uu?c0t(2z@|9~!KK@LZf_G%_$oj1-6(FB(ci~*SeHIc zhDxa5(-)LyW#Q3Pz8v3pcqkY*r{Zxjx#1S1)raZD*DUt%0XbI8l}pn-Y|jTvvj#~O z#~gQeK=K?O8(T1J@!z_H1``mbPmlxv1Rf+f4B43Uz~>P@-KahSofvXwLt?VvRw{)V 
zeZa{>Xbb{5;0{J@`_fuJ6#9@8crAM_C1Ay0B*tTd{N>V5tAj};4ee(kpZu}uzAJQR z@iQ$n?|8dUC)`FZVB3R4J4e3)jdcjt+V8Qv*0m=U=e^SQ1Awq@V2^{-pV)u332~4G zl#d9Ks*u_sQT^o)ZJye-dVM9#Xi_d7!o>wDBF?#a64Mr;aBA%l_vr{JNjooNa#TN6 zc=plyQy~`|C{WVYjghgV8Z(pCR1Zure`+Z>KFpG^_eQt4^ddy~^`1sl??B$hEhi_K z-G3Evr3J!KK-5jm+G3COvf3B_R#pFF`bs_xgNs)H5F-|W6=V(t6=5MTpC1coB? zN#C!$fg_7Mh#uX}`!(^!LqUQ)AAt}=!?i>$QXaJ$$jX}E{!Hbakf&P?17o0~4s}6c zN&9)VW%RArxaBu@9?!h{vAFcScbXY++ng4W0B4rh!TO@t{=4hS3l{fZ4Q3K|_fXGQ zkN^XVN{DkrPLVNrj^+R`zt8^sIT4B$CfEcbayDJvse^%B`NGrraD-*;jd67WB;XYn z^;t)!eyqvWiwr=6Z}PrsU`J4EB4|ka^BtrS#JDkBbxj}%CtOdDjVGR&t4D7;yNzgn z=ldxzOe>>n2aiXEG0NxsL#I4Cyf{gq8{(9c7kr$GITS&~KUB*xJF(9j0HK(`kg|g; z7!s0y(o&zs!NNXjZOsNEE~Ec@hZ`GcZ?@1!?rNY}w`vFNcNCe&5v0Y24V51*0teEu zu~H}4(efSr$!u@M$C8dsgT`pjP0BKgQCwgUsNJQHb~@oD#s4R{Ygx_pG0C?29F?Zj z(c?=kSy+Jnz=sn*-Hr76NbJC<4Z^gClCtnu`NxS!891*$F48ao1HbY)Y`+omTl>HL?wwj{vh71ziHY-Xbrz=t zv+$nc-)+{ruOy`E9ss$=6x?k`iCSxO;I^E`9T)l4>KHqGDR5TdO?|& ziwD9og8c#FCeayM`OnhEzm=I))cTEx$E4WWw0~gJ?Zi!zZyNiLE_KIvtkM6Qozro$pX_FlY87>#4K)f z1RmxuPMy=cm(K*@56ku+>_Lsb81G*EHyt zBxOX3Sem%QYsKnw_|>K6-AYPezkAqQd&vVMdeT{22SC z$$+U(G?kimfBTexaYYhuE9&NKk5ns%+udEa8yBZZw-foiMVrfE0re5diVtTPrCyF=HV0Tw~?BnrkF= zWpq0bbH*Wh``6Z^7SKyHBs2Vu*pO4**Tp*>{Ybi9iyPxii zA^5C%rT>cXY)5l?9G+U8;}v2W8bhOJlzb@)T?pX-;i^Fs2uDtgU@YCJ4IqgB5s*u? 
zxQyE{ik5H)VQfka8{n2A0VOt%sQ~RwyYC$jzBV16%F?$?OX$48%eZc%y)KxTNON$; zD;Vx5?PB)W4!8AoW7}7B>|V}z<6plb5~6ORXlTgSR-ILSSTou_*~aHGsVB#UH3zaR zO&xQ$s4GvMK0>`VC@K!6ypidHgRI#YtntDd2Rni;peIC!v=aOqfo2Oy=z^TSIKfbr zn?UXCh`#d}49>}^!uW1RJh9V#Kzsf=l$vbh&#jFv>~p>0M(ciWhl}Zc6UNpISqpmw zT$xxs(8^j~)2FN)>Gtc& zQO|2B74a|hVyfN-zWaIXF(~kjwfJ?3_C3eL8)GG|qKIEFq_?3?Bg&CoXh-Zx})RCp*`_<3Tsw*Hb*rukh0CSE6E!`#I`vvn#p_2nQs!Kfb4> zRmCjozq#^b()pM>Nja@;NF^xHA6?q*AbD!a#K!hrNCdvLg~fI$V!dDu3fGBR)8*_^+*6P;}DUvRbx$z;{8~!gbn;$rrkesTUYFz@RP9=$AYLX=@qyd$M82=P512|w*cya5Atol* z@&8`ZR?J`XKMqRG@sk{<`!uIvu zEqk4pkNh;5KUVyEdi6Zjle^(DS`2&B-)Fb$bW*HIq|nCkpyJKJm1KSx>NefqBWMkWPyKDR_p^+!+ z`-Z9K7H2PhY!X)EJtK?j2we(cm}ScjYsCxe4%pMj_En(LF~wf&E!vYsG9tFNvWdFl zIKeFse4G($JOoYpuk^v-8ZmbVNa-cqD4~$-_kI0(B|`eKp<%k!R5rHMm0Y8@AIgqR zCe+VtpL)9KKegO?6_)&h{zgi|U3AH2@~N(ObBZD~R-ws_5dvEefB%cN>I{%ZccOT` zQ5QXMGQlvAzp`lR*t>e;O87dlRCIL>?^ld8x4~OL0iqIN$O-!Z~yqyp;(w`;)$GZNyL+%pj5J9X#~^R(g&7bRrO3cfnO=1OhAsSDf%7 zqQDyWjShA$v&Q{|B=y`sn?K&X|%(DaN_y zU$f*jG>W+5DHco}W#7b+T!};-3F*QK8<|#rRgVYL?Kf_zVg&qqtD5&8)k;{jwA7xJ zF|}%RmT`}z`7+P4{j^cKrR_-ilCd$kX=GZF^fsG<)i3M8+=}N7M~y?hgB0>av*a;^ zlr)uCPbGmnEC_<9MF{Q&t10AEumS&eIS3#IZ*(SlddUsETxM|mfJ;_3+6WQ+D1%Ej zVg&+0gtL3jSCq6c9zEmwY)bH=@lE{)&7QzMuLqVSFWFlIzE5IqgbdPEXND7M{(5Z5 z#fc_^r!H*Qzf)Dcz+%KKU4uv0(H{hxR=>25=8q+Dz|^QN8VL!Bs+t--ZYHzca!-uc zPA3`S-vg?F#|j*TE-2`)6d`{L0S3yOc0)&<0z6D?ZNEdGlN)(w6kQeVPXU;#{P<+8 z4iTXLRK{VQzOSD|O^P-)RfVQ@%2kDZV__@L)Gf<;`Yg+}IWOea%g5;O1-;9%${}Te=Wkyv(*XNmaBl9-;~RYSX=|xYji=@Yi1-2OI`T&J3 zZmC`sfo2*c2@2qX1Mb&(RlAGRPjTDgK#|i0G9EBjw*!VPxGRyelpU5iG$l{;dFPHQjSdKo#SfVDIc~8WcRxk3E)>!T>~4 zO@=TX4OY@k_=$pc0?8tSbVb<4II%_5Xz*)BkU%(uM+@9eKu^ix{BdQFD9i$rIpmNE zBjXq;)8hwaWo{&cumuzXIIFTDr6-bJ*jO%44a%qqP(6X!6$E*;@Mzzft$n!4Ed3^T ztjb&-f!ScrPr+>#eD5tr+9-tL+@Kbe0#_h#6*mTXGrT{VyO0MkYEwhL_yhfZf}D%r z4|K?RFEwg4xIPb9+Q9cM ztEiYDn7B=TRzvbO8EgC~gyqEv_zL0!VUEY+pB3`%ayuyLsK+7G;`0EO{=5Ir1#p~4 z0)r?7UgnZtBXo&{piju+HUkd|1iA#})HZZq>N-02TzMBAktYH_J~}O40x-X68yHYR 
z77Pp5)ltNK>lO(~C%hwYj;M7nB6dLm5+4#3wF0>Wzv0&1IX<2Q&i}yxm>1<-ixU6e z?uak~iwGE2n4F~2smNT()ws{a!&CB?txW{^Ug6Fg^ZvaYEY40m*tFm%n*#CzXkEdP zH;qv~abal*F@6^ST@z)5^jdHmde!I8(oAu%9&ZL;E!@!qg)*=bU~|bJZ?qm(HZOfLnn9ynsoX3OZd} zFEBV@dD0LVEKt_MZ}U0+MFvg8-MPOQeuB7kHM;B<=)f^TJZ*@H;H%))~E zf0g5{QlT?O4+sWYs&Q`iOireNyxi2$Q5oJKBsDi7FW?6h!)-`7iAhfG0aq;WkzWGc z&>aZG1qeJDu4qMfaWC7ux}=H4BlKNe`Nqb^vhm&Se)c+k3F+}T*RRJ5hSIHK!1ELb z)(bMafI9hSs)iM&DACX^33Eb40JrC_aDnUX=~<{5H#7wkelQy&qM+yns699G-kBKu z??>lj1{&dLd`e&n1!>P*gk>>av4i>(31vJwa+j2pbO$eh6i9wxWMov6`GJRg-i!Nhq{!;J@=8m? zA=@f+m{y`NunFb=QXnBAVL$DOy8`$INVvH0-nwv>UuI9FI6y<1I~V4xl^v{9R=n}P-S6Shj4{e*fZJI}{B5m_Iyu8Xo0vUaFDVrD z?X0#eWC|Op;gW3qcPTiS9V{Qq4Kl``Y@H?#8}qoJ1GjlLfIX0xBhapmIVFvH0j`Vz@! z&+y+wq;O?zx$(ZL&PgZ`Njp!w=2Y{%G zMv*}=5^)}Wy|*9iV1n;i>U#kTK$btO?!`GNbf?Z+9yIeCBHe%;NC$CGxeE}GE=XQN zNFM%+PMrtRAxP>xP~Aam$bv)+fmiQOn8I|;%^9v)3Fg6luCRj&8XTp632XxfgE_io zU&p4xNom}!+buqU@Ab~d&u}v}Ha9t&xUh@B;>MM$i#2d5m>ZRLVks|e>*`A$ z@OkhL$L%itXdHp|boYTJ-c1=7KU*)vlN}-|VZ8wmLbsYLh(`y-=@FP-S6K8@f)8&y z9!C?He`H%F^lw^=c@l9?xHBaHE>dr(5}H>3)9s^2#Tqxq%8>t@^2lhD&DuC zd<|Baf8XBS&4#cvxFTYtX{-=Gf{*TgHpVm!6m76@h8x{iWwG-B`0MEKT7qtl@nGJ1 zX+-_E-__Q{@g=JZOC9nCAaw1BI}nW&+#ex<_ZD5vN7RUcUW=D)kTf#s%H^M=EyH8J zf5(G=|GAW&SWxXwyM9)N`Y)u606z^nk}R;)>3h!?=6kk8>oL|_RnhczpR2btO7Z8H zUkb!8A47||ka{a^mFTBmxqsg1t-qX!-x1)!(${isb2|2$4PSR`HS$XEP>y#N7y-^9 zhtIk)g-t-S8k)>Y0-IoueNTv)3QE3OAcLgI)YkID{eA(yb35=f zWi4U8RIC`~#`~t@!QhuCL-_PNYgStAU7pig3wZxco9r|vJ_%zsAAco9$Lc94ISIVZ zs~!KhoKOhvK(8DU-}lEntcHlnHiZ|Rt44qMrLDqDkw1LO*9hD|!5 z3T;Gg0CFJQU!WW#0ghn!xb{@JWAdM{=)7wpekDsX>vH%X@}BRDcX*vWw`;JVU_A0> zPvZGg|Dh#u@R;h@j3hq$;?lv98=V4I9aEYK_c{raiuS2{O^{7OECdna=EW8luL%`w zuq$43`<#`QB*HGTVZCXtT-Z2^uH5>a5AWk_b%pS$pff%Rx6i zj_!9SBX=IMCj|~C={R$Tgs$17|0U=N7iG6zv)TJ{NU-%6>YW9oU74(T-UWu0ET|=f z+%}>GLo@x6UJ*v)igtFTqpA#=wpVyvR#hOY{RmP^@_HCX{lQQMxhu)aUIW5Ez)cU; zWIXI{7}S4lb^BmE-gzD1t8KS(QsHsYg_il?5=ffPUvXM^$)u`X^(gj(qK}h6P!M;Y zy?wOaw6Te0Slc(kz(5jQR{7=p{HXZKhN{@mYYLy@Pa1fMg`~?O)eA%BTC@&oo4F|A_^82$Eq0 
z0$|Gi_`1;heBUFPw#T-dbC*eZDgwzIMXBWu>1t1vZn2is&0b#|N}#%3_r5b_RR4Z) zF;HW`F4@G)6v1G-v=U#XJEjchL||zoCS~2f-?Hb$inm+TTcl?k91 zYBDE!O8LLH7xT#vIxL6SdlFuG)S8JGj*A%Gkk?4C6HVB%uMg{)Ga&gd(TqI z?1~OqTtK-$qci<9S5;buVFZpf?|UKjpFkL&eqPX_Su|EE>=P)UXWIMVzNJF9c>bY- zM8#gSj0B*d2#6UOMsZv5TYj-_L|!vB)4+q|#=7T=8{w6m?aEiYHQw9snIFH7w=h?I zpZ4UL7U{RDm$%FMR>^*T%}g}Beto~iGRvq8wcVXU+%iKgqcEii2ioF~ubVW5!GvoM z!JGgwp#vMjJFdiwjwdT(m%T_j72Mgox}wTUa)#W4udaTERtvm3qM_-AyDPGk??Oly zT*i-<y}uD@B=7GNOzYrCySPltKQcGFx*~#lddd>Ma?z3>^IU-*dU!x|Dh2A5~p= z$nc0vr9bVP_o@3GQcq4f-ARk8?I1`^{9br)apw6HyJ=&sJkwU8<>)6KVq6W*@c<#5 z;X@+Zn>T2n^6i3g01Kc#G9mx_-T*?b0tzp99J4=RlDG=+srk>25Rj+%LslGb@Nb;J zUnE`)o4oVXb>euhJVmLd{0dU*#*dGeXXq1YWzBYmzt6DU2U;0*SIWWaTYhz#x-F4=$y|RSpT?8ygU4x9M#Rl#W|W4GW=a8lTlRjWHxYEBbBjA3F1sLtl&X>=wEi<;fFF3j4I%Not zL>^+I*WXDroYOfyGWqq#udl+G&9403gGDVHTt*faVhhV9W6R#<3c9XWxLxg^53-eW z{K{5-96eu>jddj5bC^$Tny$^85I(x&4NVLZI2I6q0j(L>-8Oq7CYWGm0tSIFLl{!c zMr$ZTUA2OEEiaJcL%S>&BOBcVS6qNza6nmU2ck02PZ!-Cv;MWi+El&x2+cXedJsl> zu1bGHTVgvc8-GqX-NEOPmoN4Acf!H#>S@|?9HF}EYEE+8D@$yslu5^$YO!_THB`$u z`1!EJxqmQ=mEYZaQ}t)H0~gGiL@9@`e#q#r4-cL3F>WCVh6`W(&d&A*b@bM?Zt1|? 
z1eLFTM(!GOLsZF2sMtB;JnJI6xYz>Tj@ReY+r0vq6Lk&7L^BvvVn$}Jd`nhz8l9cX zjQaW8-^}C9&97Vk`hI$@i#l$7lAzmv6Z-*|r|tDbT+0UU}HJRXbd_Y;c0D!wDUNC#~t52#-N;5q`6 zg=;r&CJDwS!cD`_kh*{FXkB5Grmet#%G6ZrRCuLq5j}4^R&yO6{4-+L5QQKkU}PU@;}tBv|qLSJX6i zfmRvkc-%@|c5c9i^Gc8M(=Wqq0h8!@JXy}1(m{N!Jx^OT+gM9(R-d==8RFW0Yl=zx zXR)RN4yZ`(E3{-@5CRD1NUBwAu+^KYPB?9q41P_zUBK$$QA*?+U%SUz^6bSbVTOa4 zyzCPaw>$kWaP1>wihP0#C6{*&|=d1yl3x}3~a24!j4NKM#x?GBb}icG7$sboL>+@jn!_3c%S z#Do~?SBXtW7%(+B8VrtvFe>$ROYO9}wi4S&01Ud`lV#-Ry>sC6b5|=aE+t5=mR-_X^H#v9YVP=);kEhwja0twq9^xW2KqSQ z>c*z^_fLS9pF7xYv7P^}AmM_BbH7PQaV!OuQpLg$W0%YI<##J>Ta%np8H(w`DWGix ztd5U?ZR7 z%Ci(oe6N;uHd_10#XtEDXTmK)548AXUMl6ynN3aQbmY%<+{tD6Xg^KQKRnpB;+rs2 z_cg+_`yOl0N`HEm7jGt?784)dgxJzZ@+W#+Djrjm^41eu>$ct`P%787CN-x%A(nF#0nM6LUK|1t5`nuiSF!14L5*ntZUWU}j@0HJr}9 zaNi~rX`PhQ!X2xb2xsu9ALQ_^cC*ZORzVGoLW6f|4bU-y~Es^-gI9= zcQ#+Gi1XH9KmbJWn)9zQs7D)IZ4nCq0Mg;M0S~RX z%Mv^Y{|x7Jn@$RAYijoAlBhPdY~fVjovzwR5QR4Klc;mGPVW$=Cq=y#M^JhTMg+N^ z!E-d;mJ6(?K!|s`;*Y&_q9OBpOIYT6J(WOQddWTo%r%i;us`Mlmrm&;ZQod@5{>MR z{qNvB2&nlEoIX6@J3Hrl>0Mk>GHk_*gnR=441Xj)a-z@6i@Ip-P52-w=-m#vU1A2VSekvy_k zt>{(BdSm~x@9pONOq@E;yUgO^{PtFU!qh~)ZhR`TK*_HF)IO^;!9*x}08Ar6(Hm1W zVK8WcOl$zbk?iLvmrDStT+Zx>w3xa&1}LE+awCcYY@+&M8vR)bJ8{bNu-P)shW}fd z?kLnM?D6eC!$g-Y=Q^}W64lBN3A~@2;tAI)bS1S5R&C`OB2DY=yEFjV80AX!05X07 z2=xuPA49i+%%||bi6B4~!x=w}vmoiL6YkB)yw-${O*>d{Yed|SKumtMwg9g`nQJL# z%a=(cNh{+*$sr^*Ipy)6>AuJ7O;uIzt${DHa7w-KTXrGZSf`=kVlzX2o?K7&=eXF_ z@dITpXXop)t&>`&x!r)R68*h-M|ddVfsviX{r1xjU8EjsD=$;rGXKN_VJ5=lha8ITptFZfDFj@8>s8 zFPUxrda_Wl7_TQZa-PcGH{}(2zvcj#0`}*RE-rZV8=S%(I{PwnoesBFShFEt1~Om= z-PPJmy2abfbs*7rd3(z$DSaGdlSBt4-xnlz7~n1BKt>2Pa7>7wUvf}rYHA|6r7&C+ z8TD4q|2_(Uk33zh-nfGvSesH2hk-@&k%cP~TG*7Y{S61!79MA)dh( zzbz=(FORXYAJ{s2*_oL26w8W`kFx|oh>mCRXaewPO5WQc00NaLnU zx$oac-+`R7t@JmM&Ar7oLeYl5J8#5dOFH&DF9tq!X!FF^bH^A~;-iDD2$>|#mI?3l z^m#kxf;#dSHM+lL^dtOgN$6DoCel zvqh`V{*h@r<<7*(saRr$1R9U0=hbwIw_U?2bC;6?+Kq!d^E^M7xhe3BR=;(lPjl~& zxA#qZ8a7ny=N1+Q13~FND{E1nEE9~3kl`5Edd(ghULNrTJxf*QhTq`z@RL8vWx%E; 
zC?!>iYq?-H+5PmVnAMA%nLdsCNa1+oPQdl7^V87^EwGe^1^=2`eqE&S->Iq1J{{Ks z^f~Tc(LeAN>73)jAAb&A=3t#Z(|hR)Cn z%r~8XeIxM!-=otxh#zM<|NS@9l8FDI;-6vHuvyfO+l(ymPKoD@iePr!S0Y+s& z4PoTssvH<>I_$po+d-`TC)Z*VDyJSLU~bOWPPX}L&%nvuZR@3{)Q^hab6RclrWSw2 zhobKk_SvrW`i zeZ>VJORM6aKuMoNOqH_X?URNa1w)dEX?5zI%~|ZnT)#GZxzNT@_BgBa8Jb+Z|4s%E zDAPUxbnx}&U?7+6UazzeVLX7s&?e9x1)7ZH%GN{@dN(l!l^=dE{nbIEur?yKt`C@K zTI*R{T2eJm=b3>}fK*StzYG1;wZGdhK1imHd)!>ysovfGB4&cd%!9ki z=POkE?bCRD(+GP&o@pbOcRSZbb@>j71lYtug9OyH>oAi*Tl3;7EiDCh%3WX>XsjP0 zfZEmpPie+X*f90gPXn{}*zTUKmodz#EsD_*I$IVn?>ggLwxj7!f9`BHl@XnEdmr_% z+bZoL7q{7uQBjv&=lm6Zy?qBo@JT0DU!Y6|W(~~I{2Ch@VGNTcQCy7%{@(5$9zeV* z1j}7wQ2r}0K_bieaXq3UFVpLWQ}9zHP=DxmPYqoDIh!0GIrLntPanXh+7XkfTz*RL z<9!l>xk$+A?85MTh)4gM`pEuz*$kVOOYCa_@loOUX2Z|J-v{WUGGVAPIZ0d_(gh|qJQ?y<3%K@CTADEEkQ zujAu$m73%GLpcMm?bXAmK2}Y_o!J{+x7j9+#R9br^9$UZsve!}eRB0kDRXa zfD>aoYd>Ezh9X8ymtz~eXHTX${E-c9J)64IHiCXz-l6F<5bP;AA)N(mD+Sy($l)d1 z!P%is?|MQ~(jGKH)c|X%Ydzs%uA(kK7)nPa0{UOT@j1F6W=ZTh-OC;OMl-wf^BwO7 zG5}qA;*D}@Lh0}VZ@~hON~!$NL+hrNJrd>pwgX3{y~?bY@G3ew!h%4a`_~4`E5rl@ zbhva`T44GPg%l3-Ct6yh(5~lzI*U&_5D-ncXdn?n@x{ff{Jz*<(vrekt#~<|I`2G8 zmv1jO5@YMEXju;L@86*%@5RpxO5gh&XH+agy#-HdPl1&d%cnICcv^&crL=Gxb=jDR zfl*r!baJ4_PJx(g@FIkc8F;`_%brF%YU51M1w^1~QZUV3E-k2@aASh006p3KC;5S{ zaqM|RZymQ~Zb!!(N|Fbe(dv9g-N+yyzK zkO_b5Vhb{!W}b-xzkWD}&4Gc;z`($1i(>Qgc2vGp1#^2%%^eUN?alDp*2Sv#&~hAp z9XQ)cC!)j;KmA2#YZ|)~75rsnOtF~u$q}uzg~gGK^vf~@#qsi5o3d-%oNqc!+jc5z z&9-kg+VUz|m6~xQhCyE7nGKc&JKdktbUyu246bF}lXgL!*lDR!q_v*JT}|P-K*y#h z^DJAvZtKY5<+1;J`kd_e`48DLXNQLtXAkT7nblq<@0>b?5Tb4Z<61fgMrimQ5Z;Lt ztcg<@f{Xl9Qt_0Olvnv2*V@i8u&}scF@X)60Eq8FvGWyj$G?4(`}`oidnzeCKP9yc z#g}0x)W5c*y|Xq+gl9<>5{u^=lluPWbS*6l=CA8UpQ664HO>d8vM!$qlyPAGx{t6f z+3|-d`S>`+TCR5c5KWvOQ;~oFdm+bOQfTt!u(7ucEkpF2^DJZk@!NlPP0c%Nlmjb3 zdRLD>kZz?u|DePD@Pp%_(ewR~uG1%(YOg&bGRUvkkdr>p@8KB~_J=?ID?^QzS4M%J z1eaT&!0#u>SV^UOtg#@$9$~3= z%(X@@7QQ7(M79~};CSv9o|ZJS^XCV4U_ye_wLt7&BgG_ceqY0Nnvy0y>6bZQ-kwQ^ zk!ZMKRjca5Cr7pzPZj)rc*BH%{?Vi4|08NpHhGeYqXBsfm_pk%)zhTvl4g 
zZS#L6aM}$UmNqpxInEn4ylHz}3BYoWI8E?mzi`fXKLGtE69 zU-p*V;3|?QGIYPMvOWWY@8gRvn9w>CsS8pOlz%i%ek91XS}?nuIAeeAV0O=RGpxBz zGyiAVe)pEsyC7Mh;#|+m%PZBdj|Ea%*l+B&+vGE~Xg0GAHTT{G$n3U%4YL#Sjo{;e zSiL{9`!~%=G^Nfeh-&IF=R`c-9+}T&KH{^Dd>r>Y-Jq$pD=6lBw=4GZpP~Nyw591l zs0i1sA|qqvX`vc>vOW`&%VwzSij$;OR=IbxaOuX|2i(!U+keJVuhxY7iEWJNuKxzi z=Ua#fGJ~xtpwzb1fTM!56k4DJxlXuLBP~!w8#;+N*#muun{x8aVHGwkdfnM{*)2Qw z=f_(Z_UW$7>HCJ4)HAvtxKlo3_r$)|Fqq?PxvYf>*KeXVH{VZNdT7zb=5gnPqNVyX z+Bi1jHfBpeWV&Nt!ali8l+!rqDDez)kX__WUT_4cj775vsWe5+O|?Pz>}N6M&oMDVab zSSgvZH>D+OMC1WMr&e{uU~22O-gV}^V(T(^V&g8e9E26I_T<(= zXM(Ex#QVJ+@y%Q53Ekz8@Jg62;z38o5sRO0`eV8^N1AP}pyo;iP;r4bU%h*jKso63 zm{IzT%RE>QTF*5JK%M^Y8(V@iA}i|-R-vur?!9P9N?)hY8W##U?pi>IFYKJdz^9 znAHM5RmdB0|C5*W-@5yT-v5Ijf*=5Ue{i!v)s`0-;FK-FSboCi4u0*n134vyY`l^? z;Y@p5o#wOtMqexCK&r`sC_}~B*QT?>SgjH^PZHh(2+{>Sec@TD#-7)%{i?;`;ZKYq z9!gQsX`Qf(qx(TM$6^Dj9`q_q)fMqjwxjROE&huib6`Bp`B6O@wR4ldJ9C75VM=1-@Q2Ri@v-UV?}1M> zGDT|h_O!Pb%sSuWP37NcH>KWxkr(Y-5dIbKobA9El41<92aoYBQc_ZS3 zh-AHPrDfuhBQMJjs2%=vhsfr@w=fE2lsj;TlA@Q`Qy~ax^8ZDG_w;oT7E)O`V-xav@$VdMksn5jKr~+SI6tY8sUYiRnMq$SMaojf3LvhS#K-Pfmct=q1 zM0VvCWg!d}3S=s%no~Wjs53qv;3N(VOkiWm$14&K@)A{lJk3e9^AoxMHS=4W%>A{~S z=3QaNik~6}C&8bnUO=h;y)1knWh`!r2h`~%OOs}RUN?Eppj~iP;J0=%IX@IpZVQM~ zf|FIT_2PszC@2U#)};|3Kw3YX^? zZazNALPGM%N`=ZnU0shpTYvXGFj(-A_>;B-03LMh0`s&@;+0jqG_QuJVIv@Y&!1+! 
z=VtDQ+U92wSA7H$gVo>NASb7Uom8ldnwy)bYuuTW-gJ03_-QvQrNkg2yEO|~q7BxI zZGni1J6IYZ{Y}olFPJLpsItmWpNQArV6a<`I8PE~eC@j^tBG_Jj{EkjRIl!nwRU%c zr0K7r+-uUmExfe8XYUl-;D==|!2*8KGV6N<#33B6V>IU}&FrLjoz`YEk|(B~#w$dJ`Y8l?Tgj8kFh^^;)IO;w zOL==O&Hs3?Wluw^k zS&%!(ddilJ{))Uysi}dQWE#n?SIr^%*tS0^CMz5ye>y(@s!%=>_fYz1t}~j9H~^Sb-L*RS za}~qvrGT5?6+%WLmq7ccIKWI~T0+hKKcc=etg0^B`p`&=w3MK9Bi(|4bc=|DbT`r+ z(x4z94N?jS(%of%gn)D@U4k@t=XvjUpYJcZ&)R#hHRqTkFTYu!nRv(uol`bkOa^ISW129^$RQv%Nl1Iea*sHQZmG9wqu;it} z$qAL}E0*+J_EjdPsZ)%7|Kwv_Q@Il6iV4e0zwJ+QgS!rcOX#qv*m?+*>DisKilSsJ znwYzuNmP&>1!n~!T*G<4B-l!IzwQbVN%5(csqGzHevpyKm?>H|15PepIcscO*u_(k7b-{nwM$R`{S*!45~&Cyv^?x*p|%SJCdGwKv&#k3XF99Cr^+ACS{t3L`Pk+HbG4WxFvsST#tWp3Ax#7cW=tNoRb_WKBGw!_F5vE3Laq zy4n`CZ-MziG12#U>lD89-6SY3$J(FMx|#snqQ|beAZ_;YG>~$)qb^2LX@J|n#7wiMdqUPWF2Z5$Bk#M~goy)a@j9g=;_B<~ zHloj2K;BYq46dD;O~Nsr&5kUv#el>Nr|S&#Eq|1C@oWm8A+CJbZm_Ep0#*!L?>gJl zK&j*u-R+jdJP$ubJG#2ev7Oe6>wF!yUJ~IbFj7weqy_xj*pM{z+X+wzMMj!|34c-N zMjoJ-=Hw~2Mhu(3%tVZp>U;ap-{OvXz{%n754G#tiTe!`*#r$g)2enZYkdu$1lDD> z%LCUMn5etd>?|MX*?4c+RUMRS;4*6Sml=~s4FCk^vCRK!RGQADfHU!+h42uUhx9Y3 zs<)o^NmPvE!#_8;apT4|5UYSw3{!KWqt9?BkWU2U9k8;q`)0yWv|i|1wDql*HI112 zFU*u5KVTUqTvBrPVmUc0Yidgm9!xctVgb>Y6_Q3=EUp?f_4^8M*RRj%T>PB(W~EWK zV#XuwLvCqVDaBYAg-XxIT=%G7e=&E*b+vE`aU_=py~@DsCHc&W4s|V9jvbv=bd; zdluv?eFK($l-3-X|2Kkk|It0TN;x#2Fh z{pXJ>0_Y7I32TPKtHdP(arw7jX6WF<^@@Qut5Slcm5F zgpiM?9t0Zc?j<@KV{T*h;q-JdI!l6X@YOh>27^D4Ic%{Ln)WQaec>IC4B(?kWe?I* z4&GD3q0hH4%*Hr9J{IjA+MnloIm~IrLYk{%=klQvwcBUx!qZ-l@^om(#6=kI9-|7z z!Vgld@aVdC&6JUGokl~k$2$|_f!>dbIwtD zSKlAgcD99}vxr!n-Fu$MPNMZ?_{xqbeSiY25kHLe`lbUvr#+LAod+CUnRWl3VTWDzAld z6~;5reUpPM(Qx2vL`gkvcqPvYeaE+oJk`<(hVpmBdP8cl<_s#hF})!vuM5X}7K-r(SN@A2*D?;An6K%^# z-D!EEaU;9Hj6Z<^TU1#=K}UI&rNYrra$){6b@s8}^wAyJCT*RlYe#~`PcTd#`|>_d z&(3s15pI&hS85uU>Y(|^P>D=>y^=(B65izZ7JnI~-y!(QBz%uM1_y5-yU;OB?ED$z ziv}L?a15AJ8Iq?O>EY9+qAk!0;ybveU^f1$t#qdc(|y6e{=Xb~ujHx%)qUTk@>Uz@ z`Pkq)m>K7C(Pey#z-Sasw`NrKRe1MCXY@_$?wa0Y;Ha`4h0+hcPk41pr8;at55QG^ 
z!x7vl^3yn$KJCu{29bsk{w|;{L^0{<-RkabnK(E7GVMD8Og@HeM;lnz7`VPttf8EQ zTwgM>0iy5f*#21#N!p!X>sLic8Kyr+Ae`IFFeg0gOk9rs+C4+PKJ%ICc}MFm81^+l=eyPzcqf{0$g_x`#F zcVYj!(Yx=tJT|?luK&%!;jIt0%>BWxA^GB|1DD_Yf~qW!Sw=Tz8Xw$j@EiRYRk!kU z{BLUHg{PC%?bRD`hhHEE?j#}W6Ak4Hg&s&}VjvP;UmmK1 zwnLdN@;)2k?spZB{pWiA)S-{yk6s#XyH9(Oi93J&3M<#t^Sy7F2$gzU6-IC-wDo z!lZg?fHWFxVhi);`XALV3X$=p#~S`KlYX%~+X-rM7vQlK6(k-~<{ z%=4ChlAq|#?k(=sY5AVvZsf(P#2IuGrC2!bFW2?j8X|YnG@FAk}E6w@|K(|Tm>3QB>mq`Eec+3uk1g6ZO zv0nl^sqr;ae71Y{Fo5uQ_tZ}kEE$oO9+1r=>@2QmcV=sISB(Z|_b{`%dcw22^tLCWl`RQ>7m3;^1WXsXVQgQlhil6tLU77`aL9 zM*E(2eG*|}XM zvF+^l?WyVMx@!dEtCW;*%>wyOFmr`@VmI_1GE!TXSD`{_@{Vu}7-miJ5D`K2+0f0K$?l|1G$1Kqe(g2#)A3j(6NN z7b};^-H#N64;@9tC;jo8U$QRh>&w1+6LhKiLF4+Q zBI4v@?Fseo3B><#0cuJY((l|L`LvK3^&U5{(_!T4zL$+zx5IC)Y-i&}l;{wZQ@@6f zb3f?{6C^I*)}CCgvyi3YH-^nede1fmywk8Vv8XtaYD@^@2r;36mzGv+Mhg}YcHj@7?kzr=Ef`WqJd2j96{R!%K2>l_< z@js>nr-RP5LtcPRKvs+aQ*%hbbNet{nqcPMS4FE6sCB*{m9I4C#alD;>7;}32G&e8 zVTS3g#VX5N_iJ@ou9vzaE|UG)X+OUbZ)iWFv{#S|ap=Vg+_&~BpFw@-LZEaalr`*q zj$64tM@QX8GfUQT15whdW-MQG8cCRUQ@7jg+gc^1C@nSn?R~l!=>$re1^Wud;V*T| zp>)Iq^@V$$r;u%O=zdiDu0A4KYo3Paum(c zZSSYLo+*nILrCjtK4?x@_nGsx;!nPwKfn^Wny>9;l-QH~sNh?BvbM2q_hHYh=CA&M zvS9R0`V{#Y`*ur-E^G~}_CQ(Ul87W-#NyvSXo!JoZJU!1qJb0$6DiBLLv98bcoSdv zzE1?pn)17g-(RLmrknryolE@qRv8vSWVn+_V5PH z`@z~wtsQx0Ru+;7B%hs=IY}fpCTbxb0HWNNv9WTlu6&HLZ8W;=kxA9n{N-0m|BTh? 
zi4cg{xw(uM33m1mKi0^%-(mA}li#5F{Va9O zDR;^E6jd`!ez!ShtlWxT9uwgk zS!dTaaz9wEe&OmEi=!6i5*gU6wDA)f7w8#Rx@FBxpWUiSnCbc_LlTG*vd|v9mH8OK zMQ+g^5I%o9pm6S^#LuT=*^_1bl5|a|e5m7%U^fB8I4q=+lP9pp4o58de?M#?f(mXr zr1T*TR9;hMyL)=dgRLBRwOF^?1Hn&qegVp8@KAvOkFZd9L9?~#`qyL4B-T4cwpDA< zJeau+#ftm>83%SPeJ{w1+gim*$;A%k(g`v9(C#Na7Zv;d6W-=#bi@c%?rzuzeu0yg zzf$dO2{W_91qqXSIeY5L{jYG?6OShsYOc}7{bjG6pIWWBh}0yh`q+udZV9&p=CQ>ML8qlzPD;ddI5WK$b%AgC7S}Sb4xJ>Vm*}5JF4m-`#r-Z zhYqH{mr=G%&K0eF|9cG5^CanN4CW*arT%D35u2+FFkokGfm_l8*lF}C3{!A$U!mKp@jajWJ#5YFt&0gnPX=mv*DBt?dM;a6Q)6_TSDA0X$@J-Nc zzrj_gC&Qk)e0)p;4 z8lolG>ep};)?_2}BlUWuQT3G{{s|dgL2d0(c3q!?>0GUJ;e}9v{lc2_)1x=L!=v%r z<=`-7XIV0g^*&0{@Lz9I-i7DW#D_$plOQ|esE_GpmIXP5&&q31wTTDTMWC&=6kv?Bw$|K7{ z0%==I!1+V6d1326)XS-E?JvpEq5%8mmHq@q4i1%%$d!ChGP-V5WAt%(|Ac6v38!2K zPxBIk#KHGA^;4_{?thg_lDpgi=tkU7pR`fwt`W*UV+@mLd+;sg*1)rNB3kYj9CU~y zzXMrk`)$pmxu->U*juC;oQk_PUK{&RVH>V0%DU3Wt9CcwjA z^A&OA+x3-7f*W>YRZ49JXn2A0C)2lUmlT5DvITAOi+_e4t1a-dvj7EADCH0)uY>R5 za4)*9eeB33H4yuYB%0>s6;_Gxr`zv8^H~=@A2$+<%OoTp)pI~I2vC1&_U}|>U;IK^ zGx3Jfhu~+YuSHL6)Q>0r)T{~ckFTK>j&y{~90^#wLu?uW-RLGHBas{XC5=$SA?iiJ z8gxzfBMOY@9sn~~n<}n|KS%!K?tH5Da4!MNO!kmL1?EIOs5Kb zPIgs+%%ll{aN__|m4L!^;Uh#n|d>m{7j!jU~yQ-RuT2#h0ywgd%r|q(Db_P zsa7>uCc`^xLCzb#c`_z2eMKA4YG6nAN?J2Ga#zdKRFh<8^zHMf2dpzUlg5K&S$9N- z3M9GvKSzAJEs;)js%|;w|I2Fu(iOytn_+%{-l8MAGJ_UF_Y83gozx2 z9t*8m(q`83i6ENhIArD{Oj!763X~1tnJ1;D{^}0Pjy#_0p`Ev#ZE#WPH=&Nn7j+8U z1iwsR6;VL25z@QLAP`}i%!0F6J|CJxp3s|_=YM-TXWkoA;qi~wkLUWT@7Xm|$QeaG zqZulYhh?&wiNwemLw#>&{3kgsH2R;XLoZeA$1M4yrV*Q?{UZ3QX6N|?H1#D(kGbBZb$)LWo>OQ zpa4i*Ocbo|I5-B*o)&W}S9|UlTtX5YG9uYZigY9@^Hd!B-qEeD$ba8oSe@m3jL7G~ zHAqL#Z#ZN*KMq{fpBb$PX4ovT;5(`uk$8lTP+;#qW>v7G-8tJ5`Kd3>`I_=RuIafB zU%Dg8T|El8f^en65T!zf4;>rnhFtv+K8hsjYZ(&Dm)6yhfno}7uVFDq=v6L2HQkew zfC(p6v-o{^=G=YP(ClNp(lKj-c`e z4b};{R7kjbd&EVpN9jVvwdi{}h9i%#EVD;(SMZ!$)eo9`{4_MP@t|W4QE!8F3&&UTi&??`F>NL-oB-n4y1V5&<}lYdEZ-_KU-te|K2% 
z3>vVhY56l+DAAz$HsQB_@Zn@@GhFqlO%KEAx$pOBSN2UcViEi9UDZXC-q5T>k&@n0P(qR7=`X)$0H7P{l)0+PmrLA5X_e?>-Wj;`2_jJO2)A79gvfJ}|tUEHQ`iLhsJ{&?6^Q_1M8m@>Y_8jufgKIEI677ath7Ryxj!B2l(# zNKdf0PdoFqQ^C_h{G6IHIauyJh3xLU5qCc){1B0OHNVxz(RuzRBhr96F1k)7K7P!b z7Rz&wEgcUh9QC^5=&T>Wv6iefkw-_K<1}+eFKR>I$)j_gwJ=O8>OI4Ms!Y3cUT0fM z{6niyP1782Qz2-8MEL4wlLtEh_fdCbBY2RvMAyiO0HW97mJ9))i2zm~F*3i_=cptv z`_=b?j%)&r5`h7t@%uFg4b(>-wUH|NgC=}k$pdWRh#(mwwVLDR-jo_UXFS%*Vzgbd zu?y#y#kXpb$^Niu&E9fg{6NAmFqSm&MoVk*W7_5n=N;|EBX$TuLU!LE4oHU=>f5U? zze*}9B0hXzAJe3;sze;h~hR&Cbj<@#N;S(*;h>O@0DDNX9_4eW=ctU)j9 zl63XD2K!#sY3YUQ$1mHmhKN%?^)g?(@yA)t;E_PGt%GbvP7IBgA(wf|JEvO?1r_gm z79(+TSG$y^o~9S1796Mr1Fm#l<$vJ`3SOkHS4H8@OUoPRs0dgIJbeTTQXLct(Ay&5 zu@Qa-4+uHFHSD`pb!Rc0-!QF=WK1?ke`(FwSV`PTNGkfbfV-1cO;zIESh$zSgTj$z zBO4Fd+mYWw0@eAN9@XGK)J)XVGtCuiby{G3-y{7(p%SmTP9Y=lCjC`ud3i68J*#Yg zusJtowbhZxxC5>?*S8J8v@;3`1;tCSKUjRNF3Gz(M(jfj(D_XG1A`ew|h;df*Bz4x5ZjqCQ{!=pBdqL1vW7KKO%KBi0yyHG29 zpXvO#`(aWQeGL8jFt`HZfq^pMAjsF$K*zw~L@16#8WV;*i%1(~uL8LPoiuxC{+$vRw#DZHVH%(Tt1<~XfPD|UOOOM|T=^NW1R3&I}D zl#ywL^QH$nnkeN4-!%BTbQG1on3xTA2W7|g#v2yOJTMX|@Nd2cVh2l0OY@%Pn>Muu zNFsKcRG=g^b?lx2cZin;Xi1S?Ky9Z8G>q?n-0-?A%lUz%KO(?(`DXZEmxtl{YEQ8{ zM`L49cHo1?t@VSA^%#Ds;^lyGH!;=TEP{KOQ5W)pAf3g+fT?7;kJy zCoX-iBgr~Y62WxMba^9+z^5#gA081==IC3Ob9cB|pCkXgTzD_V;u5y+hdx!eL-t;l ziZ={pgw@>#jQ-DDic5$E_$*|f0#e=}wtt6>!9T+q1!EvOhDS1&xm*O6*|APx{!KHlWsFKl^<-EXCQ)vFdrkJ+%vOWj;HkU(qi&M>OmRmZ7#KK{pEi20lIgh> zad0S<9;Tt%SYW&(X3fX+8JYfQ{)S@bNGR74{UwQX0+WacB~qRXgBm7AU=ze5uB7B~ z=n-CPSudPH7=M?EiH|gRJ*XtuNZ6Y>$CKv!dYgo)jbp_01J``T`>LASxGt)kO!)in zYXA8bBW?yrn0_-m77fex)Wbe-?-~?4t2urYY*fO@9cjR1#X@PiZF`72(mjC_Dt=pt zEcwHI;Z(h_5|ma0ue;SSAWBC)d5N429_=mAfuH0?N1%#;()612KL<$U3Gx`Xrd5l@JW~y;^ z+(K(H@Orm2PJTj2MZQQtkP$xJ^&mp)W}li?TEe4X+M2Dv3iCJ8?{&6faVT%`VeF1z zqoZyww9|nRE6Nv&NfG9kkzi5)(Ku8{B8q6Vrb!tBH3&>=X%ynAWx(jxVBEochBB$!f6bipnND`%?Z4muL_Wb*_dND~p%%;3V_4$*_Jj`!~DOCHO zIPRbMA&FGT5hGJ+X(=KI=Ii{KJ;FQ&2&C~Ww5Hu)`IsInQ-hY#+odhj>U@_qbwbj= zl2Dj~%+~cV?S?!BXSiaRK`^2k9E+wC?*8iPAHB#l6gdlHhY-|(Jh!K+-xRa&Y 
z#m~WP_H;3n5URtPk<)F1$}C6`82LQpu! zeKEm!E>76JF_pV7MyCI%e2u8T zwXm*6+<)HO-5Fi1J=@O1h7ho@NYvHkO^)Re>^gEkJ*w=bkUEJF8loAB$vodogcg@q$_s(1Sk5Uaar!Dzdx7Xy~}_otcY1BG%#HE}UqG z3a6%z-KlB_kCqNlXxD9q6@Fb^;1w~(Vib<#3yR%RmOmdm-~Tzzu!{3gU+}&0A)z3# zIgVk!{ah0%GCmW+C6P1UZ{VH-2B3;4M~DKJAcP5Qn=ca*`k|7Bt-hwyT1p80*u7(y z{m4d!J9ZbVw1n)(+1WSoSLJK$Iwb$YGkFh8?ztfygmO83T zH7#sUrYCv7RD^>E%%pmolPRaPxNe8VDFaTn z-9qHe#32cXU*&z_Wo$r4rnD;TDMlF6b@|k*@_>F4QM&*6PiDfH0wGg-EU$oN^f< z{&j;wzPAJ?7o69hgSb_{JwQT-jTF{SuVLEYd8eY3pEpywXKPPb04?^htNfEuRk%ee zZ0f3Lg*q#RE=M%Q?c%TTpzuzUoQh00akw8CMn_%dHRW^7Rqj8x(wP`7SMvGp?E=ro zzl2`%bw`+?qg;M=!eP;4T1kWRZ>p-H+%F)M>d_!>fzG|}&8#T7q)rcjdSd03#8 zC(e4qju|f~!(oLf*rJy2(r-u7D4htsuBtp>pNKX!G{p?Hid_`{`NNe|-s=iaeV#5l zi>QO#`@p(;q5V2F(m;k%GDk9Y;@a1c>AeL`8KqESp7~CJR%xyd+aO7U;HYga(li7D zCULE-Rty?n(Hnqq02<7eadIOmo}r9`QO4ICDLSy*2?A}UC;?chKK4~vUkm;?JfByq zfWIEz+3J(1nd?}>AVA6yN3}&uSX8^$l8K}2MnD$J6K8kVD0ZE&R4dNE>L_JlK8gxG zTtE8r*H}y1NA9}69zF7EX(x~H63RxmW5szoG}js1+2&kdZ@VH6_U|4J&novX_<1T+==pC70 zO_hE}@z?{+6k~*@3vcKKwyYLA2}`>_E_VXh?mzt!aqOBzZJedrcRT4$Wfe~x>w}8A zH~IpCL^;(pf@6GRXnQuFOe^iBltge0!j%*n|tghgsY27=ToQFa8TGX=5Q3- zA3=e=JQ&5 z!5%Jr63kn53HltE((*ym%OD#Tez8k!zEtl($Hn4!SuySmUBN)W#|O<+uTQox2OcN(>H5*m95Q6&rEm9t5k?QeBB8 zC&oe74$sI}jZg8=I!-$R7z7(oCjeMTEM~_Ox{|bv45Y+YMS)jT{wX3oBVz?PVckyc zR}rAwfIXp{Vtxi=4C`X?Z}(*0h+qXa`S!-k!lf$>GCIQA2c0Gl^0hUkT~zQc4RmZ{o6yYm}2YaaxPeY^*8E{bDf~VL7W}XxbS3H$z&6g!-SYH z3sTKil6ljW;_4H6Odok6E}yAd9bwVK<-X6&MS$IP$N%eeX> z71TAo;L%hCIMS!Rh#OO)?0D%R2HPzvIb27-zAicQSQVTaaL5k69MW@wy4Od5}lCq=Et)sUQqh!AXT5X)F8Rt=A+85BzsU4!P^F ztV;6Fpce)+=ulWtYEgH1isjf_s85k^#cj2Dxmul{-yQ8t<2i@pr60;;%H6w%$8=XT z)Zk7e{Wa#BgiZBkNmRtVByr@O`JpsYTdE9M!L{Cz;CP8Eh{V7xT6(aVHBPvf|ZdyGI#seWh;ZrCV#HX>{2mo<7ml73Q%d)uoo20nkA$&meKBW=Z;XCuiUQ?lsOQYvP+sTy>b~h4 zMIzh5klJR`S|qjjYL61IYquZ#09sn)|&3HZ+AzWg5-phvpMHj`}i zYkkMAcY#JyQtr9d?06{VhuY6gz7Z zT{mMh!$H>x=5ol`BS-*5a-Kw20cwr}^a0$*pSHUR zS@2;bLbivX zO~2xB8Dpxm_D1O@@vPhrh+VzWtN-WFf&+gqa_xaXrOK2yR4_eX@3itd?O|Zw`xD{m z%kGwGC~*TP81Znjfbx@JIgQA0VAo 
zR#^q0(~3I{loFZFt139E?#FVCEtc+UHtrT73gCh zL0cbP@vOUFJCYvG^_{x7V#lS?tLK+>?&9_wKyI|A;1YZ@Rd3BfaL?Qf8l0WFho;y1 z{EqZ+!y+2WlJmmkd`XR3aT=wA3fLKJl%KMXQ+=KGCjhM2K3gOc-gVic4gpR^Ml^*4 z+FtgLEnUbfuMEW^WT2-ZPSPq51#d&E@6g2|8(>&p@B;*OLflrz{lp zEs5P*isGZFG+}InkQERO96(?pl@eJE?i!KfKcp#k*vSp=QVvOJ%Jos={^@ zp>9N|{CS5GB z#%``eKXcpAmIb1ye$5_nHs$PXb_P_~uVP>1P{(4oXQ#EFFzooy1I`orrH`>PlG9b~ z3O6Ne+tjiwY~w_enl93zx|PtRGb3Q~CB^m5 zSZIU>K#j{bd+~7tPD{T)G9{0dO(!T14ZZ$8I;s*-m>4wENQ%phQ+wzm`X`^;^h>p= zzDR^oi}5hoBD_BOVx}gkPrrwPO0pIVV=I5!a~QW}BI{LCu-Wed+XpnrK`knvka*1z zgrMi+BLeW^hATs0dv}wwd)4TM)fH%arcJG{BZ`uQ_|{BP#shW zfuot}vvEV%EqHe9pG=!_80ebfJ+Lxa3Cu zuRKkK5iQ4S9&!uZy?MV9y1Uza12bmZizNs+GJ+T%HBLFdYEx-%ZYAX^_r>v~!f;i~ z3{sMp4@uKYPYIu`wyNR~M;J@h6ss4?1#gHAcL0zk#Z8OrNlra+S9@_i;CzGmr5wYj zQxwxtkbH!LAO+;B5gRN_o$qjS!6AD&lu)t*cqGx=+xAfIIxLzy1P4ya-FeK?b(Ce5 zt0BZ?HDp@7KlAD!P3$YVh?QCa8*^mmJ76Har+U(#&^bAQ_t+@s!P8wTgrua)tr_mZ z#mT}Ip3%vayyQL$c(1qL3Q^HzD|Xbyu-41~8J+0xH;Hhyz<^``wNh1DUb5W7X^GWm zEkUXMgdIa`3S&hM(WGqRwL2v5lmy~G2B7*GO&WenR0+0wr}yCi-i_zruj}k_FUKOx z=9S`Iq&<0+{ZGE4f;7C>$bkd=ggkJafz<&UZ+_f)(-gz`-%>>`;^&|1#zY@}pT{t( z^mCV5&6bM%lblQzFXxWwV#CUpK-U{YJJE1CZJwn{Cv^1!mo7`OhdZW!DZR6mQpqOs zO&FHR%(kZ2Spo&JNoxM~B{(5XsJLV7Rp{3Og*&%!D3yNtr`(;FyZ&L_a@b?yceSmU zwaok{BT!R|@!`>J?L|kYAD6)@)KD+415_h#WJLQ!CyK`g`~|}vQR7S4U&!2ce!kXVCwF zF=nOp2m`Xtt~DRVf6Y5ONy=;-jfCsHJSRn_6=8aq#Y5E2NfY_m*JeQC1sPRJbl$r; z1V-}RIl0}!7%!V`#ATBgwU;5m4i~b;-b8y`WpnP_a_&VWE29eMqU?dZIri_p)|Yf< z>%N8u-j=6U5ARyyA3Qw%$#0%q{krs*>+0nR-ReUO;W}4hviNs7>q;cH$Vbg_72DZo z=QZ{pXLFWP(}7>E*0ly&2$TlrB~)T!;(Z8=`LBZF5EZ4G{Ak<{iDq!KwDfhz&um*^ zk`9alIS9Ce;GJ&RjcJx$F^Ox)YPh9 zW3=KC05XfC564MX%`Uc`p{q4slcdXOh)Phq`~2c?Y0mLujzZPJIA_a?`?L#RsiFi{ z*4g7UN%niwpL(t3cnP1Kb-RtTRElI;!c!ckqaS=(ZM9_H*A?|6NwinBRwrt3=2|ws zlFVvCG8|=>0PmhaCMd%i8X6X1>O>*t$uZ_a0je2{bd?B5cl+-XZbQZvR!nafI47jT zCu~wdK}Ue?BsVg^0+LZp!HNyatm!2+&3jA^SLChKXTo(p#D?}9HudDX$~+$ZPi8sG zBi2eroY%)9^WHs+(P5G63_q;YU_TiC+<$YvKhComw(iXZvwdxQ(EtNsGPlUrHcmUW 
zS6Vnmk`ysQ46>Cj=R8@huhix^JIW;1I+tg<>@Lx@7PlUG8&E}meAd0*R&nZf21zVq zA1%IoBv~2UQiD#l9jW9AC6_;i zEO>wNhb>Y*-1l1fxHT2PiXm}{ws?~av+l{HVPWZ*X3*j;O>$4~ot6alfTyBrR;*hv zL+AI*l5sV?7hj$&r|0X7Zl#6G>o)lW(AAE;FMV6Fu9n!gJXXUPv#b>nzWNlIsHhZ5 zw&yUyiOw(3Oizue&!Gq-EXG4gRn6J)7pNvt-xCgn`*~`m`);V#@!6BBsDB`=SXx`_ zcc<=y2w?<-G$^X6VSd2n@=$Xp1dFjy4VE5YQNS&AZ8Ai;OM-I~rovCEEten-%>qC; zHC-)??0FZ=7cuMy7o#`Q?ssUfNkWZg(%JSUv8vITPJpV8mKN`>z%!*J?C{4G(mDpQ z>cs@TTS=XwzFFoelgsIQ!_q8zi>q&$iIyL6mHFxLJ}FGAyTZGCw&Q%-8zuT7w)DP% zpO^T}47G)j(0U>9t}avgiZScdx0ik-#;vGWZe#=;lnEmNj-n@L0OcwNw8d7TNVquv zT&eu+k|C@$NlHT#2SnN2nwpv!u8YjnR4geeDacidEPMMftqi=A0U^UiR<6J!ON%!{ zeo*xP;1i_&1}Kt9+u03aIWg98sV9nX`rvaIy?r(>Dz)c{?=$sG;b55DOS7ePjgEf$i{&l@9ep>r z!^VMoZd%e4^sp!&@R?7_&W?xf;*U^1T9Y|^WDpuE*xIr=&NcmTkXlupJ62}(`ukl# zT%?lxn2Q=s!EVlmEz-%-tk;Q`sIWqE z)a4G<(8s;$ig+zlwX~`Vz-46Ca6&{lWh0C6f}ATU*-mrV%Ahy0Q*9BwqhH*SS~~9a z&lZ39pz36XyXtreeq4~hl&rxLJ@@7m!CZF)%otW(Ih|bxjtzFGA+8^HD zL_KpsS{!}NnxXsh=8CT|E~;j{_0PXp0EcAd`=5T^sW0ilHGL}(INgtAi-3PqQ31~~ zxPJG7ooZrgs=^yoW|fI(Qg-L#?I)1b7y4z#Xw2(x;TQ1qs)SK2@;C*10$FQo>n%e1 z*jE}1{i8VrF5)7D9~CfH3Rv@0v)5;VF!CkPu)FYxVanI1!(m-R|HRppp7$`B_=b`b z(*v(P$(AgKN`FrZ7}g7TFBm(SFG6Jiuz6Ro#jSzAl9=wC&3MCmjfg0%@VP@q9?l+x zHAxZ->X*YkTYG7wR#q`teLVE&?UzU+8 z^Xi^5ywIPT+)?B-NtjgM0wI*Ir@y~@cDCg&)=l}S{GhB;G?OnbO>JdkwZfPB1WI?~oo-;d7%&#L05qqJL9zZB@`sUR6j|m3>ZV;uy3j-6G z6bkl$)@W_kGcQw}Kznb;cSD=za%&%wSgZcje-I|hiF%UfzrVB1lV!0O+@wp?o}ktn z5$GiIPNSQZhe!gGAk6#4DOm=pZ48cPz=h!Zm}sJveO+LkeK-i ziBTU$yjb`#M0Acg?K3 zUFc>iV^Eedp)e*!XJ-yi&_W~?*`YagnWLC|ZKRKdC3Chm)s zLM%e<=f6Zvg1$v7fk)T$+D3p%$HMY~*9n1Kmy+CRv)Pq)g20S1s0!zW{m|MC+Y^uj z^%vFM9(prdyAu`9V&1*G3)44v6kwD@3ZnTK00)<$SBD{yjQc>N+Zqo{K|Uo_(Yhnc zNyu-F1w4xmIa0*Ic`izjhXq4JBuI|}=7bU?%3WygZ(J*0z%BqMPG4!W_u>DL7qU@pK9cfmRfSLaXNGtA(A7``5v>2MdwUueATCQ~zCx-coH;r`NyI zeiIvcdH>jO#fq-xe?4aXzew;7T{`QlyNNdE+)WEK?2zD{VAdmfy(a?p&wM(n{vm{q zcF+X-Ysf!pj8Ei?bCUkpILXsuE76F%ZZ``&MLja2dV)mwyxEh8O8`M+%$=Ca^-Hcw^;<}7D%kZeyP|PEqoRjL^P_W;^(tr*Ejl-I>o5h(vK(sWXf&K|!y45>qAioAz4i 
zD@x3lU{!9Xsk5Vq@>J$2>M<7Q6b9b+u5R>0gEFd5D8*?UD*ckB$l~yM*_j8a_k=kaKTYKKf(r|Q(}e{g=Oa%p`IDWfgyIOn zMiNH}IO~4aJfKz_=vsKxm^h$Y>9K=i2N!;_iJ{EAVt_8qnv}~VMVNo-_cA&f#hN!S zhdG;WGiD>yr{2r-h37*~!OWH@w)*&LwA)l?{vxqgIp259D8CH@?u}Bo#1)<2YDf;!j)9efTqkQG$Xth+0R)ji0D6Fls9NfG2Uw<4&~jISXDm`-@-=Ha z1o{}Sy&UvRRd%^BWA&Dslp)0-KDA!Nh2TxVR7=$V>d<*xP4e^mdzHVRc8`9;e^cSO z*xcAT+KIw}W@KGL^y7h{0p=qc*5nYaaqJH!-}n{GbFqprea)unorLu_cB77Rfr$vS zbjrZH=6Uti_>ymlqX9$C$YN5Y;I7q+xtiqPABmDno*1Y!c}HiIK@p31d_hLX45-gt zR&x0NE=xu?f#0#6Nn^UZjA5!0Nr?gE?q8`p2R?hI6vfUgNhS1{I_(eco zsgaqdAYZJgmiiHT-vB3&`IpII`i$ZLd;hozoqZZvG(Dk0=oNL5Rz_B~LPe6jE32)ctRfN> zLfILG$VjEkL}WIM$nSVwpZojA?|NL1$93Pu`~7-7$2pGkIFEAlL8`|HGaWpZ0QYc`g?9A4*8_o+t2Ms<&+%C0b1Pa2Va9yM!0AJv6~a{c=ePfK_% zSaEU>SvBS5rRU~GJbKEs_dXZTHyf6T$q0quFY@pEg)>(^->P_=efN5X4|azXzi)Qw z5Dv=iD?0LL0{VFl$}o09SK`TzKO&WCSK@2f^GaiyK0D{G$L=kq;8 zVT0Hv%(K%p> z_=k#fJ3!1ha5_&buI*rd?cggR%b%qwnzigeW)-Txq_;LU$rRMLE;FrPXFA{#A~ki@ zG^5ChI=^J9C#K$|rnZ*a1>yh!Jd{`(x$(#Qa~d6}+7GpF`f9wH+xX3hah?+EbJdTQ z64l)z=@oZ4hQ3GD;6GmnKYYvBEq2+#+h~`+T^SKYi2vnRzj~LU2K2CPL!ESRrT>gt z8UQLwK1`!V@-XYD7b+|Cy??yPUj3T>MQgg+^3dpY22H#}AxBtqQ6~B8Q)V+Gdv;(> zw))#Fg@^gpnkA)+bcIyL0!l?E>8>*E? 
zX^dZm4Y0L1g&pV-{U)!L6rfqwMQ#lwAl*S~{M$Ek?9h*iNO+$|J=7*PtdIb3wq?Lu;rMek@ zPlc0!=m$r?e`SfKbWb z$M=V|Yi*%p~)(2SVmO?yXqp`oEw^yAz z@h|rv=BqD!yX{?GPk-UblP3)9?2YK-RtQ-R_>Ur#3Qq5pjRza5R{vc@gX)9f)6!p_ z0uIJx;XyDFOV7=3xq0wof(?N2`BEb|%m}UZb@HOTppDiNsTl{Q2dkph{n@+!CSLI0 z_b1gb>WQntku~{zuh#F|ogl?6zK=QP(%z4P>#a+?99R@&bUtOOPfqM?n`YMx&VINl zH@kE3<>O|ho;xFF`$kWH4Z8ccQgc2^WSs9&H(RZa+?iKj_XmpT*lmzh+(ozJ=zZ$4 zX>pPUIAMW53NR8NR@;c!99XcgC>3Gvkb*VJg9E;2RUq{?uWh=}CsKH7O0Bpgy5c*!< zTc08Khw(Gf5VR}F(J`>LF=KMenu{{QGM0E>}R`}rLl?*4l zG>j;e&dRLQ=JYM)x)$G+wA=V9S6$QNT=}s_7aUTrDQIQN<$1dHWsMGocnM6^hg47Y zh$bi7QuuvM;wSq!8&-#n?N$!+_Sxw7h%A2|)K%^Np?P z8HeVTdB65Dc;Pk;4i1ue-^ z-mU1rxB79PE%I${w|*dD1mM6*VoKAe7ncS0%EP&Qw` zlp~!{@G1MZ%NH~5w1?}zdnc`cXQ7%2)X?AZir_1K5sj1GwvHOlLAr ztRV{|lB?A7?dGjp5?i*gv5Tm=71>0{g3&D?AP}%HBmV2xuMdfxS*Rw*f;T!)f01{f z-O8ZW^{!@UVol_ly5?r0SmU62Aa8v;*lQ407{aANw{ujk$Cq=B95po~3UNlQHIZk& zFKm<&#B-3BUhX(JA*vT0-F`Y&VEq4a0ZfiE30&xK+bT)5ZYN{WHJ&}H?u>B^JuB?T z%^jq-G4H)}#4;j&`A}Ante%d!IF^Ab$m;OB{kugfc5TWyvu8I;w@HJ2zQ3J~!NsngBA-}rmy2?*k zl8TDlBv>~xVma;SCpewJVf^mhyRq&(ULtOqxre=ESn=j_@BR$TD?aUeWkzbt4d&qB zV8ok4=h&(XL%~N) zx_K*GA`Tr}cRixnDcRx>TVK^btJj7_;T*9vbcPB=H`Wy_Iex?{-rC#IZhST7!BBP# zgPo_J?QIl^EUm00wr#uYAG?9o95>}fOVIieZV82Kis8=afYm3Zv52Smwwd%6sGfd=BV6WwSf}&=Lh4N zGiOv(RnY+)KdBO>7*|**kIBhI=I3SL?+fIQWFWhWzJ($5e0l8|JcknbfR9Rk;MPmF zFZ{JJO%KEudsQZJ*4mMi3a)R3+&nxwVzOJqn9^2Vj~{21l#~PwO;T0$Hueb=>#1;bvVJ#?CTUKfqN4iWXD463exg{?ou8loLsh7zkdTm=^y2Z(EsPEq(FKJ< z3y~X&JKybn*mG{uYSg>N`}E9!Ho^;x7r8pQ>Hl5;A_AOfjv*qu!m-Kv&Mq!7$P&^@ zOH(E{1<7I19J+438r`;`?=uGKD1NOHL8I6#!tO-hsFh#ja}Zh-rnAiET2T^hceh< z8gA^vhteC2QnV)C-{M@SwW{F%!;;XgH=937+c_FFU0PN)2x2wK^s$Nw4=~7sh(+v= z{CfcHX89$Qxj%pOG5om@l|bTn#Nfg=$;i}RlHB9gmg}NNXDo+RlJ^@og-R5&s78>z zDx3IkWw&-QB0PkqSyi(0pZP?H-&+}J+S77yuqs_Mhz0zVsdh={zBn; zbnO=CM5}>QgxnbM4&>u`v15b_*4Nhuo92j(NO1g6=rGpxaIKQ=f_{EJMMhd0vuad% zM%imOToD^4qr60?+=CC^?fu$=wJOgz2%8a&oL_ zT^pDU@Y*BsHSgE&-wJ1@YzVfCNeNz!;$L6j=N#~WimZ>rVZ115-SK1um!!J7Fgk@C 
za3}13Pd^}su&Dnavlg8(4J^zMiX){bN0|O+9><+gJ}}s~FY?9>UUDEwvz$ukKQ)bz zhR&z~>) z6FU=)TTIl)$PEGB*X%Dex2nQYC+`(Iu!oI}4KyAdv|9WZQ0O-CLNP$+;6Zk<-x-*h zGY8Mq&bzeTO@L*BzEUhqmN=Tz8yXrm$;m~hr116nR`TCy-i;7b-e*9ooWalvRH8P= zZ!I@TZGC@_mxCMam>{{LlKSyvBKM^`cd+w1l?ITT!TAW78KA;wCbDy9aQ^GC>A+wc zu};aUsjSk{(v@36C}KE}NDwfjdo31pF(Z~;v=-Bp;KJZa>2SLQ&m%fUNNj9uTph)@ zHu>7MdjIu0l{ttSEivd1iwgo62^=e%!RStZ^vEca^84zQD|Pdo{g6Nz7#gC+m!BT* zI*&!}l2%sc!C91-mk-O%-q`DFAeTc~VuK+e_%UBh4Vb`}Zr^Su-{6LtpIdK<+$5{s z!rizyhLC@YuC%nYVJ;^_Z9PBG{>G0Q?A==n21|oKx6U|3%rBzSl!h}46B~hF=)Lsc zr(<+#Nj=Qr%4IH3A! z`7%Hu!HTO=8ABs~O?|ojhctCv%XgGI-OJ5XrN+~;!REmQ z>mELQg)FrPW{l3v&ek9td#EZenns<&xiVA7u7c^K2kxq!$BI0K!P$l0q#AevG8`mt zG^&6E#c-Tp%6aW^f46X!=`;Kfe6_w{@)aJ>FXet8%1&(rL4wHrAnOUl$6f+O;^55B zpOLV4{!^b{<1CCTLR0F)hY!^a4fMDvL(Fh3$ypAjUj68E$S(&!2t?>Vuo7TW7 zNG6bZCm2*f_FYAY?RfhVt45V~#Bv`Y!=s{RaPVNk{3(Iq;Nk4~yLLU4#f1eib@kL; zRhM|&QG4<^d9oHfBDcLe@4*+ICte@kx3H*4&)ArQ82oDpzVOzwWA7x<0H8Qgw&TQS z1`7)d(g%V-iGh{%`1_yFd0NZ*+N-N6m>SjUU%9kRK_MJfP3)D~^r&LP--fN8>dkN7 z!1cU3BdH}NCFSGm8-DHDx`cnpJfi@M3ZFl31ib0s=?Od1p@k!sPMlv~p9m(rZO7Mt z#4_d6v+z;YtXV@|7$>jQzpu^CJU>5QTw3}({IW%G`qvwI7@Md@{GB^O0xRNjZvnX4BaBm3RRs;^=A_D3K_9-5 zvZ(q-Y)LGxP)%dwn(NoEV=J@hMUr9}*KQa<^boN0%l(lf$G@`N4@v$Y`6hmoQ`Q-afyi_m#W|TDwL3q>L=Mh z2rd7a?kAyeF1cG9Hf;D`U?8?$&xvt4F2tj?y1t$clgNoW`M(MmR6BQ^k$;H>%=q~DH1suK=^QH0c1BYWvsjWf5bt#QCve=}zVd#d@ z)6qo-?%#=?>N-Ls%*>?0hi;OV*0!?~g|m3-* zNsG1fFuq&P^S;NmCiu`HE+Zo&v5gzoz|k(_N(THM@8-1|tx8($Keh5VxZvR9`RRVr z5`Yjc9I2PAtSrL4Se#HketvCq3uF0k3i(~*B1M8dn)3X0cW_qJrOWQeHji3bz@|_|?5`BvwO%EN4Ajkv2$^Uc2v=j~w z4hlk@lv}rkh`cK&r-Lfjq6mw-IXbEAachSsCtG3QJc^I^D(ma%MWJ4HAo3n_n1Zsh z%eL*?Z=$!;1$r(llyBqRdBqwfkL|SW%D2kNCA{nEI)-@sCK{X{q@{(G0lvz7`0%xx zr{{Kw5B3OF?_XN<_~E4Z?e9~( zf&T8@BY8=l(#UD}K%4o7gW|PEJj#JDX7=Y#r>N*?ers#%n8slQn2OXgVU~ry!}>9G zgYtLp-o?+6>)gmED7YO?&IpG!@-B1QryG+Xgro`di~k~@D@MDizAq? 
zm1n6#-6)Eai^~z$8F5cXTn@$Y{O#L9?Ck6$;@3q9B=f<8hUJCfBqJnHJ4CY) zXH!+<4tAGLTQjG@V#rfGY=qywdE-ctM-27&w)$iJ)sip*IED zrbkj|dwY(_La<@*T#zS863r(6;L_4lGIMi3rFr$=+N`NrHLkqn#^3thok9nSofLnK zTJa*0)W>Zazk@ws2@|Lf!h7czj9P2jB?QNNtD7IklJV*4){(a)F}~#H&DO)uj2Hx= ze8A5ZTZf1j!Mox%Z0^^wu_hmtJ5~ikYimo!#@zG|97t5yv7?}8_|&OW{CoE7c>uQ? zUsQCu`$l<8$j#MH!AQ215?7`*q0RDJFSQt8@ zc&BJjrxCYYa%8t*)_cR7H{*FpPS)uDN8->5VB(o@G)Dt;-3NVCAU@IiF;V9hMM>NK z9k%X?b2AT7WkH%S*!=!k*6;CmnQMTKgd=WpfTXfr@baT0&kA8RNG6PU!(PMI-u;&i z7L%gkSF#k87`iGgBja)PClVr~+sdqn` z7%zdIUxCJSRnqG>Z>TAimdkyXnikl13}<8nh(QU1#1QpD|GC*C$M^FIY~JG(qAxgX z3CoM=B>D3@KMqYzv0g(&f|4409R;H(* zP4Pv;Pk?J{)YU7zca4Vnst93JDP^H+!{_SiG>@O}L(iDsMyEVw039bviL*0B@6V-FM9VU$;_cu2^A!)3zO{9?w6MSgf|U0- zaB_!t-=jY%p=U-te+aRv>mLs7(+pZLHZv$T1zdovCjyCS*8TekSZKNHm$>Ph)vsZw#J44UDK4^L25@J?UCzwPynC)%%Gz)$z+9 zX0`*5k5;@n{k~4C$@V^~@|K0zQ;CoIt^s(I`bMuCx!mIJ|@yYMnwaW;ZR(_J~1@D8wUv0t){Ivq`sOG^fLV$HUi7 zwe7~|s?d8+3JdMQW#mJbB(XE!d!KI|9v`>L495+ldIq5%1{#_BDnCQ}wB-%y}!J$Kk;xXC|?z1_t>f6?e zX}^&Zk9zqZ{L}1NvMll9@2og?CdPqp18M{cii*c@+WBz*T$UGRjZUBbb!ODw-G)bL$vaKs`|aqIPs3&Rlh7`z|Rl>I$!c6z!mbFiST^Cbf&x3I1DfJq?ns4C^j9W;7);8a1c2Hjk#&GH|D$iuyqiVa z7|4#6kns)H?-sS}pKb+yzl89XexXGKm~EDb21b zL|#!*4>$#k4;3Zn;ln{>A&BlG($gI>{cI$&q)EsIlGmoKTU}L|Ji*UNfBcwDJpKA& z+X{l%w7k5Fz{25y7*a4^jG2VpNKQ|DF0J8BjOzmnE5n65`@is62T!<`8_C@b%#(EQ)8hitm?3Do*OO0Ihw;yQJae zVl>WC$1{m(C_rFcEntSwr1^H8-gLAw=ruqSf)11K2ZcEL=1u+t+DkmHz>7%!LBs?7 zuew+uEC+j@=Xv362R>xz_JC6fK@SI#VgOWRX0VQP-MVN%4o>(*95LBU>bNAbY6tJC zX_Et)olU@q5=qVokgvs22m*8vh71arEV&Kq04yPz9R4rcboVc?tD*;p1>f>9@lY#* zz?p?vryrlouD~l&K3AOM#YV&c>=IEAi~=q8neh1{iYaCGCfw@~P9e$8&Yx$*WTv-x zy%~t<4Z$3soE=MU4l^@Bsn-Et!m(yRt0gcCa}=UbdC^5yk8iyH@}&^8Q&YCPvJv7C z{O+4n$iYA(DFPcq_{EO%wGypKSW%Qd=y7GpJPKr&5Kv9mdVW?)E)w$u@pI9*w0r^r zz((#7LqI{rg;_~S35y*+xv5nKO%-rH;RJOu$rVxy;D9CaId#fOmC3zcX7lDJz?VsE zf)<5(V9?@{l6C&F9TCPEfhY_P)eGp;L;2X4T&xTaD^Hyx4nZyI4&P{=?2~r zb~bl+{_3LI_%K4ulUyBE(J?WOGXph9zH8B1PLjhC$xA#Jn9|JcQPl!4UJFO6;P{FG 
z_l3F3>_EpHIY;E%k!%7WhW;v$K8XQnnuSx*&@;5$1yqfYHi)%~Z`#Cw=n3cLB#sS0YqHCUP%FnEV?GXS5us`9uG0+w{B za;HfVY-Pz0RPs0+xr4B3OsuHx!iYmIMO5CSYtt>RJ(l>~=z#%yBx3ex%mty6Jclu$ znMP!aw2lJg+fbvmel_F`Y==lMDBoM3;DN-ey|XhC8S7n*;0q94#xa<=biGsXilPk4 zBSbR^=^+rQJphvdJ*6YB^;eV#Lwque)zCFFTZemvV6Xw}18kDqcH4=v6I5wXfTWo7_nsc<--4$^$Aaz`a*qa z=jPd9s2*YfC#K;2{;hfK+O@)G&mv(%0K!|g>R81BB=|Mi%L|CV6agfz-@^s*{wO9f zDl$S4U&n$R7)w)!c*%XQkV7kQ8De`0SL@fYoM~=L<+G1yChmOnI`ugZ%mMlLIN5b;+$ss27K|RL}-$MT%7eG(X z9=FM>Q6eb54!Ns8Lq`y)O5WdhBLnsv4Fh`r*)%=cF5GsMjiLYvfaWNo8^Z!1g_8s5 zPArTS_9F(;+Tjj-q%{aJ2kHOp-=APzC?IS*~2Hx22Pey!qDJF?f^+60608zCKnUSS?Ev~HH6AZdsVI0V2IokN6`3`&3_;1xynD~b|>uMxMCfFhF6^cj?j=VLry7BU* z8a}S}!v{VD?DcTdq;6N@bjh3?CMgLCYG}B9esP8!<&`l6#{ZR$che@rM?kG?0Qn&* z@-4S_928T|UEjJ{O-%^aCcM0S7m$_Y98xTWGN0R=~bOT*<40}Di)k^c%ErEEAP@Eh+J`9kEvu3o)5{!-`d@T?Ci|8a4O zDCZfiG=O#OV2u%SS(xV(q}k-$!QYy8!pg&D4&o(M z&d#mJ{n-1Z%;4Et5J3P$C5mb$!q7PZ(=qc@W>_?bS=@t4Tk8DJQ&d@bdDc#EuycH9 zdhV7TO*(%*JT`U>IR`+EU3P99^acQ28%9Ng+ZT@C97c#!%rh6INGB6!kni&L&UrL? z)-^RT06-=&9wg5{Chn$ffsLN|`g-lv>(`004qoX*pGsJnsR;Ck?-T<osJC6>$6PcbDKx9NTPmKZx` zvE#=la}W3cI2!A#P{rWA9(~O=bd?NFPTCf?aggx=BKTC;w>_zy%#2hTegPBsK@8xy zh^sgV@9qFx11yf)#EG#TDz&&%7T#!Ez(g?cf1%qC``l0i(xusy~3W!Rdhqis?z9#;Bp;L5A z!Bpspk@cUnv=6adIOJ%Y%&oiTtl?>ojnROE|EY$ZqMH#A3n1~@ zsLlhEr-7Tmv{+VbVLEvsx-eqWT9LvkfB_fhYstM4^ zBcUH+;@kIMaERZl2>i`->RXF8vR9xj4lXVY{_AVAQE(=<2`O0m)2CeIWlDV|!Ty;D zV!P2eu-ijfPwoWfihT*N&x&b<zAMEIaG(H%ZIr?mfZjxP9)Dttk`5O;RH2gv=Zvi&dz~IOZg7qxUS1Ja+qF zRVcvqc4X_&t#Ww(=!goMU?eLK-^2b{D4BAgc7zN8Df#TTR>4=VUbWLP9?EOGZOglE zodbk|;GCSnTS)VMoTUvv5(E25Vm!p?2ydgGKi@%yFd{S&D?c-x2*2addP@lm1}#L( zum}ds1%{Cib3kt!X$DEF;8w@)erX>Tn&m5(6Pis7OIMa?MC!9M!IV4XC^7cMgfn=T zj6)xL>bqx=)a4~N_N+PkTE8{L+9u}A&BeIAt-FtTupP~Eahgxy{r^)J`8w~b1G@)` z0iNW$9Da>+zs5LWHGnpl{0;%l5|k2Z*yo6{va?t2+?dHm*#$RhpyU3&|HLvN!3;hJ zjwA;~ky@`(7c3fV5GMym7;-|wIJB~|f@P&%4~hUtDpA0ckyhT$|FYHH7RWe4b=*)O z!*|{vuTDtezH{z+7)Y-ME0(mor4{0Thm3+Muov|K96kInQUw-v_M}|8vM9x3!X_y#t;YNo 
zL|NfgRhqlbOttvS)~@Lbm5n(Vz!Ef7r8D#~xScD*3IYVus*S-t*%#vlM7ej^v#y!n^4lKD?IL!I+qa z9jUQ1*%$;z&J?I1gv$)3j;?2|!9{&K4tLY({;Eb;P?BnrnXV7&FS(!@nRWB#&3gL! zwI4s;)Q>bsy}QxX>D<^UTJ?lj5P4gEmD|6L6B+xM{?GGupYu0K32ElG%g;D2U5#vF z-)ZpIlD2R4%(=Bg4BAFIEEz+N96W2+j4#Q~@%c3GH92A)GuLs*)cjvTebME3+SAuY z6by73gS)S`+vzgGJiI@*WUH^um>5;nAUf~JapPK2`mQ&Y+n;QG@atWaG0h*_o!}_| zBeZs94rGgs1cq1xO>{tlzcscDdf&@_u~Su57j$MMDT-ebW{AhTLOeI(d1SBQtb)pf zdqD6`ID87SfDaf`M0BE%XjZqiv4hj-j8sm1s&XYP-{P(~h~{6+MxJJB{;QX) zO~U7vgRYM7rah7uy)UYnA+dv3YG)Zji`W1`PL_@nNusKG_IkW#EYpvuecx^6T~dpw zDZU~7;glp-&(Q~IvANVj8-CTl6y4R6mtqBO4XWG5-P@M8lPWb!B&osI)PyTP;}$!3 zPXi-hS^xd}>%EM&2twY0ZHI3+sKDL3WrTt$v`Xy8P!spA|DEP|KsglvXZ0d`@5a>(VCKJgu3ZdN%P!I%9d=xorCOj^V_!#WD*35pJQWV_FxL7U{WfHx$we~knsaE zl;9E2+Y0j+=9P!FEByAuj$AAHRI?|}<6`k+8CIjiWiJm(N`)u&5B+#Xd)3TGJo7VK z$M~(}p7yY)%ZIid|PbZbHxU>G=$hr5>q7EG7%h{Vnp^&QYcLC^s_7eb;@R8kuJ z{{1|%;HSbDlf89tOO-*YswXueFi`>f2&nr1KzASw;t|?vk`JS?7+H!vK7bI6qAa}} zjBXu0YPhq+=0cc-CHK;4(}><8alX!cr_`jR<~On43xdnD14`R|UKG#HW#X4R-sI`7 zYntmj|HobCD?vhquK!~FYK zk8cK!bsT*>{x8%!j49~WALX-i$B+i-AQLjQTkFnGrVk>zK}=7BXyn1viKuB4oWn1u zme2uofS(5-$`3p&GBJ^@_ldz~iz7$Q6XqQFM1(W>Z(*1n+7|EvS&X!5fy^(`_E0Dm z!!;?IM$g}Vx1H~MP$TzC6zu<~taz5Mr~TfQVMSi@_W2k_V*9X^?N~K7Vc%1>W|squPRcwG9*4cb7cpMwt9FL|3d6;4ep4C zyaWZ1wgaT^u6fsP-%I&j#c^KWTIj7+IwbUQjJrC@K3VB-89zm^twl%{`t0<*KU2y;z`=O4X#>` zb=MS4^sYJSF}^UAA*Nw=`ATGb=uxpzTUpbuQcwJYL}j;)u_;`-MaO~wZT`j9!P8UT zo0|6yq&=j1DHK`{92H(58abyUKttJ=OdWSRIy;FgURYXk9HFG zhRh`epaDxl1q~|_w>vr}yolT>zUqhY5}a!8ZQHhyrc@A^tNqu{CGgR!_>4;ZSPi0O z-R&-zvzw3%VNRr`434{-@nM~j(H^(AlCuoBvc;oD}f0ht>Dzt zs|=O2)v@Bj6~Bnkm&CShmtTIguG_Kg1 zUZDGThI3AW(|`D{edkr};*zLDbYaXLX7?T4FT<@=+#WJOF^~!FeUP^NIHQ#B`>NWW zKY4sP(gu#xM(ReBd1ez%G^>Hk6Sg5^Y4;;Ln-ktycizVRV*Mk1^KgKeWJ#g)5 z3=RKTuId};;-p($#mIUB9K%i#esr!`p1r3hGY&{e<|XqV@W+HI37Y^mWCYUd&Lw&c z5`CRw>!+m%6)$mWj=%pga|3VSS98#3tZ#Euk%fM2x~{n4?k$wGj2u4EyK4IHZ|~r{ z-^+RQ*>BG;49em!C=WCo0&ZPBZ@WFl@q3PlY?jpi5o9Zt&s@|v**;B_@)TsrZNy+s*K&@v{49%pJL&A}|2iN?dgjb?HElo;QBY7Op$Fi7vO*5Wj=l1i)w#89omSLV 
zbWSjVv0saVGx%-ZB*jF@b)cv=aeq`WOP!d!JO@a?p!3$Fmk3(_`=fU>KV{nIaB@K4 zXh>82ES-~lx@+rx)dD@Ky3f;Wd!u)^*VwN=W;)UL>uQ(jQRA?y=jM%*|A;)+k962k zc`$)zXnNaEg~@qOm*P=mmgD2Cwb>+IXdj4@;+`dd-mOUs3aEI@qc z2tb^G&u9ZVPv#9_vfP8M53&r2L@X%Y#4v;W3BhU7H%~UN3rk)9@W0_=VWQ2 z7d2XTOkZd!yx=$-S`}mQebHF(1SR@Fs^#8K<=n2Wl8f*67d_mh+n%AIbj!$eF2K@X zO(=G{(Z2t)gXOk}TarcVQB*bCA5F1G2HEv7A$fHV9?kRn_wU3oq8Py(<3K2qpcieD zHpw&ZSRi#F4C@m|^#=uz?ACL)wHIC}8qK|A!fGs>daYtm@=)W`ImA{jE56L@j;~Ks z3Q>FYUBV!9$lO$H)}L=~?UezSZM1ofzaKvN=JB(hLB%d8f?Vj44}CvqrOXI{dUSLR zK_~CuuK}45S?{kG7m`C^*Nd1yJS8bGa3s`_3sGPu$&{^a>ZIF$#S$VhgPNo~xVq>zF zSI2``95YnPbM>R4nc7Fss1$uPX@+>f85>4y}7}d zv+Jq`uLfca9po>Vnt2{XQ32XIK=m(Jgz08!(-n1`$X;8Ik69P>a`zTXJzkuebG4R zHv!xl#A{|JC#RP|i(K&2rpp|M4w6OO4}1m2gA^r6U!-X0GAGzHV;`PKk`(Of)mQ#{ z9eaQM__!7^M`_5)F0_0Q42slEW@mp%6;?jIxL4uz1XIzcy~B5YFL-!{pmVcjXAtwo zyA9f|wX{vSJ)ddf6qA&;LLdNgS0rhX>-SUpF;{fp-9A7ixKH2$KsW@1l@M z#al@8VLg!s^W#ogmYtM>^y2>GpQ_Sw9&u&*M~hVz396(m(9?s*VFPy1LSbK0N zZM(0;F;mr`L4&L^Q*K_#siECD`aJ_b=28h6 z7zab5IHWz|pjE>NA=TH~yLWuzAdmddpc8|Kj&aorKge)h6P??u$*Z~X-Yaca&~-QG z6{cKXws1CNbkx;Jla`AA>whILpHW68?-j#5^CtYqF0n!B)`sM*`@ly5g&%;u7%Iy6 z0x}>cB$T6r0SF0oQseL-Ff?MNLBD{0!v%~0l=;A*CtK^<%1DN@}PAZ zQHl~WVGN5mPyr(iR(KGCpMhGQ0F1Qe)vGA|=%zExuV06w=LD!or1J4k)oRjo z&%N&~*P=(<@TkRr@^OYAjq~~b{qOd8+kZbJC?KhymVaW$b*{@xo8G?F_iCbR5zL~o zyLhR43JuxeCNFdf{T($`4)Ql0s=YpqQZ7mCA|fIXmr!8=Np}FQr(Nha@ht5eMr!fZ zj&$8MU~$kB$sbnMXmk~z1Bew>4-_}iK^Zw27nKC6A~}iRF_1z6I-iozGD;2xPzr8e=jfCDA*RbSREAPgZiyl^t9!f)M^5S$|NV9P zlhf_j9Hlmi)ss7>kL>-h&9}@(NIj4{lAg6TO*@i|c!#YMM`I4I4z72H9=1D+2WgGj zmtjPbr2UMH@BoXOQ1QX)p%PG&>fgJ6KP%(|RKXTwrJZr7SBGv9V-u4a|CcU5 z_f^{#adJjfb?0~-&g6Q=tgzC~+T`JuF+5Q7gKj;mM0Pq=ak}o4+1kTVFAW~~_rG~k zp>RKGgKUuZu#@l3eT>$Lfb9{hYN4`)3I>Rk^`PIPVnJhca~qwG&adC`$q)2G>lhX< z;8N6qA&*u4@Z4K;Xn2@NYLixI1myowDkKIEqmqRt6B797U4(RmJgLRBy7k%rmrbA1gPbKkHP%U_g z8@@*+WLX8ZCQ=U|B}qt9a;#hDtST~im^8#f4+KVvWO#_;OWwFJGJd2RtB_`>`uAaN zuThNY3vZJF^##q4*ewkkII>FygU&bR>v_%4xy30247suRzFVCf$nDV%e{z;m(bqI?`P|^!&S<q^@fMeaC|`mBMhfJ 
z$_zh9*#T$Iile!o3{k{iragRk9&w2{T7D)2`lSZmS=*euNsayMzHsmBXPuPAKK;KU z>NyMUbgzybOVc>HUhRbb(9p#!kNL8;_Q*K-HF=RDPp>|9Cl(q&H#z9I#bV;(;lSd> z@I7!Oc^) zS&ieErpf<*T!2Y8*2@-$5>E&os4Kn4!`6~l+2`=|%%-XJBN;!7zH>Zq&&t;yY8B*o zy?0N`gPYy&15-n|Vt2-R4^>8T#*A-zmQpy2-5Lh}2E2C7cId#O;*JsTi-QaxLy-G8=#*;zG zXXERiJ2!5et8lLD*}mr6&H280zxv?$FR@5Le?R_Wlva~9w|l>sUxhN z%BgQG*SFdws-OEKhQ&fbCSc^61^7WS{AOKAAgf7O0Vp9mJ~vR;0+_C`G$u*F?agF;x52t_u|r?iWfD<%U9365F^i-3w{J0=N?<;BCU@eTKp& zIl*KA=FpI1fQv!qySOh~_@!Ypmq#^MwM|So`$AWPL1vTypAqJ90aTjwLk~}QYE~V) zHt2SM?r4Rziqn^=B1a)jJGOZT1s2WVsNUon<9%5Ho3|V>w%e&8wBZnA-K({3Y@{onWHDAAdtL^RWmEWcq zwBUnACz}<$F4t7FOoQK{L55Ur;$|Yz0>+G}V2nq>_XTiXib_gqo0})f?apZY9 zA)=4gJrCVb4LS<;h6*iZTF`4iQh5;y4ED(Al25tlWE_o^)Y8*GAaO|yIlK4ES$a$# zNCJ^Z?RVEo6qxkukDQnYsYcu)p?9ntVYT%f9{VLgi;YGMW*cV7Qyast*7X9)uEw= ze}gfqo7Iq1jBjYrhe`l7q6su=sKHJUZuk>xDM?W9FJN9B8*&+vSc7a#vTqC(ABOH5 znoQiYok*)6CI*4KZ~x}1G={^iC{^{n_eoUTNx|QR4 z$>6?EJYUqRaI4Pl!+Gf^yd7ekB_n70!cB9Edv>J$g>?Z?ab|z$==a}&Bw7R>LQE5G zKcP9HxOheguR(En_Q$ifAvi>Eyle3>9H>w}?++0~2LZ{w zp_>Yw6&J-FJ1*jwGeY!_>v) zuzI3iHgoH3X6N^rj(uB6J2yB}uiz@nKR^Y8AVD4E;3;7!jiJ|+wN1}M zQGke(peEjktL@ z0u(Q-$^$}~<8wpAN?D8q9ryH{aGt-^Jo3Ll8=)p$@998w>eX3c7q+>Yb3gY8jDL6j z9cNk0PjzKO-F2#$2Rn024f+@4`fdwK@=1NZYZG_Gb2zzT{3vbt2b(`1P|+p@nURr^ zRDFri#Sz|L6Fuzrg60@2m*@7GT39%d(~BYu$WDygAMe<$XOE{og;1Z!$B=UfY?OdJ zS%;+Q*+t^fwQLWv4bidkUL#JTGHlZHod1q88#bul5IP|2ZOl^mqeDgkXLz?Ay_v)@ zzprgktxQ^ERn;smnQoq%>uvCto^>J`)Ek}GZL_=Und5$X*TV9OwX7h2RA|5Om+vX4 z)H-0)305n0!*p`<2yV3&7@??15^k~DOf?OlxtAk7I(q8NG#nSqFb=(iWLohb-w8Ou6Z7{4-+0@~!e(WuZ{pL% z(J^-(;h@(tZym^g-bd}@ur0D#0sLGvJ^1w4p7A}4_2jA=k{TaI-+35X^MxQaqIdkb z9J;iV2m?t;X&nJFKukmOF!O5d!2t~7&~b9g{2AO6XZr#3(V4%IDgBrugkyLCJ5tKhi_ttyy!_4R>o^_w^E()wP^ii$RNYbv#JaK&Y z_>O-23A+tLQ`(g=OZzmXhB`$<^&20v8{65jr%rw9q5r?h@>3^cbR;C`#U#odaFs}D zg-k9(Yre$x?Q7A>O;~Znok7*X9#JIZ5b#{%E&>Vkd+#5bdGX{S<6(^t39i$r7e*iK z=Kr(u)spAmQw0&ui@drrH|z!xh|^!1#fU87F>o)gAW=a63iwgCKi6Ok=~>lRFVj?7Sky z>1XKw+cqxlal(}=Yo{vzp7@MpgR&)_3ki|nUGc!7^BavVEY<*8V}UFxdWUo{_5v?Y 
zaQ#a!vTqlw=JRPJnn%oZeoC*YIdy-APs!@l+oqI{iwhYl8X|kzZa4)MZVY37mUS>8 zLE9sr3(R!Lp#U^rD=oZblHe5ltu0Z^k*imei$eT{zxdh9~$;VWm5C&3Rx zWa|H`>bt|Ke&6_yy~i;kg&ZR#GBQHuF+#G5Y?&ElhKM+h{gIRr9V3qH9YP2V>*R#2 z>~ZYs*z|ke`u?up^*fh8T(0weJn!eZpZk8@uX{Wr%qY&dgs&@k?zJAZoM)#m0}Cwn zv{6C%{kOC`A&)?s1UEw&cV&$@1uSUv&W8~k*LEYl3KHMK`2_t5dldG3IS&M<0HB=( zU1+rR^ge<#CV<4~VUY&coIWu_cVc*Xc{$Ymbm>|$>zEuSJ<*pzWg5NFw7e-A+QV?B zN{T>JIxzuTexTSu8mMtMQ{bM{6Omb8kuHw}bg&>e6q3Q9yc88}e)Vl>={52^Q1TMG z)lLO6^lhJ*{;~O;?@8HNAJAR*ECtEiCV-dVO9B zUncb4z3YQ$(=j;rZGfgu^fKFsZ2s#T92P=(8|~o8wV*(F1~Mq1$p@D^H#awEhXS>$ z0|~*P{4x@{1gfsC4hG}pv(&ck@LQ)ZJHFdd#MjnO+ljN^` zVnoj0^owmgj^5R=ypzobBT+#&bN8FdpueAi6V-r#BHL1Q-aw`uG{|u7%M=_nv$cg> z3DAK6@J!qgj|V&=kOdEEa~Mclj{)g}YU=7BCE9s!6O`-#-w6HSW`jmpB1>!1@P8*> z&tl(a3c32r3$Lvq*AjwRG8#!~qe-uuf-JybH&yHKi|3ro@bRN(;Y1ne|BrkC?Z?C? zcL2VlCj!g~9IshW?(wO=U*-q`+;0gTWRoEF1==A=CY%S$0kv=eyc87f1u9{+njFdp z04#wbxVjx!raft1bBl3$UagtmXZNLbU{8t@i3^N;O;@7J>SkJeQz}x~1By8OACSXw zL;wyEO*BB01Gh&NRBRP_GB7ZJ{tK$W)dsH&BDYUL?<6RWkeHZ=nrmrk0ic%*$QKB+ zaD^R$SPP)hB!8^$mmEMG73fy74a}zl@VOxtL;`qNB}q2y_{+GTUG>I&diIp8jYnqn z6QcTB)?AHodieN&r1AFA!{?Tk{B1i-B%g3xf<#dR5C8wNID1}FAn`TexWOk8;9Ns> za-h7V$Ppo7dFD1u9Zqy}f&YzN_+?QG+^uf;L4xj>$JDi^QD$N?hH$MoBgzEgx9Zu?^pF z94`NaY`MUR_Ww6Yt2=xZ^>+<^*VaUt6=~HO2?oi4zTBG76k|cBBRsQ%SW#giXaz%& z9R|TF;KFtm&>I!Aflxk(w}>ABEd-?i12u>G3I1ljUJRxLL>xxnwn`vujd@he)Q#A5 zw(L&DxBWfd1d#=Ho+6X>&pLcq(@WzQOt(4-#3oMtga>|$0sm!X2?y!Rl9G}O zoQ+^+LqkL2Z^1Vx6E%lc>coaYbS28Wb)OP&#a-a0qG+J-)R{1?buGrQw_b*-c?N#+ z9JsVkY_#}VEO}l0X4#lBJ{Il81~iBv(>&-G2GV|jG4pG@t))c;N;M2Xt`OL-_DxW6 z02tnUke>^9*uhm|C1@5j5is(7A2tn}4{ZEG!~CHiau3k|_?Ok*Pn9ZUz*R-VlluoFI3cfp0IFAu-t@t_L? 
zDY)NH#n^brbRgsbQW(5{_~RqP5B$`_S9@94h6aa8SVM-gnx9x4=`_4YITabS)ymHDu`Eo%?fOz6yTC6_#9 zzb{YY+R4J-T}k;bbTiwd;dWA?ECpxGD!EwV!)~|4?^K7Hgc5g5ZEF28i(&xH{C>Op z9Efh9>~+@Sc};Hptu!+LOBb7x2)o3(Ljo(!@1Y@>J=L-e_>0O{fWhkdi)eRwAZUtNaVRJC zW1+1|HZ?<~LaN+*?OuSs!HB8akO5ia?>Ef3rU9))CYV!Fxq*3_!`4-Q@v5q}s{c1y zV)RDCZg1D_ca3#->mn{VE^$`W2Z{9S6jM6<-QyxNFIj9aVd`iP)*FUd!IP-A2ujaX zAC6B-(7`Z@kFi7bi&9!C#1C5~t&7B*x!KvO;6yHYQ;}yq*QW-d=_W?ogv{J<6xviQ zri8YE1Dbo2JqOu;mgD=m{w*mK7j|Q8BVy~gb(BMSz8JTujEp5LJ`Z{xesXYccQF>Yj@WRL5dP?8pa8#ze;h=29;knDks3Nyp=-tRio1 zhEOiO&ZN;6sN|AkQ5vYwI5-lAaSZ=#l%uHY9ftG0{u}q}_TR*ByRS90Olh2C7%QoH zDceR`qwjru1AnPpKI6;31cPn5C?VV1c#-}2sdivup?`_d_T$7?tHBlDEnLGw21c#! zT3VQw4CU9UlX*nnEv-jmU#2Wq)UL_G;W+kX;*%fwuczbh1=`vBVRDr`)@{yvzGw%% zS47U=5-{-zxU69i z*zq$CJzr*EGD1JODa1|;tVi`CX-(Bz(AkE!^SnINLqZfN-;L1{DC%_f~Nyy zzsJ-j*Ky@IMA2KEfmCY7g~9KPo^}3+0L>dWt*k|1c0S5lsyE6?8FM#w9BKhlhO7{L z^0_!uNV>M5_9Zwfo6m;PwZa9K=P_hvPNn7HD|hV0wU+734v0ed%*q7_+>b+w8Fx$K z<=4TVU1y5lbW}o_6cT=zg9xd%9sB2 z9`$b@72SJj(Ao_enNq|eBRQ4rN3RF8<{5*c^QJqe3pa!cR%pK~rXK3`2BQ@Agvw;$ z4e&;9L{}Hk+vhL>cTE}}N60iB9*j#NDL0pUhF(5^01fU|YL+&9WiHBbDY~KgR{U~I z;aISr@cwyb|FXIQYAOEr6m|U97sS(U!WZVQ7?~sSR?uUH4e4A{OwC5|d>Eky0AYZ#04zrb_4 z7fN30{u3DUj0D{@XmOWZSIU@vx7n{G3*fH1ni7RkMAj-P1fdMLmT0UM{jY~nC5hK( zzv+?_qj4v$S1v~uwpbScUtfA5r)Q0?RXol5K>J}6mdjssJ)Mox{hGx+qXX6wQZPIq8>>ocNu&;CfYC--9&c! 
zXcmPbiQFB7Xy8<-_i5V4ZS&=T{&`?xl9~*lzSSt*A+)k6w{V2)XZDw_NGuGlz@Imj z0BvJa-#a@dIV+J$!ArIVwn~!J3|(EX7?Q+rWDHBym2yJN&XTO^*Q+YIz%l5J|I^md z4kRY&^vOiM%U#y+sXe}%a${1C7^XE*c5I*0MI#rd`4xR zAW8ygUsU~$e!dzY7h+H6JNXSxwq-s0sJ2)dWr=ORQ5b!Q5!x?BXQxPDb#$Za)KJQa zv@Ce#4|CY>+Ue9xbNP{aj|-PevexF`XLE?(&66- zEE6dVFc>rCO{A&`dE=NpV4Tc0b=mPns7H?I1}UwG159QDy>=~^qO?5*)4JnB*?a3& zrPCe^ENxzE)ajYe2=7bynDXl3TA#24UVvq~t~uYp)`b4voecCOZoR`_Am|mnu0}btl^uKpG_B}8Vm^Zbzb&kcH46kw4 z%3Q<9C|DKrF9#S(gI`7HDkh{&#J?Di=XGSNpWR!HqPEEVT<${&d=rQ;L^hZtyr$bY zI~!jVuKmO;&`tAQgbkyG(q+!HB7N1NDKUo_Ki!px>bV}=&+liL65(P#2fucPDkW3r zwiP-x+>_7;5cP>@T9~pGXIU!AtlaTIVNUudI7hQ|3|e@TY!f8hS_ z=GDh*a@H14>tdv~b|yHJPMC3?ApQm>a#uM-r1Tv>Tg8Tsnz~0!u2;TzSMzUFzR*Rg zFTWn1hWUre)xHW?3<-b5bA*{hm7(VhemqJQIbDHk$Cn;_bKl}|2X zucB!`)cWu~=e|x~W+`eE=qK`=M}gAHnDxCxNIK?Z{qt|R|4id{7nA3kl#zbpr&`w2 z-oq;{nIo|SL=Yq0sz|1PQ+0RTtx)}3<%satnk*9%q4VG0-yjy`fnV|8rN%I&RK1*otY`~31s@!_1aP0 z;NiTuNn#?HkgRUecZ};#C1lW;{r6ts_Ilod`MHv)^Y7d21lF$THOrB~ab^FOCJkT+ zB`f(eyxe*{JM(Jo^~oGbY6UOJXlHj+#&Ni=v!Bx0d6npElFZibO3*w^)#a6<^~l;T zy2|LdkG1Pf{&gz5|C?17$4c_P;YrZmYChKNExmtZEZe!4$xn-$xVy9%$%2pB`_;M& zFW+FOYkailuwwD7{z zv%sxCKw^{!tY_B_yRpx;BGRi~td!Xg+IjYzwmD4%UTgFjgWhrWaNW zg_R!CGelF&Ueh-J4oqm|jb;h?5A3TO@$C0SioM&(Uh{UW@r4CB>0zOX(Lhp_g5_!9 zr>Bddu$#F^1%gReM|Xbx$=w?-pq}3xPwwvM5bIZQa5KN0((FNn3LbS>%I~}?Fp*}Gltneaax{1)bxViHR}TIqCUk$y*YYU76M6|vTijO73zvMJ4u3X1G3N54kg zd@8T8-v2wkNPhlv`G>+7p$|j-Mg?NAXt88SO(sOP>bD}2T>X@`;rwnT` z`OhP_^Bd+U`s)kx`nf`n-!970X&EJD7a-lA4rhHlfN`q@S=A z$X=`qIQg4@g2k%A&UEuPSHm5rek1|ZH@ZFQl>S+d9twu?9B#-BbAOH+L8tgI(y@Yp ziyHGtDMIF$XT0KDZtF$2MElGyCEjq^o~<|DBis&jz*VZGGzlP@B$J>`GCVu&9$yxo z5w-W8R&i^e&(%-9QwE?BX*w-O;sjhcDOv0DcFDvDhC^4`xP(q~xPt zLmI84J~hZ?!w1ESkaP#>8wf_%*RO)6C^UKoz61F?ND zu75R#)>wHo(Y{q|^w%e(Mfw-=@!RvJvm14Xm}eR7A+rL)_dy7&Ht^fq@4`xc%!yqZ zdwWxcLBcWxLrNt9LNm4sx98?G^4|?J!HLw6v^(LAbXUdqk6+KM+eNP@>!af98tp>d z)ajhdf5gkWLCVxNNPtqlXKaVFGlCI#WgW-T)%Nh>1 zd;WU?`#}K45^4v)IlZ!fBtBLk8I+I|omf;voxO9HTSrhQi6^=`kG`TWp?fHmG9Y;B 
zVnY*6&0p`U?pfnitJ{}2=NCr1egK(eg>V^>bDc}{9$^aC)?{9@w}q?6Qk8j{i&CI_S%H;G7>$vS-TZ=)gTyw9f zo|$t>Oy1;9-ORFY<4J07myU{+o;Jiadt(zjSweCMO_Tni+V6y=1vf{BiDQvp#4sLGnMqT3i2m&Yl=g20L+Y{9{DWN_npMOJ95pI+*tqY5Q}H%R|Bu8YIp@c7uDnd(EDc zcY-)i^+@{GsE^{gzwyd3ghI2?rsmGE5vG9Fu>Y(qC%XH+U+I@=IvPrF(ft}UF`>^O zezm*u07KEF?ZFeqx1Md(&+Ip0&lVd#=4gFYiE>JkLf1C}t=FB1}h2 zRUPrh`;%M9bp=hB}d~!MldBFXCwZOiVFV^*k?}3xR7In?d&2XV;E*)XL9>b!GRN zuKRZX6kF))bUNjW9!DOUY>Qg^fse8-03P8KQa|pVS$ha&6anX#dDbhYEr}Pc(Mf9W z7C>~d22WQh*RW|lyzFzOg2n%fOnSPuy_+{xMj06!E4i zw0vgf;8X!J#Fy2Cfx%#8B)_bj2ZUP;(!T(Xg2It((x(T;^l>L}^;cs~JTQSf&d;YO zkBdjM3?mH!LlhMrF>l1vtko`G0@Le(0fh9p|HrK5`?G;(lwbA7yvd-CkJ;_vcT|Hf z+K2ZoLFubTh7A2|BNvLXjKvH7SXb8w{lJ;mrm-fZ(P|`(9rQxZv>!f_%im3}KSZ$7 zz_NM90@$yuuP+-EV{onv=sQ|b0g3#uTlA|^xrDravGAR@ML3jI1Q6cHW2OJgvQew3*|4|Xc*K*Xzm%u* z0|l%r#GRz7AM%ZjoE0*f}WQ$)zYCIRX@+$M73foY2q+hg+Yr zNq-b9vw04sbhp)wcmj&p^J{Y{7+*MZDtzFsirC;}UycJjK%SC9s`U@Fk?{p;9&A-VzpNOr(nqz6~ZsW;z320R}V9U~BgZpx*USi7Fz~ z+-pXRjqIax;D7=+^&ik)Mqn=?mdl_7Y40iZh0DqSH;RJB5i!NOnW+L|x2lY+>F05c zhldw!F_IMQ?Yq&t)H?mcv_WHcI(l%lU_x6D-ih;dd@pPQ8r|dc^g$9%*`^OL5>Q7< yPa&WHJ_q!WP6Qu{xBlLA6AlQa2{wa)&rJ<|WVxDMhV%xgG#JI8X^8Ma=|HE6oy1G}d z?$x`h_Bp5asS~BDEQ5+fhy(!vfhs2}sSW`FS@n6Ji~#r9PN$Nrd|u$bE67MfeEfIh zcagoMb&Q|5bp+tfu)3Rt#qVDpE669vqq|Dl5xFoz*@{tM8b@ zclc&DHIdWi>)F5%s>GopoaF0yF|pBhnEmvY>yZonClpJb{{ORP44%z$1?Ep^y@WZA z3FK2PgE%5hva0x(NRFC{iW4$%1T7<@tuL|CZ14r94gVKEhiXwxLBTH1JlDCTAgjpD zG4jH#HN3p2mX=mEJS&Xkieg7LG5Q7ukHlwZ>jh*vNaKx(;)glIP0Ns{1vI)|C;Tg( zHcPc`I#enJ0)=FhvTIvV%Q7=Fi+klvle&N|NKTeydEn}li1q*@Fn;flVXKf$ABEcQ z^;E!FBTUaeVh5={VvG ztYrlszp(nfeLPVpGoPndhkR_=3&>FSocHREYCD#1@o+;ICHBr7m;1kK#Th6{0RSy- zIflTlht@8}51$9N$2XuO4mRCcEQWOaBo|e-=Y6>#Czy1v-L@wNgP1wgtF5Huz|HIl_hY?%Pyr#a*f2Z6us04^Ev{A7Fw6@39)onZhJ!L-h~ZG!W|cV`qbXzT z&j>lXO4^^bw#m4;7pWXyp6&!)K7g_PmZUKl-=p=J@%A|JjeDb2-X& zVh2-FQamC0`wliKu2E{S9`msBa?0~YJAMf=v_O17IY7}m>^$!U=ixD&eC-13fV#(n z*MnYyBZ9z?5<$_yB2a*zTSlq?Mp+dll%wA-OB}hqsn8 zXJIY3DL9|!XB7x-9FjDsxGRzdwo9AY!@P^VcXZgTxncP?e!0N=*aEP6rojx5?dAS3 
z*!CB~d2<1%bLnCAFqjo&b;AuA!NG1};O>4W5{h9nldt#Ev20kb-ThLf*YG-czGf8o zq~<;6dgrR8D2xs<*?o8mMGoHx`2ex$Bk=wE;q#$t(j@#&YZBGHMCZxf*KWi+ z_F zd$F%Z%qbl`tHZE@%f{jOz(RZ~W(&|`F?uv^y6;N!IJ$q;c&Kcc=9faG(A3q{WyFg% z+=tFlZ8jRRfp}L=kovtyunO+KJYP=V-d8K;Tf2nC#9czx>E|ddb|A zp;k8OAM-hnU=J^TJZ&`UbBFN)^d_4l1n`ulbn?!>u{?n3lJ*kuEoTtAR0DLV=P&Z3 zAV3a#iPqlX!zgQ^iDC%_L5>zH$`ZQtjXfIGSn!D8cwIUt=)v`t-iPX^&B1RA6Bq+qSIZ-R2+$DgqIyHm&8 zhd<6|-rF$&*@0M>P+>FEZuYG)3@$PkSfN_XmHvunW0?j1{lQ?{%73uioWhfX79TBl zqGw9?^FZDow)G#Bq{z2Gp=qVRg3$wEJ6L3lWIy}5U@w!X0J-_3^QO;(e^ zCKl?Q$B;P?+sF26w|*6>QfFchl80 zC0<}H;k-HRCtF9Kh84V_LsU~t`I)%*eEI{n_4Rd`c#u{0>pi#e_o3Sm;os)zROnfM z&CsXV->;4Y1Dy6IsXkx&Fw)Ry$gi$Gccp>gNuIk7^3eB}76E89MGOJCI*^XCq>#!# zTz$N!hHW?22&Xl^&f9hw?7o1Y8fc_`i6ye@3Yudwct|J!<5zxt4-gCgNp(Ns+Yo z!#f((GplWCsn!8!!nn#UEf-E01zT?Gf(*!44|vdx>o@1>XaviDcQQtc+SAh6Ix9k% zW#f&!<3JRVFiwj@l=Co`F5TfDD4K7r)0~aG$Iv;2_hN!&S+}QA{^_d@heD8kkaQNe zbW+2wx>;?;%l~!~!Rf-?z6J>;b5ayp)9xa<7o6+kccLq0QRf&!CW8*Bx*Ac+cG|P_Jbrmi zXhs1K%k6!Hy|aLpB$*Lt<8EmI8j($J%Em7pQg~+$3OdM*Qf|exKv*28!pD(4RV7Bi z&ZC0wwD;q)*B@yT8;cQM4 zxBk(ir^cKL-YNR7zgBO(s6FQBH~fnKn0epZ8tR!4a3BM{Y$NEUx);o(f)&$tcqiNg zU!o6QSN&Jsh9)@>nQBNAGg5l&Um=mQ#X39+SvEK-?Kf!UZdif)&vAPqr23X&mv`IT z#5f!~f7i08u!Do_n`+V%ivKzGaYpTZ_xRHD*~9Wu@Vcm3)5jeH=qt9H#IqTX z#;x~Fv_Xi5(|7$WcdH(0is)h;*5wdOj=$TcDYFZF>^9D7N=Un!gM)xK(iLWp>6 zVVCAumg}iE2O7>5bW^=!`m&h;yWie-FrNc*`{ZKX5yAA?e26zq(SKU)QQnAr8ZOXg zOd61e$+o_HpLn4im887F-OltACVLx@nZ^bfJ_VwDQEyRrPhYfMzUe0e^$9(^$Nu0- z)slZB&zgMyEmZik-+`u3Ib8#iIF?iG;YW7W-(0VK9jO<=7rECcs*=&rgcp~0qlgCMb72(ag?hf_(KStErSrU@%!LgC3gaa>lc#H5Z|9aq z#w#04M*oe=@Xn%WjWr-_&qZ-t^RG^2b%lo0Sl%~oq_FMid66MH+MLPAf9zu@R%;UF zjrhelIofn=4qmaRM%_6rS@TPUCqt+OOsi=WUUXa(%$FX0D{sS zkKIp{s8r%y+Oth<`-aYgRZQa_U%|0|bp{j)pZxl{e@!+L_#z!IuRRsS=Ax9 z)M$9eeg110w6gkYkyd!jL}Ka}4i7kex;jb8&@$6Ru0+lB{Q;?s(m`1~cjle`c;oRQ zJWq8&qM(NK#aq9}6;#>s%Brf@0LGMOH$hT-m2>-`cV|&{&>wAkeVmhuwj-vR}B;03^ZaGLAe9G?pP?h_o_O09xaI4T)X zexMU0n!g*mTn!Is8NX59xrca+lM%%N%!^V|m_W}ejZOQTWr;lMj-LbmqyGBRrZ05o 
z^}gM;Tv=L>`9yxb(ZixH1-%*yobq5K7bydqVxeY}h>0SR53!yq_&TB9{ge~|?X@WA zc<&GkfX=Hu;4uo)sKuI{8Tr)!F5=jwUNP59MQzh|VU&?0T7T%t>r6JsPZv!xmaI+(jIOsAw}&0H=f}1mBbTnlO=4^gU%mG9v4T8o!uX4RQd**k zGz@BrGvk;dxYeR^t?kGCrklHkV5iL|wOa-&gOxr&pvm(Decpv{gD)R>_1c?V- z^->8I&i;o$SzoJ9v?Od~56mE zF+RHio%_if!a3DR6rS=dY;oCk1kXFJ#hl^w2nl+P};`0=|qdvAVGNAU0%P+-~Z#OjUs;oDX6q z^KVhy%i#xXvZF>+AkaB0ttB2K9~g`dK#y0x<6ys2o4o}Ml@*r2GKvC%1TVnyK}x~X zrqy`0K?Sv1SSa^4;;EF0k2lBETTf^D4u4w#6P5M(#C@ZJHPTC718WKPJiQgDYkg$T9 zVbQP~Y2sJly0rj6%i~0ro2bA>jXT;i6#s}^(jcma!^wGK9eO>dTxo^ci~{vDOmkpc zVK&>__n;_u0ox{Q^l*evAg?$+!WWjuZEN0tT}8zkMp|D4WCW(e&C1pbY<2}7&`hTu za-L_VSN*Q_5c;%`69A|qsK4YJ@>5CuC)so*5j)kTy!NqDue;c2S&RLaQq@t)<1~_D z3W91hb3ZcP=(KA#9=re73o?2mRP#O`sBb>IBMgEcI|7(}V;iO?-<-eAH3XodXhato2< z82S107v_eZ$c~PBLwTvfqXM>gc%a>Br(N?^p|?L|f?k&!&CShhsWe#Mm(qI3yDy){ zWm8kmQ+v1x*;Mv946BUp{V3-bZDg$rlX0<@Y7}%-AY{P2-qfCfAA%?pBf5!q=eo)0 z7O_Duz*7Agny-r?t;wbF3b<~VOIm{jnmfO>^#O750s9WxKo+4=uRP~FVvm_`wA^T> zI{vs?qx5Q~-`E*(au}lTvO09uI-}@vjr;mMmy!qmn`IGDb{Jn2n4QGb>nfJCGHRsqC_5du$SFz9#zX$cHBJ$8*NU9} z2=H`(1T9Ci4HxPt|8By!tw@|f!laY01c1_|>r$GJ?IHXNsP!A`9?0zes*)0+#|$96 z-qg1}uBYfWUc!)5kLl-{SshBM>P~}(-KHO&>nCL__D+Zl)K!KlpVs-j&ca8)(AHEKM!sR|{8WSghO&H_+Ef3v|zVW8UmGo zurtxXVIV60?OVxLMMU;wQJ)f>zss#PEU^~Dvz-iJ@Z9EnywfbJu2l%Xu2z3&=_hnOM9!_pqd*OZw zGTXL+unFgEwqb)%=Lm~+!E0MjG^ICSlJ(+oP}x;Sd!c6!khBXtxA2L}g5A@3-?gV6P>GC;~?my}Ad zr(U&BXzgjeY4=tXTRcCxh#Mqbxu~ps(7E^1nJXBJdlCG<=tT z3SW#KE1rm`?e^6JkQy2K{52&Azv;BPX$~RlgVe!?mfuSemJa&kJcuFo`qhb<33DZW zNK8pkJRb!^-UNB@Kr^mcGuOa~GTAkIXZ)R=i}K5JdpP4t5UCNOC}V0%k+;3KKwGCc z!ne0t=WMTRc(pvTPk!Wt1BM`opy6#oBKspqp@OQZy{ky6EmVDO__ZD*M`WZaMfD0I z8x4@FomhkfZ%yWM&V0EqLDQ*(OElao=1o^F%S7$j$=lBv08!>0!cY*>4n+O2$6lAz zhZ4Ew5W7-mscVpfC0By_$U?Xlaw3s73w=wq-AYtPitt6{-!g|40?XWZ|sXnEVA$t>0X&b`ke|El)z3etV^!1 zM~O$siAGSYHp7jq&fjd*-{dL=kOzs?5eE1z;BO%33~dZmV4#!dDb?-f&|$5*0oJ$* zGlG9;Wz0$ru}^5~k9X9?R^n0Lv?U|>I>i4DftV=!8`g$#uY(<0wk9pdw$p+!gq}1tl{~-!m`$doz_)LHf zl+IzdD&1ZTqOO`BCXw2R6gX&_I7Yilh-kqLi7+n2gj8TKQF}6Qukpo5i$ThRGYA>{ 
zAd&(;!vFEjy#_y2a)m#nM9+_pu2_pYJ&t^F5TFi1kMP=#wBOh1It!~uwcX+fWuGgt zzCZcCnvsPWwU5e~d19ka!&4K$l%eD)-M6)NgK&Ce?Ta1He;_xdF2qdl$r;_8h({n? zaNQ+y>xRDTDS+1j_ZE;t$uy2Y9{AUl+7B-+8W0Y;#QJG*s!`;E1xd{BzMwo@2`Q&T zwtDEr=D!hpgM*!*3MY=J*o9d9@Dw!~Er$z-*3}B1~UuQn=tUso|hm?G2YJQ$5oi9hwA4xA!Xx?T$#xm#u{ zlAfYGJVJCM&O_G@7J<+x14i2QIiNsOkm-j7W^GZZV_YqLCJy~i2Qji^%-XTbk-9|} z%VL?((W^pqXEv@MDv}UQuv>}EgY5^riA10A5`w|ZN>4vL%38YPW>vGqUqXXpv+hp} z`;B9KvKrmQ)uqjKD}}OU?YSi=ijpZ>cnDW2Ro!t(J7H0)k9#F6g(fDy&mh}x#}k=< zm>%S7YzX78Y?~&bQ-U~O60=qA#r>PPsyryll)Yes!Nr!L(xmM%ax6;Z&TR*FSG#6$ zp9*J3Wh#G0LOMSD$lBs*(|>S4B0*pEY2)Xk!S8^++YT(=e|lZE#3HZTLYcMCOvh78 zkI%S3nuhWzR*IOK2Z5v+uEI_Fj%r(1SMLf}e|3g09(}ni8j$nsDB&QA@uP**iZF8>sCb!(#j83JH2djKu@Z2B{?X1L$qrcQ^ZvLA%z?}_R( zu53&mH*$*klp(%39pPYEieqbUHB={%!p$bF6G5O17so zt^Q~!90dO6lv>xEa$Ut{H3ciXQ&in?Q#<1zy=;Tg$GbYt2Tf-$5}}C~9?8z7i;tx( zA|;=tbczR%+>@)Ym2sCEXZ(u7S)ROM`YRp7a;(|at+|D|ILA1s(Y7xYv{(HZ$V(K7 zVNir*y&vc3$O01kGT?RjkF zn=yj@qGC=hzq;ZAd3+(poU;6xuEV_R|Wd50K0r%oI6_vh1Q_U`oO*Lg0 z3G}#H5*^s1HWul#W$G?jW(rUo>TC5KuJ#jSRvU-r6+vmi))aPNXKF1h;M6ydeavZ_ z|5pou6}}_b;jMq4E@1H}$XYy{ah+^)s0AZisoJT(hbnFcz?{0PZG{`Y?dYX>hv%S|_JhBX*5%seO0( z5WMg`V#p12rT}$62lBZ+F9^p24iSuRT(s~0`^x+E%24uvb7Kz=mTk!l7j2>M<>|rvNg6n zg+6Z8`j(@A+J2^38DT`)6+2SDQWQL*5+ zBu^{Jh$>(iab)hWa#HrHcB_Vd5UbHUR3hkEdL-bFfnH+6eTzh|^1;F2ge z;KKdg2~5>l=foiLG0V-xweWCCI5$R^kz9o#4KvS`_QldI=1Z(6QJDU^}4nv z>paC=4$*U#;#VlE;7+Hl#S0m2p-Y?FGBLmVzJ5NtG1(K_F+o*cF6N>eo=sid+L}ux zLxwnOT5t6dd{*Ri@*ZG0HH|?`jd4`8nj_8zWX=0&1 zay%6e_r`7j#lH64Ey1?+_DF4n2T{REFn|+{a2&wKmw8oa+r=LY>uQkDM3a%-mr7Gr zS>RLDhtGDs;mv=#O2Z<79d?@#LQe@)7tS{7Efw*(!&6}h3vR+y1HV-w1I{!%eF70| z9rnt5!85_5{t-$CEp!3VQCcrB%v1`x=H{s}W8axN${_4jF)_O^#IIh|Z@f0**nL`I zaR;|nyM6e6|67*feFy+n{4*)z&IyIm*AuM29u!KcMT(9~`Hh|zz|2{qv_EeAf(d3e zngTk0Agc&IaTvdDNM(CJ%e}R|G8Qjv1q;a+B+IWiYt^AX2$0G`TZGnECTU>+X;Jq& z@Hx6tq)XeQ2-`^iTIc-uf7so5Ai25H48!M$bjuPis& zj*+a@+K(dNc)I_WP5!!#_OQlF8c36u=|-VeK+0%g9MPv?d!^#IKGhs^CMVlQGkGGKtRXhYwd{9DtNbyOKpZeIu{Ek%e$^GwYVTS>7(^Fb*q^&vJjxgZFzz~&6!sZ2FR+Ak 
z=kU&&iix#QFs9I(r5nYus(eAV6?oA?_gugKJ<{yun_Op)U&ZKJ+gU86#AAEj_g_{e zFxVIhZd5P#Wu#iM!o^9vKz1DE+ylP3v6DumnR`+G_+P}X`@~9x@Z)wR4nbehoEy%o z=Xxh@sX(x&__yE(?G{cCO$x|Sc9^`EiQzJWjo!>A7R}|Q0=q#^;0Ge8YX{%&EO2Un zkL(4(nUlQ{q$z(65-_#D`Ie6424VVV*D&e7ta!Qdbr#sTH&l%~_fOvFdSj^_Geg%W zf?%_liw0p( zU-*Xfbyp2)kj<3 z$hoSxnvE;%!KnSFP4JT`-Fc$<{?6j>aGQ7M8yZsG)#mU1GI6LReAmqzl-`k)^cWQn z(;LO-FUW%5;UTq0RY!TqCu%cW)4n_njOYFo>iOKK$oUBn2>)Cu#KAd0=T{}RO!p*Z zrq4fFXINSUpnl02eM<*fes%PDWEjgfv)o=pVUdGmTB>Z@lkND84P zjtHW3OlfU-_v!KN@Q|qH-S_EEc>8kw&TaY@5TF_B#~LX3hy1Ao&CGx#ft=OyuYk4l;(_^%482Hu=SLr zA$al}#}(Uy#)4b=a@Q8kU|K*<6sS#z$vxrqcO^}?UpN)LaRe!sE9>F4In8l_F`&nNm=AN;Kmi;kBc!mxE z^au@er)Y0{tuFm^qid+?sk%%1`hH4au24n?I$lH9=wH9CnVFVRbx=sR0;cB}-Fz5q zmCE|u71qToMmW|IiBwZ&5;cL!V|7uOTP?4biya#wq^8>Qa~OKWW^(OuZFt((x$`5ibna?m$!(kt_6z_C4)5f%`HgY`QqQq{+*r zb?{|VWlvb**t$Wqy+DgZ@S>^XIo9sZVhd0(CyI(hMU}^~p+@KwQ(3MSP@FQeJFHKS z@0xrFrCE$al@~qpmzB(+3@iO8A)irBwm!Yp$4q}!z~d<}m679JdMU+Q6DPNiDrPQ9t~&i;daxRS z9hI)klYd<;wohwxl;Rzxe>B^(lzd%nw*L-N@TOJmY;gKYl%-czQFe=pR%&Zk{97Gw zBks9N{g={tHL=$i1$E_Ej=wlXf^0l>j#*Am@q-QWp&t=`C(`iR%9d3$Szt6YQHe<= zCRhNq+Cq-uc&b=c9+M_f+O+NEa60c}mkHSE@?01%+};6fF+y+jJ6FJWd5?&fNv93R zG`$_5vUczZ(WLGFd}B0L6nUC z^|}aZHOaq(KKd#*0*1*R&n(i|&$q?jrF5m2sHLZTgeYLe84g3 zwP&q0a(lbC6F&#oc(8L$UsD2J%j=FV>HBow|8{-_7gRdO^t9%*F~f=qm=6{pQ+QI{ za$D}I&xt+ybO{m=X!$ueW|+c!8P3Fkj#^yH6+gUvozhT`QL9a^3SjMY-bhn$abXA7 ztI1a2jhY;6_VNprOF9eW!LA(1n-A(c9i(9ZT`xy`e}|4d#KD5Y!@3XQ{>bzaPm-OE zQceD7^GMt4jKmuC&rxo(5F00TpPeIl*yp|fe)lz+ufKo09?bDvZbCe3&! 
zMl^U%=j@YyWt#7$Hi-!Q30)P4L+|ggYdN49&;yJz6MR52`$Oha;ceFn7J~&-R$vA^Sr2Dx&!ebvs&tLCJMQR>1 zk(1SBK2s$(CTKa{wY;RwqjjJb;>>CNaeNjd9-h3GpLpraJ}OSe793-d;q0?cGNT?l zDVm*~m)x{qk?P-xHKvuVOR=h9a}G5Y#htuiDa6UZgIoopnuxMWcamW;&oiE##W!@3 zsjsDe1HN9b7lYk`QLHu3u~Hw-SxYqsF?%IyhFaX)mlejL+%Eq{rg^!z2#(#>S|lS7 zen6^^CF+SW;}!ZpS*F)0L`0g%`T22n(R9Q9^QXq8uFkcrXHvlH?tgy)tI#Gnb3uWG zQVC(+(PT^KW2#7$dD7?zIfX=A7&LSlKLYZ`HOdo5aZe9{B^MLu36ti~=ZC4_2gi3F zm`LfuI;wcVYukrSPY{VO8&neY7H zUhDCJKyC5A0+sAx%!nzc^}1#e!M2d4W)8AIm%qOog#B2{pcij`uk%V^y1^TEnyckti$dBL#x1z>mwVMt(c#LtSb1uc))BG@oosvyyntU*UM zlMm=-5}@<~I%VFtsbpc8P9|HWglyW6l)@xLlGLj%Hkfnd^A^ivq6H;^XgeOx?Ms_z zcl$WLgY=O>d}BZG_TP{_c?OjJ-i6A}c2xS%UM?ld1cY!?6N0cA9pOAW%-E!;ec5C> zVn9A4GXoj3@oqbkzp#R_80!vO$yR@5dHTn#Ux{+2IBQxzml9a`Eu@zn1uU!tvT16z z*8ms~pCTJu9i_t!i>tpgOS4s-WI11`rCfO8PWWgdjW>T;-0a;JbEhwcdeEwPd#y}h zEFL+$Sy))m^l*27+dppsIc&x{(;^zU55&B!II2_i(KF{BVJyQ{*hH|mnzHFF0ZaY_ zU__7wARN1;!jt6=x4WNO6AOeNq*OkQ8-Iv-GB#NEg?2q~C#V5lCHQs5jfbE)^LcCme)TI})GPk*cd_wX4+~d9XCoQ)O zx4!GAy8Fv&2w(uetJO=&Q748CBKsrPQ~oSYgDPhAyMsZ#TyPCDhMfq!vMy2H@?5{F zsL5Rh8OZiu&gRm<4`xJ$k+5cX|9PQ|ND((x$l8hGau0z=mF-;nJ~#L|6)G8m-t`p4 zw+@)1C1HjlWeQysC0)C3zZVRm^>fc_W`)#5)?GNhCxIHMZek8k2^GBW5ym7#j z){>!%@W+e1?SlzQ{_fI%ExWl+ZEBv^+y$@xaq6UM%{A=?^?T46ULLF~p_Bt3dWZbW zl;#K6QGjez2?(=47Koz75jZkXl1hmc^?muxv(J-B!T^pv_u_y34}7!@P*lTWMEAp} zer5!O_4wmqBk4OHyZw^e8@4AilA?06)6s&ezV$U;3O`Lmq3)7{`qivH(|N{1tCaoC z)i3GGIbdcj;C zYUFI5zUN_2F(d*@u5GO(V4lv7nRF1%HW|#P73kt&4=WY9pAQxIGPFV%dgf!nCgRFJ zb!@%}Vf|*hrXEjhJwI&zgHZrFPn$lZiXib=8B8uuq2OnHo<{bgD&5TvY2Vb-DH zhs_DPQ~%lc3ZrGdh$9@aY!r}N=X(y(N!Y_nq`w5qiIsBQSD!1Oik+^`A0P!8p2GIe z)X{-9gPEw-Ymv@0$8*d3PKMO`UyLRrO|p7!>Pcql`w(++^<#_wLyqG7&tfqq1>6JK z)vn*E%(+&@uQhz2&>e^0?MrFvGlOfQSK7pWyh`)4HAY^Qq3-)C{RrC2m(v};gC7+# zC1^<{8TZX0+RqP{v!CY!!cEyN#&GJa=Sw)nWnBiw8qG?VT}CM8znvVRe|0-jJvoKD zN^n)#KQMkZIbZ%%(g|gw@quof%K^4%On{ocX67wFe@JmTM^}D;!*a9vF%s>02V1kf zS*GQ-m}hW3FNDkA;bxk}ySFdt9ErNfhmOUkq;8zDUL8qN 
zI5zn{?k`gwcch5HAEX+Q{c&%6jvxMQbR`;w#DW%cK|__oh)jgB0WK5iU+3%*$MtQciT-Kw##7qx)nqnG_7D~u3%IT7{Nmu;!EiZ1M z7bV-_&NzPI6@y$G-5p{8*VBrU^Me)q{Q;ULl6H4v%xs|yJe(Od7DKsKij7D=yDT-9 zPO6l*x3|k!G6Nv2Rx&^KdXp$BiWM!JKlYe2-s~}QR#TH&AYSC%srzFSc=K53c~eLP zEN*X41btkw`Bdjq?pq0L23Ib*t7)f7A_QCLLOy)fzxEWrZ#nibFzdH5oUPa=lSBiw zJ_{sLhf4BoYkB98l*Jz=>lo`tc&qfsi=wbSP>l9nHHUhe&x72He^phov_Xr_HXh+h z$A4M*A8-7wM*;tCo?6v^Dbh-C5uX2TRF)_gEk&k?9Q>Y~l9C|*xf)Fz-zVTS=i0Q- z>9*tFUu6uKQzwmt*>qub-l_EdquO~lCw_98T@-G|bD9Bs55Kp9Lj)z>mRKjsQrFsN z8L&p!uPf#XW)wRzBihJ6Vl(@(%I9@(9HD8(W+zZ^;kF?bE3wZ1s$VA%XjW6sb3RK~D;!Rc9gs_<;d?HVc>4QL zbCAaC56WOSpd7c>Z_(r*FM2~O=;&B?E>E6GR=8^lL}}mF)74T^@Z~z2`5d=6KtIvZ z*$=i*`rD0S^&+j_vI4oji$h^3{l{m8g+;JVr_Vz*Z`XvVR$n`kPK5ZK6Fehq|2S-O za|Ilu(Ou?Jz(w1kayGyF3Hf+^#-uQOOF!<}|xr9gE;*5{7Q24e}sc@nfqgU$A1V`GBImEX8m zfw9k<{B=4padBux$uXN&f$HT_)L4|}`PIK4kb${APc$VY?v(0uP=wE4JTD7P{zUeg zGw;ywBP_ZWcTt2ejuy=e-~g9;9+7-ignM1@o-|fp8f$JHS30eQk{)u{x34OI-a6Ps zgoKneWWovbiY1<#q^$kz!QVbYPIC8_EqCj_ZQ2|1tBHC273t4n-!A$k4-@X9z2$~j z%`Tt<4}ZtCYrs04{?!*VdhnBkUV<0aRPQgho<72P=s)x>$erJsKjDWaY-t28ee7-iJW}HP+{WP8&UMgsr}D9U^$qQsZM|_ozGNY)cw^VU$_tKke`l#I&Sf z6U0!HwK`>JH^Z5ypf}MHxpv)UkBdI^Ql$U@d|X^yg76*d-a*20Qvy=@0~fx}D$_2V zWno|NBzC;eaBvb_SWcUJ{oWNLX@dW`TOKh|L* zv!OGmPR8iWI2xLjdUu-7ZG4hXxtP}qEbT^w=S4aJD!wq5!sz|TLiMO$}wzqP3M zi>`BU&)LvQ!IT_xO!VYiYLuh9l2FvjxgP^ecn4G};{-3@+YK#GwWI0YP%PvS*uKxO z{DrmKm;xqCPam08Zo1tCQ67#1KDwGo|J#l2QkA~KOG&eEPshT`)_qD^T48gey+%KZC>fok!Knl}?vrTAAA zSZ}83p}SMhZ~sZVJA+^&VufSMt+`PY&5B0%Vh9Vlr5b&i^#dhnLLj9+f(_Gn{SRwv ziWK?J>F?yD9!%QUAgU=V`z#FiK`*ymDw(&YOo%Lnl>IE*5=AUUOJ!ZxqNn^q&T7AN z6y=R`O9~H@EamCR%fiA!s`?0?&$B$stnQi|AKiB`LJFXmrThk#pY3qp$K6_7wzanY zG_dGThBcX9g-W%MR8~}sST+d3l>{yY(07fUikJ8M!gE(2RG5S>H1pUpru=1RR-C_0 zWPWuAabzgwgM6Yh+FmxQs!)k|dXKJad0W<~I`^)|NZ6*-$a0r}cd38ExN z_FaPaUYBn~jdt2zk+@qM?~0>0pNE5c+B=kj8#n%1rN?Eg&Cc#@mim6YCx8F&gY*o- zgU5g@r4$RQ@@a?iZK!bysr7k}u+S}{C~tU7a00n6&?Q5vLwcu;npU`l>JW&{&`MqeFS$~pSqsd(ks%*ltO3pL`r-NgzsPrjVzJ1KP%SMu^v 
zIcW&<2%6>(9Z+gQhan8bwYcW)LjA#UfG$0qREwtl8*epT-D8U)9NVyWKDvto9=|KU zh#y=?Pz6_!%BE@LNR>`Vf827!cC^F+Mn&>oQm#TGb+qjLT%U>=h4kwRvW^d7#Cifh z?7~NjfGN<6gZNtt()9|Obw;?1b^sthEe1@*A>uDJ{@uKRucD%&(^iHO^V8M)KqP;> z7_xL}zAzM5Dbmgp?_zfc)ebl%cS_N_-`@W?(0|)hKWY1^Ox{^gKq;y`xXc#N`rwZ1tEJ0EF0vb9~0SzDSljpWj6k;t_C6AHXz$9Z#J48hl3_TuDP`}IcVaaDOl zhT`&MtY>%xi1MOYJaFa@B99F9Uprb7*0N~M2k8W~eHfDCR1@FMYsJ0l%Zvj5uBpTB zy7&og%~5WRaq#!oR+VJe)ZKpX5_swpn8NRa)dF}ufsLh&{YwJhrlem1K@mOx|}t#6_&JWaV;IksQz&0Li{ z2T#LISL7M=5lo4~6Q(U2CQ~gY5v!SGy7>+*my+m`cVNiFRI%Wavv&RttMnr3E;!mxS#%qPneO2t z(_bSs+EJd+66?gu#Vy)&bigYB5bK$wtKExSYocH}kI4JB+UGINXm_El1mPP7)+HWDMp5>H!vi~b8jf)^ zesU71FSOvl7&4SW?X!ZuC&aYcv21<`K~)n8DY|nU3d#o~W~L0|S<~Cbw^i)@Vz;ff zYHly@kDq^$XS7>)2}{q<`+v0nBYn-;a)o_zUk16@xy2Csuawf_bnmE{mdEOwCM`Om z`P$3z<@qnfX}b0{2FZ&X6UK(rc~XlBY|svV|6)LZ1PI;wlDHK0Ve6&bc0<@Ih9MVj?qeOA~)~mxRA&rnuUda zOX|+{A8ek^hozooYe<>(dhgo|+_M%?aw1`wVdr$Jy8QYj4&E~CkuGGSB2Pque*YhM zu2AG}7vEx-q5vT!0iCqEyvL-W&P5nMXQ@sfXSJ-@P2FNe$R$`ElSY~9y|c1u>g=S9 z@D2dp$YXHP%@6-4z%4)0fZ1aGBd}Vqv_VSpwvDnSd1-?!QSRcl5mS?tKp0NVcYlI~ zn^e(*tn5Ms4(W3PN~MtQ@+VRSDstGq1t4wPsi|mN3Mnn>DQB1fTMWTYHWCaV3o5H;hJj%rvet4(31P#;Rlq2_IQ`kn_s?kHw_Kr`N1I zVI&Zu;1f24L7W(I0m*n7&Dk}iWV$ey+*h#C=*2kn7o`oDzZ9FqZpyklpbDw$!MIeg zQoQuP^Q^2w#d+IDZbCVPXDk0wkOgZ?p{R*e0g8@%2c+BG8x7-BTafbdGUl&oqOPWq zeG^r==FuZ#Yo1-U_2i>^F5tL)#cIZm8PPxWUBKYlWXaFk+MP0FwKd*q!X~g(DLF(+ z8)@ZPv1%P78tNPiN?F>H3gT;9M>hgRMKTt4{0^4pd9!oCC8JK52F9X$yex!6 zr0^3JJtqkJ6D-{Hb(=uZU4Pulbp?8Bw)qN;yVrAXP6E;w2dgfNA`x_?a&&a0cRF50 z-nT2yYPyHHjeiHe4tx=)TEBh?pL+Nkg=6Z*aKZ3>8Cx}yQOO2ImJg#LIfT7y_rk6Iar^erbIHIu(x_hW{A={z>6e9Ye1Mlu%N>Wn{C zWMO48SuXk9_5Amto45NOfAjmlGiLNKK62)9{@sTHOMvlf*0*xiw{GK4zrKcB?|g`T z$B*Rj>63a~=k|M^e=aO8}A!j6Cci0htFC(H@p)sm+5T>Zm;c*l$xRmY?W!x`03 z&Ct3e!v|N=P+QK}k+qnHU`SPr(ys9rGRfTxAFVH zS-?fWNw|)$jVri!T5r$5e>-@7?&kE|=JYfbZiH-3AYW&!(?F5LG*?VwCO)g!4x0PQ^p_~?VEsZNF+ z`@xb{3i#%z6Xuk}s};=~S<$?)$9doT%Bh@j;Ene@GP-FT?p$x2BTAS>ZdZ7*^K;=2d8;BR@tBpNY5!5fR#d?0ug*(u*L zt0@iq3HTFp8*70BfT?zOXPgykU^X 
zP;b7yoUxeOEfFanTjTI=m;v)*_H?ndUqx8qE zPy~xQ6uAPGf{7z4ob#0Ap?R-+c-1^__D;jA$W9*(vzj&o|J7Si<~Dv5_~XiT?~s)_ z;zo>^5%U1vR>4rF*n%rCFcfq#FBxzF`K>25^0@&Eu0zM&SBP6lU)kKA zuD~jzoZ+zs2dr8bv24R)_hx?H_^fV^#U z{Y?)tZU3<})K!7B!rtFG}c9s$#3E5U;|Q@z*z5^%l4C zy8;bhX7z#2ggd49(z9NlKgF!3*F%`-H`VNr+@zp|CB4_DpMRZGPMFT#V~4Z%*x}qd zxQ4~=uHcDh=kwuHk6`TRq294g%U5y#qt9UHEjF~aIoBy+VLkWK0xrAw45}-WlSdA( zUjR(;-^=KcLs+?L13&-mqnv;GOp*!5Wqiw>|K`}kCUVNLQ^E|w0f<+vof(D=N)nG4 zNH^AQY=wu~XGQfODZSrMRI(4N%6s2mvMLd`^1A|sfJ4UCV(3)*xpzF};Z^gz**g`l z2G-QXSxw7;jR32=)}es78S@&?)-E?&^u0+PQ?k9um~Igz*U~9mE4Wak^UwC&eG-9* zEeb`L`a@Q-MBK{ueaFno1F1GEP{7mcUSRR+1)+6S!rIw_v?-+y*q+4~w^WQa7LCx| zoe3<~7>$~kLJ*0V-u_H3&lhI@j59xU7?)mj28JODmRiyE6NwlrSGDLeLG-9_+fZM_ zA8)>bF(d1^^eea0*5L%VPd$Ero_l2}i{4!m(!{{AzC65Ix1ke&ShO%Mq?>x)kIla- zu~g1{D(NF9TVKo8D@LF{_gAlw3QPn2UZMI+Rwd$A{^}S9kE^D#%&~2rTezA9i&lSN zcoi9dI;|5o*$trHRmO9+sFEFuIDPyq!LLBU-|aT(|He6u)?IyjEZ zLml6Y=o@v!aa_g~9Yx0-#05k^1p(O!t1PmGB@oEgNvD(Uu6lpe?R0gemm~%Wq<)`I zf9iIvx9i?MzkAL-=UgWNHGnr*%Ta#`2kMH$4i?cgs|d2v&s!}t#!2D zdw}cbP7PIM7w~c0R+pLrX_Km|qgvvjicSNv*)7;@R<`fj$HimL<~R4upm(pX96Wps zyUj}P{H|OwZa5D=@h0xHWbAeuy?b^g5D0MWcm-CGoS}`=@zip-thV04A`w~X=dBbKY!;bw^W9{oXnn2dXchPV-<*>wuPm*u`6p!b z1jc+-ae(^ydxWYhqmpUOpl?!7 z9I_VoB1qW(15H|BUvfh)3WXD8I^iAX|_}Vj@6bi_@m!~ zw*oXzn~6->eXtGx@m;{w^M;+tS^e^Lsb}PHS(cF`i`Y*EsM+Y3b!o;YCB37!^w8F1 zNsCND0{ZpoM#-@X3eL=9=-}QgU$q6B&BFgoxsfENozB^r^zEI?vXz_Icc7Spemxj5 zygy$rThG?*d%0l5KnC{9qepHw$4V>cmDd@&&5DFdx10{-_2|sXwOcuSzW0q5yt?_!$|b zx@<^5X_b#(J@g)x<=%RERkU#GPcE-oqlTr~g)0tGQhSUJjx16wsrBA`A?tm8l2O&* z1E{JoHLnPlb~WrN7+Qf=RiV2xhfa=cE!Ha-WX;hHD3U_|c6~{rpR ztHw0wmqQDBGqj+0=xcjm>4;(dM}-21@A~DWh*V7H4(V0@eP}_C`qWG&T93Rse7J&= z3Lov$ouoN6Z`Gh27EB?cs*fKHZ6sZAsbVk2f@||H!y(y8v!+lU@YJ_$yt$5suB56k z4eH$4b-5>0RmCb<>79~~+oEYeRdv-&TlxfVZxin3vdle0Z)S3zE6GjiqAk5{N!0>> z9br;bR0TPzYmiD^(9f)$A2IE#XAnuhRNy|Q?0(h!-$%y_uYJV1!v->>;7tAQovNyK zX{l$Z>PgRz-V5*nW=_CUlduASb@_&UTya5f>{f}?BpaS;f5d<}5tIgILRF`6MaE~@ zWQm@g(@1k^MzZP}KYx4qD?a^ds}NT*H(xW1Yc4#Koc3CB?KGE#RF?%Zo%PRA6p-a; 
z{|SjeAjtcRzvt~rQ1uZ8&-pd$_OAk{6Bo!U z4i@rq;kzs=*+`}%i6ongB&$oeq=eL9c?lJjLp8W2d2MBRaG@Z2K7UdEl3gys2M-j90)3@>0P|X4sJQw%loDP%RuQ~pL4R_ z!?58tbk35MyV!f6)HDg6L{#ox=yT_d%LN7(ZaKi@i3M0KGETdNqZPGrB$e?KX(`f^ zRQ(WJlv5lQ`sSpOI<^FP|;{fd`)YoP|raqAFo|RkU#GNtIXDR@x8#3fzLP%<;*d`CQlQ za*`}g+S$@6@s}bY#Z4rsOL41fi12rXu)}*7TSvM%bFfG<3M%VMHZgwoolxfmC`XHU z-woUj{0higv3C_K_O8-yY)fWBk0A_be+Kz!Ik@aDs{B=yRF`tddyE(Neg`EDvUw~7 zrZ8pqX7eu}AH93#^3Kdhc;tzh{MT>)!ebBLv#VovR?n7ug4*o(tro^vfCorTbmER5 zih1?DmFn8UT~cOd=0=~-XLgiUAg{_iKF8u_qBw6}EW4 zr#heoR5lzPH0%cTYPO_Co*+r|apu$B-mX4C`28Q|^1Jzugw8kwOkm3HO}aWcfE$2o zfD3@MCP_wE0X)K#-JhHP@dn5MO7H#M&$#iLaZ#5S`DFf5W;{Eq=E14Ic8wc*UW+`m zOWv@OZL9v%zLmU#uN5UYT2%s3mlZj1u!Ps%`;JB5ZX_uwsj9B7Zkpfke*y5D-RI<@ z>qU>v0H(NHb{4++OOjkR6h&prp(<*8ff(XejUl^2YiWf&F(g@{SEpn$lkN4P60?`= zWct&eN64$9g;O`_>eM3gs%YWVRwJ)kGhwwCBZZW^hu+Ks!|pRyVzz-EmqpTdpry%l#{7@aDSNq0_emj6afo>Pnfd%i22uU8eu*E>PHM6(w`B-2O(Q5!9Dv9mrsBErOaS3$OHHNnqK+2 zKaQ~ShnH+!*=h)@T2&RFssu9iL{(Z^$r~RoXU@D;s>LD)EEenIUa$9WK$Y2TS`A^< z%d4BO8OA*~or@$%_yZ~%_gCT%#@6EN9WRtfbD&m~NKdko*Fkex2dMQcJpS)x%>I0B zguE(RICWo~`ZZZz6)l|F>g1Kw3hNX4Sa_nxLs+G00Iyv3Fqd~5kBo#j=wqjEHx&U- zeLPmf_^XDAn$qNC@?5#Nt>J-2k>>zIKD!5wX(}^8*0QwXdOkj0O4!7#fXy#3))wYJW$CM6}k=J9x@ z0Y}a5()#38&e%sDl4R>Nfa&*L&ZXz%AxTiWhdiy zg=cPxN<7^4$j9v4uO;DC)Zp6c{mZT|-}zaakXO=4dZtWx1yKD~jyOI%)y>b^FGeN@ zN#fJ%Utn0)AZ__+6-xZa*-^U}e*+iaa9daO9n)+4t(jNOF!ONxbyvWQO$aP%o2?dHn1?;w7M{F%3di z3$%AT$V#yb4?s;|`2ztyTe^ek&n-j|@=Edh7L*je_J_Liy=_KbooK=eHL5jAL&qx1 z5uM?(cKM7>e+!umEEZm$_*cf~jKv~b>bc3M_^4LAR0gX72`5(U7CUyyPKqTt!gEhi z6jmHr%~cB?1&>&RDxeEC za?9VJdBgF?yME3k<3`k9SaPhKS?_$tCtoaK+y%qAdGaK>cInVK!s<1$Wb?9rw+g~i zRh7ldHu2nR3u+G?KH^AEPhVYLUjAFajM4k4^xGdPuiU*Z$xq8Y|1Bgr!(y@U@E<2J z?(A+@>V3ymYWynR+8|ZF0I~`eyM)7H!ETjEaatm>_$Z3P%B_c){@i>HAJM{kR3&i4 zcYOPdV})B9^Uw3Al9$}Aae@G- ztn;wu=mw^K_Y(Vyw}%b{IaD`We!X~>ax732L#0kMLJ7tw>l@qH_{L)aXdSLy(yNFzw#k=-n&a2YkL-=6w1!Z@&9EdEL5j%@r4N z-tYm~MOvh2=-O*k$>wigJL!bAZsQL;_r@Y+%eEh6x7)p^qN3urzR&BN^$b5t_=9W$XCli>0hx 
zw~ZkMeHb_P97dfxn53l09DKQFj4Ii*^yCrNuD!*)@a7_Q*~(2)YHG^y>T2&jilWRi z2QVj;q)xcJO6oi$C!^Qp_sceCy}Y_)+(0fGo=?x5Ofu7*IIXf4RH1;cE{Lb5j_rF( z`0$I3EMK!bqSC=S&-U_N^JaPvt!>ltO8ODkS#+wiz5^J`jqMkd^6< zxT)VCVC9<4Ec$vCE7xve&f9;F(71AYpI@?Z>8n3EVHF=K=hgR?Fz?HiI2=xIAQ1So z-|v4GEl-<(W#0*;647m_t@4| z6}#qb5Gh&Ow7hB#Vd)z=TEkmoG`_JDMC%mRIBI(Rez}X?txU=sOAp6T9CCWx+6M7c zT(^TQ)m!9mD|aZKU>y;;kK;CcqDF4)=p$h02!SZ)1>&6B781wBo5T7BxM10Lg-m~9 zCcn6864zaMA+nek@OY|uf6gM_o4t_11%0@6^27*JVLWu}GrFX3$tyn@VO3Unnf1YP z-kud0<=d5!0w zQWMpUFww}w*wBp`T?7*}Y_w|vjXeK!*S6Auvfusp75vvtlOhO9k}cbSz5vHdE12=j zTWsI8pFiDmD`yYt8y=ekgM6`gCI5c=Gm?{>Tz%OXMvoj!N*vF@?tMm=Y*_O02`8-D zIzJzLx|&%ZEK#bxHBxeN@>?E{XDV92r->7*Rka#<@5@R-F*A^p_|qDkisUHtw`!K&4Z5Cg-15z|B7! zPj1(23ODWG`By&Viix9yi?Mc}jP`ln!+Y19kmR?^R&D3Ozt2%VS+Gj7+U%QZYiqBr zt*w0&u}ZCp6RKUA^6E5{SIyC|nlnOa?5M@oNLzEPb7D<`h8owXTE|kOW7%+P zh|A@VZRKnL01ll=L_t*K<$r(7J0EX~_HyzW9rC@0_pE9W4Qu~_V?6ot zLiPFA=1ZPxUqvt&ywm5a{e@}%JE@wMDX&f=c_lUD{1Y)T*;sc{qnd9VmB@w>iWK8F zs*UJa@odHlu{~-U>js+0^QJ91TttP-<+|JF^F1}7{~792kK7?=wsSZ7+V6Mn=Zi(( z@zv7xq`OlXd;Va?oqdUwpvA1AFivb zn<0daiIYoMiR6^gBxpQt6US_g)RatlbpqwpkBP&w#cAYtq@+JrHk6|^c)aLxqfJ-V zjhv#5m}1#qqvhW+MuEpt%NrA57fjG$;;Hm)I)GG`A;EH$GgUR903=DJd!Mdpw@|fPH3Doe~XSsB zBS71kqiQy+R_o0UhvQ#9pD+2A>nBLRx^)ssaptEQMOXtaDOvO7Q;i}lRfVsXeb3DI zRw##xOXRe)v?Z05m3IOg&DJ?3$xE;0M3WtQ`D2NpK}8#k%P}-FIR;5KM_xhfWkjOM zu+u_bNhkQ;MpK|n)3P8-2{fu$Ds>_^Pqh)m)zlRIjm>0RF(V3^eY5<5mzJzvkeIO67VhJf_r6nh?AkAzuOuRte2gitnyP`RG2~USHOMRJBs{Mvb5f1fsA7#& z68*kd9zI5uJ6eZ`rg6pc2r}2iYIzLJ$Vh*?tgO7}pn-kW>nh=_9F?)g|6sRjg4?qLE5t1L@*vS~ODC#w4bn6tctNxZUM) zK3HB}k)4s@R<4>jQod^92s(GnYSayS6-+8wHUIGjgmtL6f;VTcR~IfVlw2;Sr>4gD zKdP#}gytS>!bDTGGdYe4vMI7yO;AoG&(s*^#Z&GjpQ;^0YD77nc9lreGD+yv=o4cy zGc%`_m6eSHRT^Q_Y}mBx zuBFS@r+4nuK^ZZ;zkF7oTnf(0r%M#gOHmbkwE^ZV*vva~*C@5MbvPW3SG->DgTPVq z^`5qP1lSDxC&uxQfUBEuzgf31#bbx7_ug@x`sTm>q#ix=L&;sY zU8rWZOAkttq^6{#d@M$=CQfgh0qjQ0+ZPM{e#BrpmK>8Bp?MiCM8<@;b|UAtqTR!o zNLP;XY`uw6fA3`RZhD&CqByMl{MGxc9(_- 
z%wn;qfSQ_`x&k=cd{u1*F9W|j0h)ee`eGuppcI8@qNR~i4f~B{qicRNXOCEOX?FrI zn%$&%*lbXJ3xEk?1SA;WIe;NT)$MDzcNQ=a7;d=#K;8FFz&f)TPAlX9 zH@IA`r$k>d(YCM}7%IwV0!x53z)G~a9DpOb^NWCofxTjj7~rf1mZF)@6rIqYF7CA+ zxJ7rLL%_4ZB7-b?78s+m4y*=TqMv>MtN@k)bAVg}LF((NFYg1M2DSr#1YQUB5Vjb_ ztLHGW-$WCU3hdB*e_J%><3I_})j(JUq7hyM)`~{C0@wolP3-%?FNymBJPOPO zW&yKx&)p_eWfLY$ocizpX`=j{PFN2UWm!iEA@kb=A#XxCBG&=eiSjMF^DhNvitFq^ zA&{*jnJc5z=VG0_xfFO;lotcP)SY*W*rz9~c%Fmj3(^afyAXEPh*!@vpwjFnO~<*w zC!#zU_(6AURTRPsz3gtH0Xhr93Yippi~Rtf0ypTc%Mc@}&Ok@eAv=j<9|Px^&0)gC zX@Pw@O~nfsLmO;5;cpRTi-8nsbk}vz?VnDh=W$SIap>-IkM8()f;0*Rxy%)E4>a%8 z6kWYi1ldL-ENc|Pl67UtaDKdcb{E=YtAc!rb+RrbHqA`~<^vCk#y95fIRb>ze&-w1 z-tIz+ngRSnXm*{=|F#JeryaHucBs${9Ba!qs?Kh7FL^HVt=Wy)Iel-!%>~ZCp3pOO~@t$fxRj;rbR-N36WKC zoI1ILtkb*Fhvu-p6(lxW>(gCLUiz&PN3L1Z5R$A#J-i?DhMjqMvjZZhz5p@l6L&pbX zjB`#J+Pqa;K`0eofzTdpW;babZULSbxdc$Kpvbn2I*fITCFSbXW^K zXM%X&CSjRqbFiAPz(f;~g%-fr11;8VC0bm^a2mP>w|0cQ!T!+#_v(V?{PP}g8JZ_p z2q|dc6s9I>!h}g!CVoQ9K=Z;ppRnsKXr4?)*w8$%>V$?7G#r=E<{CSQ@^+wFlzXGi z(`*yh8~bgpAjlH5;BTYLu3KDJquZA(?pK_pG2qbtnUf@EZecs#1 z_nCv{>AHp3ftpEZSrOtT;+4faOGa}?_L*JPgb5RE89f9sywHequLH^m2PGZ=M(d8J zqGg@d2QF5krIIqMT zLK0NMx!9UQSZmNcP74SJlJ*pu*(@|~(J{bAv<%j^1cU|fiXh`2F$haW^KdVWkOz|3+xO1!!R()k1SMVZwxoHi>(MuuQbH1l07 zI0z@`)WbvkfY;E{+?FW&67|(8q1l8oYX=Eu<8CTkf^>{Tvk+~rs4>`#X5M=gE&Gb` z{Sf6Hd349EO{lAi=B=rBJXwL3X2^sIldw#*NvuUnTXi>@<3%vWdE3zDt{gzE*q4K5 z7JTgl)vW++iSm%pK1||h{c57S;_pN=tG$BeEYe)e0Is3oQsW`g8!w&_!o_HFe>>3T z(25CX?T&?=X!B}c17De4)Wqow*?a{ing9>lLU6r+F9;_I1#LN&iK6@rntSyVXcPb4 zMOgu!YoWTFLwE(%pQFV0PD6SO3yccFy`*wMTTFBAk;qkE6z(L4onV?5&xp$+~;3=%@xX|CB_O_(rY zLUV)&Ei+$^W{y+Q=D0#y`@=#5t3fmC`GDW(?h(!DqL0|8U!!a^kq!$XrvtyZccu9L z7Ln%0E@+MqD%xCHsJ?nLUq3YGidwXIE|qYSfVw;4P40!|5*GY2$ zBhZ`}D$yJ`))VOrk}b5HR6%h1gvy=J5`lIR^;u5j666@jFO7!tUZBmboum5=A*6|C zcNNcGM#BJT(AI4mg68Z}iWX$N)o^YGnh7*doU=w~kS0u+Fk#{(!OW#+!bBT|IfrG! 
fgb5QSP6_-!;a0pvpIA7(00000NkvXXu0mjfw;w9@ diff --git a/doc/source/images/instance-life-2.png b/doc/source/images/instance-life-2.png deleted file mode 100644 index 75a28d40344a6e6b2edff2062d69b647ece6ac94..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 39847 zcmX`S19&CR(>5G!tW7r9*tTtMY;Cl$H@0otwr$(Cb7EU3`Le(N`+V2YT&Jg}x~uQ1 zySk<(R9;pb0Tv4u1Ox;@QbI%#1O&9=>$n{n^6Q(^vW)xd2l9`UxCqGSzgJFYas1am zFn=X9>_I@_Q2%{ELDDiXK|lyVBt-<3UDnPsoil!jt_^oD^6J|Mkiu0!gTeacr6~0J z{OMuVLp(~I?hnvVo&dGyu;;1IxKY-*%Y?Bl0{-a;YbJWt2P#m(dh`r|>sc9Pxf^nNv+p+; zD$dnyKikG)WVdQvc5my%#>MT)e|`X`!KJH1c`6VCmLc$XDz;lrkMn~Ox1AOYLkVew zIhq;@Ndt)y`b(?Iv$D^*9PH+s{Nzde^DEHr3WF?BRw4vhhryoXsC#`SjRzi#|LH~nW){x^@9J3 z$#Jc+R)B%S=48$&xUr$ZW1rUu30HBmlk9p&A2>J=it6rAt+gc;Bs^mJgL@+)Wbk@$ zKSdQsK`py}IpjMA(6?E=!A@j|Ff3XqdrMV?4lf_+}J} z8EhEe^;#f%`rYF=hLG9!a%=u^=X(+q5;nsJn_GK(MA9J-H{LKzv!2L4M=pAXJanJF zH1*Vup592TIGzRsJZRP5OfN=;AIC^Ma2_Die=oXmo-tj%m;4^oVt#u#a~8B!n4PN( zLg0O#N7B8?zKiZob#;Bg465Elfj_hx8yybQ_;l~@o=41gqNNzoWiT9xarfo9Ds{hx z9`_GMKnCBtnT-Mu+7(_yq5jba6Zp4JUxX#>XS7mXUhX=&KN~kv3U~%I1StMrFa~TF z#1znTxcB3-4^=&=9aX*OSI(Aw?c9I%MM_gp6Ju1-m7YBLQ=B* zLj#=WRTnU?TT3(g1=a(w7VkN|YZ zpnwl8uGz4`9LC7^{x;=vxZkiHP!Zp7c7zG=k3o3~&w!JTk=j1sK14Yc;7=?r^D^w7 zba>GSE!Jy=y`4{6ol0IMW`_;;6%;U$>s4CG!=KvP2fS|oL(zvOCmXm}+ZZ&#+RO}t z&Bra^V~dtn2s#%2PfQd9Xq4HXH{bvQeq*QmYFpQ^&pwMjFSH2DPc(3Zsxw6JDo{kw zSFoeX1HADs%X#0~8w?-Lc6nDglqAQqdIq(8I6!5YK6%WLw!SBw9@5kPbH@TC9NZ-&A_o?o`_8}B38W49x-L%w6V15E(Q z69YhRqtk3Sjzi*4If37(@Gc&KC46_3aqJ#9>h|7b#k0C`3xtZU;c-!yBk<{DRX9iT zD3(0M@0oj)^#UZSnd`a&5+_yn+fwc8Xm+w_hC6elv=}`3n72BmYF6(7KP1*+tC~~+ zckE(m$>J91UfZN7)<2>gR9j&y5!k>8t1Vmu0KNXfQ^aNd^0Ce+E zwiDol7m^%HBYsh;QBw3Vc+9%+%++|@Ob_lJrV$N?RbE!!LzKyPHKlW%9A!d9fFVA# zRfwzO@x1@E%?U4|17|zG7v`GDx4x;?{zEGhTh-ZIa`WP+y*RB!sU)e--6#_&<)1rr z3QOIj^IF;j&i3^Gy00>Trj&K-B6f7tzB8;hnQ6|3@W*5;X+K!WN{W60Twef9`mgPN zmWkeyzgjoK0{_q=G?FGQ#r#2?ZOM=lS)gLU|Yy{3NiSlx<4+p#L^!pFZe0wAN)B2K* z2n3Ky1l_KKc~wbh{Y|t57nX$IEVzGMGgb;(K}JAAn)`uK 
ze3KP^DQlWtQ|F(%NafFET1jphftBGAZb)Jku~6`yZh7=(Q9uwX?4bguv%TP#ieFr% zz~4wJ;k1b&tMtxOG=5kFXzu)|jC8$bp+Umq`nC=doqs5W0Ipgw8LrJ|HWyRryz+4_ ztEp4KU!(4Ql`9UsfVYq_Kh<=3-NjDnrWq^NeIIjV-x$9c#+{^VKc6GPg_07(<;Hv9 zYy$ArZwPdlP)C|*>!Dw#O6{b(>D400=QL(SiFuMp2MJb+dge*i(vnQXTGw&@G?LA@ zTuW;C@O{miT3)vES@(hS;gvrW{NGB8U5yW}8QPOkiEnET6KdJ;=IhGH*tvcmzUoJW zShytuXAhs?1m0Jx>bu4!%b%pMi2`tW>a@F~ncT3)J#IUWiORvn%))zwu7SC6!% z2{2c7*UXaZ*R9+4SJ0zyr#ZCX#m9_kGu8ut-yLced1DLEjzKt6*JLjR+2jCCWrQsz zhs|UtqHZ~X&EU&hqoNK#-?udmr<#x7DXV&~JV;hNm`Hd$4bg%V!15;RIz4_V84|G~YZJ9j`EtY(0;4kNDc5(&~hRHZZ%C zQ5&*UhA29@D3!2z+)?BC&p$n*?)_CJT7^=WHI`ah4E+3$p5^VkG<8S~1Y17Y@EmRGRVaKsy~Ev?R)93{`5&=1BPsHg9kf+5eI zoNV4&HM%d^wXAnqOrUFGR)?mxn9=RI3ZqpqL zBtJKzEzw+g+Z(UaABtEO9Ag_9>#N-#EcqRJh6N~*=nyQqzR7%ee7Zkcrr_VAq(I#y zEE(y_Za{aUqVnj3!euct-htKQxGo^;6)cG$2i-AC@IHgxp;8WcUvm3c$$S|jLUrlN_WoSn* zIEA|)KCwX0=$uSf0ojJo+dL04T)X3|FQK;cL1^QyXOqq`QI*m@l^*IaAy&wFQBkx8 za~dn~iNuVx6bE1lNM~@sC*qP9`_KjzLS>(!hu1r6Z4`AC=4+`Qg_3}w`1>Xfs?)zw zc*+~mO#Ebda7Smo{`Wfk|KM<30@!upJ>C44$l#=%Q;lvZ4npeH{ptUggy<}p?yj-9 zL&EV&2L*tU3WF1I+XzHMQ)bKoU&aH$g%eCxLof(8W|huK`yYF+A28-Z*3O>RhdKE_SES-iDX+Q5t{}#;mHlAFZ3MslZCN)- zr&;C`#$eA{C~U8~awe#F0wSJ=!p=$;zNOxUo2!bbvzU18{cjy4HY?rY`;nk4AmmK?x^i#=UF%gLQY+ z{~ujCV7~N(mqhHm&E0TvmxuGphf}0c=ni35edkLun*69N(bLfzr?p?#(lL}Z01m%4 zR4Wus6l?dnknh;^z)+xSRHDb>=L$6+vdxM{M&i~; zOdFTG>ebs<>3Lkf2)|zp3AJ%XKN0uERb{uPxc zF1w~YCz2XWcslk;^LR-{br;aLG8jpwl(A1z45kV`XbeXo@|XKTZ$DLUILiHwAYm-h z^+BVAYI;5xK^AD10HDM@3t_~hb~%JL{ge+#<~-xmWp-CtLoye-%W;#4J|24X>tI^P z`EJMTOAi%zQQ}4ucrs8Ef67CZ<-15o&WGk&Ztf0N}4z@C3MON|I2+wXqkb z>M#gO1W7r4gUmaF*#8U8nrev0c$(oV5=ScwDY_MOJu6qS?F?3vR0u@a(j+I_PeJ^k z4V;|%Zg+C6T*w5Gyp1jW#*#c2G>PIe|Y zRfdZ+rJfK;)&9vZl!)Fcw}B<*K^ylk{@PQ`z`m3yi!SHQp*ubbT%#oXycSWu-!|Xp zSJMR^t8#L>uX=SSeE94?d(N@Rn5FZzZXnlMWP5w1|713HV0mvtqAEFj<|wtnq&`Jh<*1H8>9lGIC6* zffoO>&i{N4^+4bCF6i}ixrwJ)o44D6MvWz;vwZ*Gu8i_Lf7EbA(z_H^6H(->FP2jQwhH}ryk89;LFtXxi8*o za427<+qb!$dGE`VbTmIi?d`{F?GGok&mE8P+> 
z8knhY=R5z@?Q;YrpzNVLQ3RmD?gnTPVD;K)-Pc!m6=&FuMV0)0KT`cTX0$($c_)yoam=o~5()uyB-$$m_kxT}z1w{CBu{NGFyftqR#cImW`TrN-|J_xj?byB;-~ulh?S`a^ z)-6Wf{VtB9u$Et&TfMK&6`kWBp%o$lp*ztRB37%ZdA+!FaM3n6BXYZN#44E^dhEJD z)$}<7;gocSqL%xvQ5@@uVrylsqjS;N%#Te?HAdZioUu@jtxZ8Qa@3l< zRg{|xLrE&T^iZSx(rNxvsmhhl<7INX+SC#jw>igr*Z9We9j=<&O@s)GHfgc@2Vm!fA(15XEb2gz&4Mm3=I!Wgd7{&77Vn2-{%4;MoD2-D*F+K zDtaflBm!lzgrs)0Zqg=9Q?ZLyT`HLl`+b&t+smyN@bu8_F~)e_btln;&zkT#@g6YU z^%^<}Who?S%cn^7sbbvpSaiq`-FF5|oVa-~qlyuOV*;Sir6jz6*;4 z*IQV{GiT4xu|HX@WQ$KFFDCTW2W(gvbiY}6*x8R7!K2$ulS_~AF)%wP^$@YTQK2HY zL$i-)2+$*mNI94i(?;uXb%5R37vfp2%a?17dw`;%L#4E|w6hND<6h!_96o$g0^J40 z)jDM*|8m0>Um*sHBzl!qG-h|E#~r0fGmE?uHGoclfIk@<;?8xISz@$= zwCJrHrkQT0D^nFZGB{d75Ib4|k8iNvlD%@i+8ueeRJk4yUbOi+5VGqJK8(jkW^#N8 zT&<`p<`UAB7#CVY&-T(D%#7~DfPu90vNW|!TbNlnFd}-IG0T@l!eetv$Wx8Q6{qsE zmSOw>fLd2s-u$@&k=SO2R7uF|@_JHyWtL_WsK>Ku`X^fheZKRC{otDoua@1qzfB+p zeYuUPH6rf(+}v%`@&k{{SJG&a;hRedb0FI_IF+!Ni9S&cJlcCqHzB-}l#~=XJU-w1 zWEQVG9X$B(ti@FSs;8Bqd7b03m%sh;vBh(g@j%X?^@0Z-({%p+b__XAPA_~#e3%09(HnCix zN&2BXQuqHF@wjuD3Y?SV0Af3rvLz;%H4rP)?jm2;yxsv_2-srgF0GRB!BW2LWbGBG z)WAL9#rr4l8blt31`3v|^eg`{zEl&{Rnk}+b&nVQ-)JOuektE;o9I#NeYZfj-U5F{ z;}~^k`R|gwk)L^W3C@sP5VAV{ci*n@{>DD3|C810j~yt0n;GwBNn#$gkhbuEv;*D& z)A(N}Lhv`xYDY!w<~rE%QIp=o61~V@SGE6j3YM0JnmP$^^C%Ii1*+Y7pf(EZQEn0c zFE_b*O`92iXv$-*UAj0pNb&ifGF`qHZPQ1SYgqT5#awi2QIK;ix+(NY;=e%%vq=ll zs&yApfSqapalF+)p4_AR|0O_q8Umqw=DBmTP?vn3VdZ_%(_R1nh8G7}yEPM^e6_m` z`}>#wO;cF)9vt0SQTxtEu>5Za600Cb$XHly;uFXJ#AvO2V-<9knMG3<98LmKP}0zJ zEM7fn6&4o4`CwlYf&aH~!rUn#V>4Yvm`D?rM=5%;vQsavt*wOp3mfgJ+?7+rw~cMR95)*{t>{(3|LL4;~^sOjQXm?vUW{$V+RG8f*KQxGu5oM%d0N?-I)zEPNGbj4VWeN5X$~Y&Di^R6?*gM)*=Ym(x(qwdq;^Rj$m0sXnc9(>y!d3kx<3aDYc4tQBYFIY&^ zb@XzmYvJA~eN;IHUd#Ll`bNTZ=3S$vGRC2h)N}M+)ofGcBAlbmV0NmC-ZpyNo*qpR zt3tZoTAPC0ejg=(`F4|4HTZ<%xK%#j{gtJKc7cZ;!k595FN0;5tMV# z&pg1Mo0lHzQqK{^jZ|FG8X;t4xeHieTu@x#TB!FuI2`;fbW1;EY`~(y+^wK94alxD&#vl0;bN+839so14%Z&N=|Ex36^q_7u=J)Q zEG`fyra0&zB5ZM)A)bj0K0~QnKQDao?`xwzq@mBGn5sxwTXXpNovottZwFdoF1jL` 
z{+1sWgHKj@>ed;rw%f}~s_24R&pXP19fBd{3)Kam71>{4j?kSv%UP@U)r4y_FS`@G`v0^h)?cb4&HV7Ebmf~KCU!c zubyiW3E*I1vA}(MP8_CCD7>Ad=dV=AUM4`weqWvFYtmKDFqMfW*H+mxd8!*YZgLyN z1VKzWTGjjL${!AsH_+7im@i~>6hAa>kGBlBKiEcF$jpdU0+yC?9?_K{lh%FD9J7&5 zJ=ss%j9Y#QU2aufxkhJB!OKrWBvOs4P342Z^cAJAfmI z0RE;Ie12%4%G&HyzYb?!aa9Y&3smtvN%8*rr5EGXQtw0Tuo-;n_UIT%;x!w zGfkS!YX4&!VrzMMdJ!u+R#08bqSA)iGt$RP0PcapJmvB?3I}VxRoCpnZ2!aUd1Z8F znYd7}Dbw3D7Ef^ZDMn8um7Ww3D6->$R{K-r(1j+F`Kyn zhRbWtsQ`EAeBT7+Q}3g#O=a9jcWiGnA32AhUc~LE3-{#u+S2-SnO7oyQ=*bHr!fee zrByo}pwql~`Zu!cO0FK~F7ndj60so_Plg2Va#oW;r)&}qhNbnAIagms@I|=jv^#s= zIP~s8N~IgiN=LFSda!s>pZ~PGoBU1j9RE0ZpBp%*nzt0iWu92&RXK8^LobyKX$Kcz zg@IW_Gt!vZXDkd6NSySuPm=@-@aIf+&)Y}xucL>0kXf2I;OJ<3ZDmDWQ(e8MoStVS z$2Z-n#V;^CX|~u|F2HnqjL)u|)^)eEeb61>V`6c6dlS$e6jb~bNE9zFFJ~9YsNkt0 z+%#HUZ%y=CKy(Uq5DU zk!Y8hpc-wh%J_7U7Pe>XDtOP2;s%>OK4`BQ8FImou3$X!0D455E6!=q^8Z;WkH(H|(2axht_v){^*_QxcwM;vJiAW*ECBM8&9Xv=$ zMrI2v0p(mb3ID0EXS1?Avc1ftUs;A^G5zIq-%iiY-mElNmpPwvI$bUvoPV;I_E4!P zHo8!HzTQL3+KQS@RB_jw(XC_CcixgMZ@SnRd3pl>k#x4jUK-$LhH95@N5cxmOBiWl zYxe3JUFx;MkSl)cV|7H0stUO5NMZ)fI*M0?Oyf|zRh_$*LgAZ*{ielhXJ(O%%LT&U zGwrLcx8|#jsUA)>tq8;!(sfVknE^a|OWE(vk$&ou!g=4k zCK`dLmHLML53g$`XBN5A;o~?}F>RoA8?=0qaXY6fJ zm+V5d7m0SLT>|4A}{9^z6`Kzjq;EBfzNf z=f_w_5Cky6Bx>&E2?B3{bcoFVtT5kN-YCvD zF12T_TuIa7aA6^^1;qutp!=DA~W=KHAmc@gi#EtAXKKGd0Cr3z2<$HxJ^f^a5 zC1DM8BAk3(^e2fDk#v!)Lrn#N`quPq6P#BxyNYgsXVFv*GN}LtFtF}?H!A@@(!#pC z6SZk_#R32{!^XDfPKuukojlboXrKP-u;p#$lHwJPi4KZ-hQ+!GyRJp|mZa6(0w}XZ z_ZNP()GE|%K_rgSr~~M+)&H*+fPN<>|ML%o$Ek{`nfYu$qdE9#nW)r?esHDQOUFUU z$koxnKBCE7E)?d|#wUsM*4_11k=aLFcJ`->fx-Sa%Y!=$F0-6>xdgeT;%%v?x7Sqk zb5wc3?=X8SYTX#2g4O{E{BV_n5((XJy|P_LpSkav!!dZV%ikyFu;ggRFdpfFTSw*= zOSkY!2mJRo-NS`|$XO8P_`;HEi#JjQoHZoMseO?wjka`J#U2MwszPlSaKZ z`S{>z(uQ@vb;=v+F~QaW(KKs!n=ZP~IV7_%yv2jlw^h}2H10oT$3-%+Fx>IF@hfRW z34%gQa2k_~SS4>QZ@w7d>2`&-sh`r`6Q43VfmejvAlnMPvAvgnByu*kCx7_nn5uP7 z2K9`+dRO&SAr~=^&@{M*3+CqBNbe|qHtz|2;$_a@NvY0N8HtLDI=^^yamGp*e%4QO zrP6;qwb{~#iSwYb*t&Hv?%_u$}k0|hOpEOP7Y!TGAg=8#R6p)}T z*pzFNKz 
zx30*flqnL@ag<$C*9Q~O?sr?%ntR81MK0cJ!bW*y=?Buljjxs+`;E471ibPq)N9*{ zwlq$+8~7ewRam=Q=yWIVMY{w_BB@B3MWysU1v-cSgrAF?!8i{8Cma1u_xk6%p@@OW zcU4&(ovp#v*4A*Xv~)S{z3b3JlTUXUY0fK$TR6BFp@Kx~O{$Vo?JaY6cT#dp5}fGD zTUV>Zxm%{H3^%-N&-*ftYoD7g2@*S5YwOE60Ro1=#Oe7LW&_Hx^G~`}B?s+D*sQt+ zT6!n}&JkWm?4We>w>w6(NE^mau0OH1-+rQ}pm$c1^()#3CTSey z<-x6ErTjRtBcpOuFXiLgy?O@&w4Viqx#5XYRe19An&K7E{=z>4ZQ(wB3ymq>38iU_)XeF@@#>(zcJy%oOEqd^|cG#+}&lK%X2OH!15H1 z%Y+lOR?dsQ-lk#W{sdfRx~H zSgvY>l!9OwB-@`bQLdaf1to%1cA>ObQ~i(gS=({O;&Y%}j-OVRfanKqe0DZ}XKih* zVRq;6m=ryi6g*DN?THQl2T%I^REN*gmQG_M_B7WKlbBT}nm8__Z6!||zV}C>+42V6 zcG^o71qFrrci6xwN1UVB#14AAC8hhH?wxY+gH|8}5oB5f+F!@yNg4aTk>IG^}V zJp><4Jr2H>{{~htXi_LLfJi(Evk3J#XH1veYWq(y>G24-8ZpH_StY2qmZqkgji;yQ zwpG*5UFptyOM^EnZZ=*ES0Jp@iV;}1$I~vA_VasqQSGYFFIleLkj7UE{+9!|sutt6 zyVWCItii!SIBYDeDEm~uuM#Q|m8we7;_A?jvNX=Ym-?pN*I(-Hrp(!?xk*_N9;5eT z1b;clVpA?rviS{A5||6 z(jy`=2R^VXVTp5dVye>T`)nXOv>L;}Wyn`K$Rfzq<7)*{%OXMgi;LvXhK-GVOc^C- zSe>(bZBT92CB*4?2H(Be=b^l($^ByRG5oJJ}s?U z`KGJrg;H_s%p=t;Z&z>BChcN60#iI%(C%zknJ z=Yw2?Drp4o3C8~OPcj?5kH1wY@9^G_)*d3|U>PKXzpue?sW^1Xc zI=O#^kzT$60r)#F)^3r&f!rHlZvq zA?(A_v`&*ln5Q{-KLO2MA!0<|r;8gu;#81H#feXi6=QR7S;?_oDP<%U6indd zlYkC8Jsb7HR=Hl|b^)u(693sv;mPz&5jLW}O!YfA)rQi}^lq;wopCpxI|?!=@BoKM zHX$SerIt7#1KQSX7nO{!UmS#=lPH)-m#^r$Fa$0*I@xpT!{#^S z!uL>jcYSi1_pg0Uwv7l4MS~b=;z_l}AKJ!bR{G|y!ok)|up4xY0fV!SdF^xe2?_?X zHDJX1W7Gq^gODsZ^SN0qv39!s-QC@Eqt)A<3lAR0Ws2n9``uJduGhEtD84|A?5W@R zZ``LTN}Mix5QtoVJ%XoS_QGvA-a4dQKQpJMiO-RKh*@!)e-&P?f2*Q0R-nV=67UXRe)0_^Ku#l=^> zgj!8}5I|Ti$gqf@c#yb;hYZ^Gtu4;vfNVNd<&D;z&zWh3QGS-Qn9=nTc;<+$ZFz8% z@Ft6N50gRO_Rym_r# zf2qRed3Y7$b{48U$;aSG`w1|`ZLRlQlWVaQWE}M)=x3Ssd%ilyQ9jRb*gglFVtk^w z`WdSRDL|z;j3_JBivhf1aAoZ+J8M< zHka*qHrp$ch2y9+KTLRIUwYK;hOux1kd++w*xiP|yo8vXpjXJ}o3#g;m$SWq8?v>< zS;Z%(x4=$-xk0r3(F2PM%q4|)-xV=cPvSUHC!6;4@U$e_{&}F`e@91sY4X8aJu(pq z#zz_2BynE${Imo@KiU!(QEEz#9Ay$nug2_B8!%sC)O?r?|_rxF~5SyvAO%l(JUnyqg`J8;*F3eyT2Gwfzy z6;aKJbH9oleI{bV*2uKXe)H+iaj*F2K!e2vxex=hxgk%>P>7TLzg(5da_2V%DUb*; 
zBLHS(+F#3D<5qG$)NPDBGNNuMkDh~8i!Q?_#}5I?u9S0pF=+$13*?E~#6KUPVT znyHp3ujQQje#9@jnAcYD&*%VOv#pH%cSPF2*KqSHr9MyXyo4}R4<&0-y=BgNS`$%2 zqm!RqWm>63{_%s-&n{j~=s zsmJQQ39IZdZ6%%6?%+t3;=*z0?KH*9*6gj<)~)_y5JX=x4MVGY5QAerqU<`}yT?tr z(zbM#TkTaR8lZHtJunjOpvc5P47phjhQp+1Lx=4S?j`&Zd3vVC;PC7;kq*S|)zfP{ z>)x+=d-fDWW7p=d>_2+HJ+pkw{0rUv{`{?IwKm4ho1vQ&&)TpzrIjI!Rm| z8}G~Z7Oj$H45O_UcDPXNORSZ{tJ_hD$KhQQ38+(VoE4o~L#PEc6(!sf4isZ7h!Z>gT{merBMrYFG|6JC6Vy&-?TYPo>U_)IU{7G*m6na1LRULF(vdw zJABIyNj;R9)89^((1vvDa`E#S ztu8zlpQWzO(MwINg9q6OwQp1Wo;l5@(`bmz9laVpPS@#U>weyJ%La2GYDs-clgdB7{kwVQ>dR}9g^Uqb zAjJHeautqzZ$x_300V^Wdaj%352zR`hZpJjoCK!G4#j~)S_D zqKRG;J~3efr@#^^S5V@hd;RAao%;&~AzLrxd1TDj57mYNb;b1w`nxjMt9V@v)y)(Y z9iYN0+mc*gnhqyQXU*UoeP33_u*Wg$WVW>hE?C?_sAbtNsmHAL3ke_17c1{;&)82M zZ!B`&=}jmPZ#^Vr(-$)+D4o_5o29NHpP?H}_0_b5bAN^pI-c`mTX(4|&epUq50K6b zzG0*V2g&UB3LwQ|(=n?`EmM`UmYR+LV*T;;)uXNI@a!iY*=Ofa_D;*`v^*{H_h+2v z2|Zp;f*mT4*LLqNyPmwd-cPadTIk)0yyx*Fq=rM6={|aYD*&}-oGc}f?zW^rH))C+96+zKcrKTGSjS5GpF(eDkw3q->F^Kn6ZcfyB6w}d!b(1ny7dlP2!JKf|F1L- z`8-|+F;1QnJQVrMJunEr`?^p}J&nMEs1fJMJDJhK%dU4f+b1)H$GNcF*{M5W%Tnu^ zO539!v?2yIdMZkHy@A35Rc(~t?lxzH!tiC_>8VFRV}RO;ar4;hz{#Z(1nQLrmVu)g z|1;y6LmRfO+4a~Y+q*Z_B(uNx)7>W0YsSn(+IXgClH=^E_^WU=Elsm&%?-*6_+oK; zcdIA@fa4$#LTJ2;1W0*J)LX&fV1L~zh3#`gd>$X6b-x>F5M~Gt{SxcOVf!sHshUGZ>3pkAp+b+Jcw@NhK)#OHVSVh_ElP zxqDPicTz6?-}Ygl(t@zQD|V4!L2+t`T%2z_rp{LaW_)i^eGbxDHhjWtSFwqV7KO^G zjhUntA={e*p_NF9`9x#GD)6t;-{T=*#KoCRHJFSz< zc55#7+6Y7~9ZVT<04WHS6Pfp*?1wj&;Ly-(`pX6B5bK|ra;lzQ@FDw?7HpVTKWc4? 
z*(R%3Rm8(PIldh6=*w~-csKCT9g%+zv+M2f4)Lu31d;Cm8j;tKC>Zw@1HNAbv+FRa z$SBWg_vuBICoBPG?grX|n4r9%!)^D`;O9a$P7IAjgg!TuOpmegeylZIj8v1Mx%#MY zlXB^cnwTcqAZNls8`MadU&^=G6d9?BJW_MiDD{AS#3;V)SQPOv!=}jh9KokJoVFj3 zj0h^;8*v*C?=*PckKSgtWnV$ib(^`3edigdd-4`UL{1JIdjubxFtkt<5qRTB=FFgg z;RmXat@P^c>E^_L8|G|a9k#|cCA1m*Ji5jsqnGdnn`tLvZ z$cqsm%@gi5F?Yf;>30_{riYQm)+AUK@f8Od;JZFvwE_rN%%5v2h8+qVuC^kRbzLLt z>X;xAuvT{OJoxaV`ETf>_+D2I)higg96r>($vnV;E3Q#)mnN~9WpljRkiU5WQ#p*6 znpGyU)U33USSRzN_#p=q_pQOi5|T0gJ~DC>!3%O2qegLJ(zP0zt|sU#BoFY5n8RnS z6mMWs)}fM!#7m4-h=6esev?rM4~hxAkU;yjjcITbM!|}paN5V1*3UNE5(zLA!N<~6 zhwV= zDpz2US-504UTafItz(Fa0&?coBM16rV$`r~V@cwEn4RoKBA3R>_odMM*kxHK)I3LK z+#8KS5;r`yFWm}=?}&on<>nRC8Q&W>b4sIpO8(l7_P4e2XX1p;Sm#-v|pg3Mjh`Btb4uyjnezRU^dK+E&33Tye{XJLbQg97utTk1WbafoEHNfO7 z!3HypSdo+&M5e13HrAE2mh&7XsS0?vi&qtT#0)iHHIwwr@h>1X{vNh6kXdjCN2ny( z8yJ4P0rrdRl1k7ps-THd)_$OQG9y$t!)Dx7+lcb&ue@Ya!4Qzx4N z2iJ|WX!*E+3depgEFvyH;fG+WR2g&56E@u>wKl3$Y?{ExKrN+^)I##Hwb-6VX{a2k z-fi2@NVBh;6-t{DjGjhCk_PQIQ7mTPPL(zt#C-4vFu6GMxlBI^o`PqXl0^&_JIx|9 z7+o!}8ends9lKa&s+c-h9KQXKN`nxswf(jobv2>*()bb|S(DIR_68Qen@hLR4qTi% z)D{=PfrwI;49Dfo9+ymGQjV3B(X2Vj+IM;CL^3rsCmNq=>NolXJ{eqgxO#l<%U$4E zy!z`CM#vjX+Vpy7I0dx4zP1OFFR`1G02UGZ-N*qd8+LIJqGW=h1-0a@nFTyjW;(xQ z3!|7(3JL+~l0>%*tQ;6Z1Q7uPbK9u<*2E8~#>k#*fqUgcX>4=}To&=mEkenzl>Ng-og#3e~kET5>&9sNw}36Qpm zx2~j-bZ;Z6jwMniUOo1ERVRicg8MEYDyvqt!Fsa6?{ktU8QnRX!ChCf@hlz%qngiL zejPp})!Ktt+0RGB#J4HhAZ)lgLR~4sSIFsQ@NW3|#wOY=(KgM9ncV(O&DxfZ6aCOn zmv1aoROm|$m>~rhd^elG(upBRUblO_Zs3spH1896bEWLf%lrF9LATx0=BMQy<~&Bk z&m#JT1`l5a?FShhmW|_e@Au1qKK(7%PbIkun+}Pwa+x-CpU1lvn6VL@<(7sr5k@9x z8p+=wEO+{3W~90Tv6nn-1rp>8;I#D%T@b>lMEg1g0tDKQ;RW>4(CXi#>h)Uf)Kq>P z{e7J^h`TMDUOBt_Xb5Nol4-?T91{4Hpvn}Nur#jGaxg7gVVsygtjh4E79`G$i>4U^ zqRx=bKCyphw{ZW-%a@GJ6WER7mSBC8%iRv)ViaI#nnnzreCdGzsY2`ede>Lr5HG`W zKis_3GGoXYXv=JAX;ijQ;QnaJDbo2snN z+Lxg0KR${q*WSz@62WiTdADzU%X%|4gQ<+wxE?@_-rxT8N?lu9Yq%&nX_lGaMlr9d zwZQm@!7$gsbJ*^8HNl;pbmj@$mym0)83GpJ5LVehV 
zk8pKI;inWa)y4to<{)5wi~&5()eC*@7Y8lsHfq$QbespsK%j9&XeXRcnLhwcpA#7}jXoIqN_h zKjH=>HpKJ_U{HQ`LAT!w#E{;7T)!g0iK>V-yN(N||FcvZ(}vZY!PYLDanQ0`Wkd#&gs4J@?Bp;&07Y1XvyWoD3Cpep2Q~vb`CF8lw>bF_~D( zSn5!Ic2%Dj^-9xH?~dC=lORxPuD-<=n8&cd#fgQKI@jos-#olQXdzwfC#iW;Ay`QD zMC;GA*N_v`6eJR5Kq$a8Q_^>mK#%?)ifjoOpEfi!m1zkAnyo2O(~cWmT8Q$T_^Gr_ z_)&JJa>oz@Gfvg5ZqH$O<5#hNwF)1I-bU`@O0luhhv^IkGTlP)V66J?k&!spY7d$h zufQk;=wo6A_52XKH4-ne3dLYuP|wt=zi$6}8rzx?UU!8-8f8$1<*PQ-_$bqP@8b<$ zlelken%=T?|4~cWB*RP0dNQu{C2GDFFc4(J83O)@+drtbbt3r^38w|-)z5Ie*oX{X zV>!&)NXF~aXKDp9V@Oz+1+L+tQzidssfx{H#Y0C+&Dmo;O+%sNL_*BDLT0s-HNr*j zqrkNSK>@7)zGUh%NevlqQua_v^HU8;t4SZla(_Y0qv8K*0d&(~82lSh5Nrwp)e0;$ zN`oot)Egy=*a-c7=0+*>`DLFf8y*c{`ka1fX@cBK8~vp?WAAw0@esau%G=<+d>DD> zKW;TdQDpn0`BTA&xPs#(H5c^_O(8W1R*4W*G5>`+F8YmVo3uE`T0g3X<|_T{ODy4>SP=ft`W9fqj4> z04Hz0Z*sNRxU?~(A`>ISIhdL=m2Qu6yAli}j!4bkPE5d(=xpu@XOff{xRXr^3?W@h z@(P5I$w)Lki%aD$)Sb=I_2@X8hOID_yY*TRD`BK)!Zus zX+=fj@_G42AV^_x&z3+=0fu4ZN+}J1Er$(b)TmKB@ZiIoaYo0|ED0f^j_uf?=2~Ao z)|@cZkG%4-^S<9{_x-n+c+fr!8#<_CFPYEB&DTt3(g_!`_3**;@7v82_ZyxGFsr=U zf7ihnI{lH847f-IM;Nr8LzONIq{>{O<$y>CgamUWbD(N@gV3@oUF&dKX6!22X3~Hdic`8 zmX0Q?l))`C3J@~1t_)4gXyZpF=TNm)q+R~4)MzDB+KN^blv)4EV-wP*X$hBRm&pC) zr`$3b-DJyK*|%ufdhpK5HZ&-w9W$TRfs)X)5Qc$i8knZZW6!+7y${de*i)`x&fJBV zn#c@8Xst^fn`k@7wrF0lIKQE0ZhoYtA%Y}i2s0)yxa5*cxap>wiN`Ej&){ zp>Ru`Y^a-`7mGFqq;yD>@~-7#>LurM_IXpWtd5T%`u6F;*=HQdd6(W!JZ^amu;CEZ z7JbHEpH*IU>sH9Vy9`!+&rvDegeyCRkt8qCDwJJh%=a{KPZrKH!i82^R^mFsHMzJW zF;SwuZALaZCDI%wNwZ}tSK&saVNI_TdSo&g7?-v$uZU^e9Lt{l#xS6;KYq>FG2&@N zMMk8aL3Rzx(W90`L`!N!p)*ssw5+63Aat%lAyP@bGISvWq4IboG&6m!fzrg3CgoN- zB!QZ8c)i`VkHnhSUW?S#YBe#2-ppP-bIaHuXX7HK39D6ssw1ua{T93h7e9h&># zLk}CLoq9^v6CKBHiH4g*xTQ8vwmsw=de|WoK7IMsH@3Url54r~nhQJJa0l+BHpax^_HaClvjm?c^rSpw(LHtlFA-M__ReNZp#dpLyAb;gWhBSgG)Rl zG%G_&ZK=`nBU8(A$wR5v*D?}$1htH_GhQ+&Q`Vsj#Vl$4NqnZyCzeSGm6U=mO-pE} zSlmwPc7b*AY}P(uO|RSz_$M$dMN;M5H2Qem6a6p*nO9A(Ute|*%v=@&x6nL*%v=@+l?1d zQc~QZ8?@qKpY2#aVfeW0k||XOPCT^txUsv325dG^3L#8^$Z;I2ITmRap{DA5se>un 
zx#9Br?!4ZaF!5O9_PZbEj8l&3aD!cU*=Zbq##P*M*VCMH+Mymhtbb(HN?w&0=W)WM z9oTJDCB3`kV;U*njs#+HhnA?FG6jDa0sxP&IZ5j0j&vl3$*Ixqbh(#iCd_19H z8LmK!{JdQ5xbZw5fBFp`{nu-J_`feW_{g(3>@WN9x8o;a7#&WBIe+-DE&5$Jf(2Z@o$|n1er<*V4FrL4jkh(&Eem^7Hfl`{bi{?l}H{qv_qdI|q#4qeCYKbAsG+ z%lRC0%9WH9=X2boy*-v#KZvT8yc$^9jZ041gJGLh;Pa&$`6Ups9Kvx2+mb1{k{11! zK~_ShS%jn%X+A4M6NTd<2azBz($%AOhs|QuB}cYXvdO@bq7-e+`e8xh>*wp@IjtQZ z*7V9RiPy5iYE-G1vPd_Pg$zWd zUA0ZeX6qm8HZ6*kT6fb2I32MFi#Eust;R3YgylFkGk*UBd{7*4e09TGKHLapLuN>P zI&rmOiaYj*3EY3jr3~48Qx+^(%=9}R<$yy^<-Z?)(P6@pQgYH?kEchE?o7SnYHZs= zI!?&8qPYE39V@bLw~8*YCm+6z%dfnJPiKAI;RYxw%;&!8=keT(_jvKu4?MQ`1uQtcL$*@4VxzsLucYJ~OxM-tDlBvMlUU7er7{RP0z{%hz5KlNjSK`5KKS zF(%O@<{J}RBDTbss8M5!4Y33Z_5#wYARQK1V3+N?x6PdM`(w(Td-v|LiwH#a>}y`P z&7C_t=gjkY%IA3=;xg9liziY5K#0!WgS!kaeu*f8Cd3}QnEAdO0P3PUA=VCgCe3m2 z79u8l#Yties~p4&Pu|Vtmz~FnCmhRp=bg=>m5}aCb3Gg`JBbdpXXFtGUg>K~r!YV?O(L&sedHPxH zu9~jwgyLks5opvGj-Tx2j8yg)hwLuB1!?d#@EN~@j5j;1KlPOPb}W=W{@&v82h_W&UFkib%Dz9e?Tf78Dp zh?6S%;X>CPxS)FGje8|ebk6|R)QVEzQ9vUYWbwt1QxYr!AgZEN$F|ZY+tXfkyKS9& zQJr?rNd@LQTzy4nf1Rxc=>ZgFe;?0pcV{J}KeX!BdQzPndtksyB8`|){iR>uq;) z{E6S>)HBXkx8MHfiCed91!X2}H~jP};_GuCPptLWK-TyNdFWbOx?|729(d!BRiowd2SN_Z(Pu+bfMFkm_yoyUUH$-TU zn(6Opl~OcQDLM^Bsf1Fk?Dq3+@K#x*3)zXql`MBux0UZ*2`tskrPbN7OY`jO6q8Jw zvCw&Y*4~=CFArE&j@4s85x_g=J;b2UKtRzZ+t?hf!BA$B^&!(-7MXgvgt9Zc6CGNW zZA~c;=p_BQV3&C9d|x<;2wnZA7u1AGd!D&|L4qlKf78p|uw9_PHX z&!D`#jIA{_%=&Duxc$zlnJ1idCMQfei{;B!x?m$T!M~n(toh-8KEwP4i+4Acl?yC! 
z&iB~Khw<#=Q@Q*8$GPvpe|7u+%+1N-Z+HEeXe7?HH~)i{mPqg9e>Gs0B(EO1^+L)D zvrr1!B9bi)5u~ZQC&X1kjwn~*wqHB>QI5+`SwRd7xP<*YQHjElX$eI=p}42}an*%) zN2wl&ESbh5B|6GTp&+PhF1m}(25^K zRg|qUyFi1t7_6tgz2@l^rFko>r?me_szo7EeZtA^L1*DOv*w*QnTAlUJb)sTV8&y$ zB>|91+Di)M`P7rKY%Y z&zYy6%%$JHfT5K`sjqKr3inc(7k%UNm~g}uawG0Df$%Ta@J|b^OwIr!>rF1bNta0tp<2a%>)B}R{=vMKkB;R zgkuh7?fR|U{kQ3i8aj~TqTCMqSiECgjB-y`A$dlw^k#7tlT^|c9oO_>}~tu zshaM6PkH2Cr|mmDGRTo#ZagbZqHiLBSS5j_Ai2;dNfsc9++&sco5{+xYxz0F6_iM_ zEX#{S-gx)kn$K>B5x@QIZ(nu#>8Jn9@AvComq?S@Qe>F% zpwa~;L94>NsU(InKq(rcjU4{XqpXrTXl}P&&FKznx9rf+k^s%Ea;#E|-jXN=nI@%Y z{p$6sU$U0mtQ<@Si0oug<+M_FZhGfEY0?sDW#Z|R2!?{z=RzTocI$QPx%++heTgZo zW0|;gXLnx{)BSACyDJ!$U5T#iVDIO{Z#5{C291$Mq?BZk(byQ;V)khrXeg!BqD6~7 zT(f4)w_bel#jm@Iun5@ki)$|8g8wN%zP7&L4$)f*Sn9*OeY^D}thp=UVu$O()X zK7`FRwOn)McNjLT(yD2u>*$)#s@Sgk&^2GP5F!wZ#xve{`(2)T=6SCC;pH5A%n|O7 zW@Z=F)^1r=^z^@{bI$|+v+;%-f%1i7xoQY{aN@=2$VLeKNk3ajIKi=~K|MQKBeCPbh)|&Ag8)@+1lb`VV zyUU`HNF<{dn$m(*#?@oS)E%A?$P~IysXzdw>6+60LIrd*pC9SdLHA>78t6Jw*O0o8 zW$j^%%CQue_6N-_@#X<2&5=VLT7D#y^F%#CkF*lW%JTZPt62BNGIDaVY()eNB@^w*gCH zx^6~kkvo*KKnZ1op#xaXz7AlS)+8e_m7=MpoU7oME}tlWYDz`UtCFX{RHUQ(RlSc}Wq#OVi)v4|o2Rr~f&X5yLBbdj5uKnp@Vc zt1gel<0xgKq@~1s_~~38dF%zUv$7aBW*A3L9?y{S0iYyOO0q*fiV8wWfF-5L&bmf! z`SWv_hRIFWe3QaHS-pVOgK@z*=Un{o*s)I#iTH%y4?Z9GbkH=Uu7j?lG#%yhfu@75 zVQ2!O`_YSwd9tRC<4!$0P5-I%y{R-uq9%EiIs%t7OeLB3!5h0LuY7){|K}}^JMBCV z=-4DQm)zmVtYpA)-gAJ+l$M1hZNO@%t>NGS0g4Ly(o=c=Rm-bxxde|s`ska^KKtxR z(P&gNP1AdFn5K!8GHEkS(|tcHGsNP#3mG`3+ya`ErhLf_%S5823Epm)qC-Q!bRoz! 
zE7wp}G15H;rBvGUGYrFhbk0>NMQcL~x#a~2U37?FNsnzT9e1O^wZL$#FO}j}g!S)# z!Lf%PLo60^&%t$5dajvsO*N%kya1&L1OmR6mX_gfz4g|EUvv2q0)9UE@G*=Tm2e5h zW9>xR>giX0Fh)Fv-v`GZH;JrJh?ib@n=McI*)zBjm zS~D{U4H$w?*ArdBJ>7RZD9u8olwL|yZBGqjNMP+3D;PDra`)v`b0hi1gFs7A5u}IJ zA*9_k7-f+yJkO=DWtB{J69TMR_z}k*F`?V?$~ot~A+L4`Sm&O5u6E&t7rszbRFom5 zMAvn6T~Aacwks6tx{lB1vnyy@pDU$^#bN~X0P#$dj8M?copKA=CZSV8dm$CH>?UMN z&}n6g`5(;Xpiu`AkH_sMfY!Mv*EJ~7G|fFvZvj=)G$|}BWa-MK6ql9Q%pT=oF{+d( zl;v=CfXN&Ga#@eI`Ci)8m!iH!IM>hlU1!3D5cqsP_q=?X-~FHB@i@7;xdkmPEl;ml zv7+HX11uq|>TKcW9rP(FBe$SGwL7*@HlUQFj+#JST?4PYHj|c?Rw^n6v1svfroZ|o zfnZ=~MMe1#Yms#xe+mS%+gqDzvYgk$DhE?pQO1?m-onH~N7Jvgs9SNYx}6OS8GcAG z6bjkvR&&d8-+c40dHU(6x$?>@IqR&mm^pJMuf6sfKA)elV-KdNsF($dS2D2JmjqZ& z;NhT=WlS79gvVa^h#BuMWn@JO1-Y5MfYr6!D*M6E%HIVV8#GNRs|$kNW=PrvTXi82 znvVqE95h`+NHC>IVMz%Kwrr=k-+-istlrp^3tA!>v=qs5&9St+l56g)4;VYT>PyM1 zC7-XPq;#NbT|o*H(ep-!IF=xV^{9|aQtWG?sHktZ<(01Mdre;L60oYOs!qA|(n~K< zO8NYLKSGEklyv~iYc_fj8K6&}KFt1XHUoy2dFIgT*G;Z9BGQ4D`?;4!nh+S#NQ6*4 zgHR~sUP!OT0wlffjbpjmb|ApAg-aQDPqDGvy<|YmrJ%VxL zs%UI%;mx-{VB@AOjIJ6^P0fy`X)nK?^VysQ^rU^D!X}VpsKofRQ$G(fDwBtV#4UIr7&Vr_T1`ZpICPYUo%L`g*ly65> za_?o2>J*7a2x-Qbmsg7xFlgAABx#r|xjGU&Tp6a4iDpVc_4>~_V%+fkR$lEAur9ge zl7}Wunlw~OOFTO#?x3u5LA@qy>cy2(vSq^-ibwWKl-{?=6w01xEyZX`1ugGq%C@+a zXIuE`d=4IYuxs&o%~l6gy?~m!2!s&C<8l0&pJ*UPFe}634pbeUr@8|ZT8i{oWU%wI z8pe+rpRl;nT)$KoK*Ja}O`{~!OA7jRhVbY{YoORY& zTz1)Il$MqvgrINVzRa5S2?I*9(*l;0qA_%EF()5e#q0&^c<194n5INZc^f^?TmM>* zJ1)9tT4{4rhIfvHZLwH4mPAMkoRpH5YfPvlV@VkXzP^2F3+0iS8A`UYyetYqCnPXW zdybTk7JT*z2ag%`<>i%6BdYuaL!pEPr8>F*Q{90s%TRgPguwPyb2;Ros{K}8>ANti z`&@LFiG4MRL^*MYuh} zNM9wHnISYy>+<@gTaezzf`S6`qmMpXxMIbM)dw5h6+m7s+5xwk+7^nkW0q4n z9bh?ZEx*o@hYV(Ybv^5=>rhI47l>iCcrXuiusZkLbC3C6QQ`MRTbu4E)NajDDP6{$ zw3DWsxzG?uWr3D1G^A4G=VbHY#*GZB7?C2JIsq+_rZR0BfwgJ%QpyJn+B5R%(^(9v z9FbySsU#PmlF9BsDLTflB9V566$c4rX70E0s>{S-zu*7uAwz~p!!X<%%ekQ5;Gox( z^`5v8f?zO+VHoty@56>g8r=;qQnK;xxJ22!$%Pghh29} zI&Hv92Jsfnbp(y`e`UooF{O0lSdL(FEUJXIlM}8S+}M#k{`cFs=Pyri_4U7}zP_QuJ{z_)k)566 
zt~Y019RcJhd#U$KsuyC+n1k4`rTcFGKKa=M0)aSi0anAoby!UYLkIG@IJ#`$O<7G% zen*M+`4SedQVJE1V@6}hn6+U_kOs;y&{pzY;MSXW-Rpks>5j(Ml+KA+Fv{ajvk30Nne zeDV!a%Ait;a5zjb7)-jT4r|3he{a=vUDwIX%p?+tAf+T4jZ)rsAnna<9_m8Eo#udp z>8)Df0XH{iz?}-Wo(0S2ET_1*7^M`^Xw(%Hsgs`WJcSTsWZ30KqEQUPV9b~?tXsM| zQA$N6ClE^ysCv&666YsT6dQ#|)0*H&B!b`XPjaDp&n;C;(RH0rC`34H*$Z9Eq;$-vRFWIvPxPh%LKoam6Ez3x&`Q zDJd?ZU%x>ZMx2(GI%2Ul0s&u#@?6gMb~OS4AHTTna=tO;C~mm-q6d zenl)6$23hcGc#Qpa!P(mlF;$ac_0v=xw#po6y+6T2!~e_4DMEG@Wr;ZH4?bVwz&R^ z)j@aj0RUKma5((;TVHxf?`IesQc}z@0|$~D2oQ-{rLO$8d&}oGh7y*?#xRH(1~L)_ zpUrZ%t&zT_!KSq<8Co^g?g*UHiClQt6%Lgdr)jxL#IYR9I5%_8%d0sHs2F)r65~(F zj!ZxaN%fwT?GtRSuVrl2i2Yt(b=6*ERKI@x^msh(GE$rojYgBqKrh238jZUDknt0jA%9}**27ujOT4HZ$Fq8U4wWuMrB1Me!m~nGzkO(NtF6n zEasl06JPL}w$251Ol-q2@caE_`$I|Y$pn49GwVtw(chbE8Ymu7>@Gg%yuhMiOpZVPcz6D?va%3D5RFDVSXGV-C=dwX^Z76g z!&U5jJ|81SjNpSAa~NOM=gVA%mB6JyrR^e|53B&50bT%heod!8e8LGQ{84Xedg@09 zj}=>5TDfoj0+!a*aYVm<{CNDhr2ET#6v9d>)^wz%S6)(^kI|)3l1$N>3Ma zT?kj;E63tcHhIHU!8}bx1jJ%dDl01XxV#Ew`fY&n#I8gq)|CV^#ky*!-$wDMs{LMG zbp=?NTeohFx3{R7oFZZOaTTzKd#wuWOE zCUsl4laY~;6!1>fZc_u^oqgs!uia6Xa7nt%z(nN>rIPCkdaWx{D5O+u*|wFo=GL_5 z;ysVl_fmtK&ZEt4(|Td4bAhFl8kgiQR3_oJMxc?yCX7l1-94Hps{1^J>do7^_is;< zo14SWuKq6ndhSi8PMu0)Vu$ZGH?lS8}KRcG%y`#`#LRv_V)G+y}g|W=g;T0bsMM&w^Ejs%aqa*8g|xO z^z@|Ip(UU--G?DS3C#j7T|?e$B?&7HB^0J(V>ud(u#9dma4A5!;AJfh z6XhPit*wo^x;p%Rzk8le;#=xP_4#~B+JJYGT$fG~(IVjjbgMI`iCuX!^qgbW*4EP2 z*4E+rrDKleEL|iw36W9!g!){!pcXT=y3Z;(hbg>IS*OJ>V^sqlsNpW8;I_FfD zEL+9)+B!}=@kG|HUCXb3{cEDpC>LCCfjg&8Tq|9yEZu8|Eml`w2ZQ%0Z^8inV{ail z-gSxXE}Q{83%mz3{}<2wP~hT^KKkh9fX}CG&dcM1A?2J=Qc8y3kKgCRG~y_ieVPnh z0lyy+kE1l(GSMu?ps6InG*HqiZKMf>M4&0Jn=j!a^a2+i@Nz&-DZV;+-hiM;s`*|I4dg0TuwUSd7-zRx&d)ljyjq`#Kjk7K?Ro8M@k~o4aILR~phP zhhnQC5@ola7p^bZwYVHD#f9xe{XQqa-PF`XW@aXtnVIf6c^83KqjeUow`zwA!k)sn z7FZ>_0=u%VoWx=0yd1xCYQWu@T<_xZRtfPkgS_Y7JJJuKlwa*eD;U)S3(Myd95QYs zhm0FZLsJ`VvY5@Aw=#6-Q0C2>M^#l-hdE2tLcKX!UY0FbWF^D+N}1{HwrlVipf7Mf za5Yvl!?nQ2z$fA8pZq`Y!vl}zsn?7$54 
zSXY?Ig%S2=dDR(UiD)#MDWw!jDT<1UC@d@_7K;&&$2**4I;}C?2`i;AY`>?bX?8w9 zGQq;h1{WUSa$;VNZ7n=;!$>5IDJ>diC=_DYuwlevF$}{?(XFz5nQI^f!iO36;( zZ}#Q`4gn@(5jXC$!Oa1$kL}o4d>_nhA7FwFP|jljm}hTS+HOv5#E20WD4S3b3 zLT*}tbB!HKR(aB6tk3N^F-(-Hp>(UfmXap%xPfUH=pIE~+5`&)3E;w3x=98v*JmA$ zV48A|%PT2O%(z78nj6dVx&jl`-cvV)sURAOxbD&YSzdJtSf**V8HORUva;Ng99~n@ zG)?01xND9&n?t%t^=bNiKC7aIPeWpr&<`+74dLuFme5*2;}k2{hpv)ozhW$BK)D zBjlInr_}(Q^K=4-sfo_%=*B*u4<)P~OQ!CzxbSvGw#oP&AWNhfgD>bO7K@RSljHtg zR#sMrJd4!p=UAty34?yWAElHT4u@-cu_^6Xj#9_$7VzvQKqx6u(hAxK0s*qJvO3({ zsW))C*Byk9+9yRT3}BAEIsJ`>V^zDKj78oU3~U270vpmiZUUmaA%lvsiWECriTyYL z7-s|4d>fpe1g^1vD|MK_$Fo{mJuQ6;4m3aK~i?E@ud0Hw$e^`Rh`hflLB9cuy+)R=hoBu}v0cK11&iO1UVcH$Jm zN#9qVx?PqQlUS0DoVk1gZ<86TmyX(-bqZkm{;1ZfhN3Ke%WX+e%JgpyDjsiUTK z2Yyp)$q8pI6+%cMgak|>gbatno1~Pt_gYuh_``jVu(7onBNpSgKe(*l(8|HP13QS_ z00^lR?cp#oAQ5O}Wo1!SRfW&zBN~mm%AB*Q6jxA}geB7?ZkYJ?J-ud?y**;T_5)== zxxEbrjZU(0*X%p_?@H4LAv7*N<50eN{AjX5K>~pw zn&zW525nIjd;x;N44YwRBT|A&;jSa;Qpgi=D`kU~v%<3T@@WhUwk%j|0fs3tqzR_Y z2K2axrES3rTXwm>sT4{RHiO=3%ryG26VZe(39QT%R$zKppmnYTh91NRv)@FIw_!>X z)37W~Q%aO6F{R<=pk-zFKq+b)+u2dqil)n!Y(279iZ=nj12u7 zO^ic>!hS<}{nXnSnpKfhc0a75L}Y}B86~b_#Fc@eOcKo-y}e^nnl*~#cRPw7`{p-o zz>-$7n`s!BrbK8UG##M}bX~K0CutU-rjJ=kPX>D;S=n(3Iz6wfE?748LOK1jq}#yJ zX$C2!iD?=bNQ}5aiQbn9*<%p`Zv5m9Uiji|fG7Cby6+uez|;rWdBfEga`8D++$$EYG(Vc=aq~&jl%`Zxiymox<|Jxb@drsX79-lUfysxKx(yps zANS*aSehM6^4oI3=aCVq&%-t_)v=H2AeK$JV*v1q?Fyx0SU>)B{b^M8%SjS>(YPWU zH;KkgqNc4{FDS~9`LA#P+q6MdciByufGH5iKXqREvRp|0;l-Dts{QaamM;B4QLx`wVRG@;NnLGkbbZan3`Y)D|of6K{M z&%AL<*_1nucI4H%)is>+zmIVFMMrV*H^x$w7X%c+fFKy~0ltK^QP|3f!jRC|9;ZEK zCfI-^Wnj4i(*d#$UT956*K~B!NLi}dMzu7w?tG&SD^EmX?wg( z60t3X(>ltB5DIjie#JRtXNOQqFmK6Lgr@IRd9~-A4%e+~2hOzvp*sQY{PYn<{^Jx* z{?CtiyXGTmB0GpH!_%zA0v;bef+9e+7Ggl4lngCkBfhoSc8YDuctFGR+O=kE%O}wW z+5<33+Ln>@G+mS^A-e=CiR?hYFm2_~p|zVHx1mthyQcECbxfR_LXoHE0g4u*724{Z ztGKBblDZzDPzH;t*YicVkxoBhP-ql;W!PA zZF^l_eKm7WhBlav*d5`ng=Vey**a>KruiU?vp~V{mQ>{X?Y;2o+P3 zS;WAc0cb*_#9z!hW0QsM2v;j{lJ+`!G*uc~E8(|wEhOdb2TYPxg}0VrQl$&k5v+ux 
zl~{o31uJE1D-KlyC6Pi}wGx$Z0WnX{24HjhHt$LI0@g0ZiIWfGnmhkO@zv+j9FGQl zZ4s-ZY!9s^FP%5tXm1Y_iN*H_bFZx&WO7VaIAxNp8SouyW#7(gRiw5DB+} zeS&^3T}v=g%r`s3hfq+nvu&TqtFJZ_^}2Npz|~BxxCtxY<9k?)BQQ;tZd}09jSD(_ z?TIsP=ZwK86A%IXnvb}QCs|_3HLX<|_fE&PqrEpbja1}x|mBVCW2>@2Rm=p1hN{lgK4^~>nH zrID-ucuUW+fSnc9704;8lYq0&N3)l(Xz40O4Ij*uV<)(~z4iWFmaSMz#oz(_&o_?d zg;(Ea3$LM50kX z_;fj|*48j-dO?Yz4U297u zfl`t~U8yv7m)(3ZO)M)pG1%Ehy6vaC0;{CBmDd%RRBJHdf|Nj2)-daLq~gJ)uQ{jv z9M`R@?S-UWk8xv0#@~M6PwiW_ZqLZi%favW_1qHLMRKE;p8n#i@ALfh_qgQzlX>#F zH`!RdjqiW!bRK$qI`bB+;DWPHBrD5G0PSB|NZEiAo_P9oqR|)^pEJe#dxrzdfziLc z{htJbK8~I=hNa8bx`6eMCuef{Nr&+4v`=YjZsXWVBgqIRJou5A$!81JAuRX9?gYGU z0Dg;AAopo3#$EyNDDY=27T=w~=~%>wY~XOLJgDE;0GAJ(1PlVMw_R&r2jFt$RoRp~ zb}6qOe`Ox099e0(sslO=ZKh4Y5UD+XJI1h*ims@n3hNF?kpfh11$eJjWqkY z7(p#SKnu8FWO|C=icSn`mG*t7hn4bJP`E(VnN=l8ZXL2KurlqqmBcB9YageBD^QTF zWl$Edy4WtSU&-oiE1Z+Pw-<`KJr*L=VFLyflbGF}kFMQ_&fH7>J?&k7cJ;+f95;dy zLkDuz&+p-~OTNLgFTKM*ANVEZ154cF^(*Pa;$>?XJh&fUtli}PhST9zKr9wF_;A(| z-h27a2%*?my^TmTIgQCMCEIp1k`)RtxU8>B#`yJ}(-~COm!=h4cb8%JE8CTGF;MLR z8;53jn+;mSu&C&cMfH2&3gB~l-#AcV1KQWoUD&C-Dw}eLC9hU&B^EOY1atyEEde%I zOv3aYpp^H$eXCMI%4rAUwZ76?Ri4T_-2p3^04wQf^qA@j1PBFn1{PRk)Xz8UWW}0o z`$S&tZ-rDpbDlIr8$dxo1Tcj~h_Gs2N!#{Gzd7UyXJ1oDD`@HV)Jp6tl}x;6$Dd@P zb!3voWjX|lPQc0`SvYYksS8RK@4iZUXzI$Ms!JPW zTv;SkazbztSSb%1Fg;j#EvzJ}JISsxS)nK#ZmkO6BoP!F)B zWg7|zCNTvqATdSPo3sfKrbpOFW!kw?iAk59T7(WvxePu#riBN@92R0auu>kdQZ`sQ zaVw<~tU!t6@SKAw@a3X_Pj}2?Q6i3aPcH~{d$cH}#`lyJ*j34!bX!(dCe1DFD5W@J zVinK4_zumj?L7O^I~+D)6iO*3jvK*~FT9OunzXiu*;u`u!2?S;>(nEeG-0%B!DnZM zXl`k9kDVRL2o9?p$Wzb1O*C$}F6^X#vMs{#lSlK*tIuNI;`Ish=*OoJGfWJ_(7)t$ zd>nWJcpG>XD;c=h2CDir`(I%1v(C2C4&?3uuDsf|!>St`@Cmn#hjggt-Poi`bc3*} za#%p+y8i5Axj^NBn1|*rJ@o^vq+~h;E2%nym79AZlfm$i!Fl9mT88(W6*b;C-d>ki z``2Q#*oAWd=Gi0!A3id*pvNjQccBp{z{yGG^@3DVoXGC8V^A2G` z&RE7|jil6HY`Z39k{d7;tdt8@UJAQJ*L04{o5;XmKL9p|YdCkh+eLZ=@AIQePXJ)_umK1mm@sxQQc4Z0!o-)k?o(~I z-p9c6cFZc%b_u4A&9;R#4mb;oQFlNNn_V~uV9{!egy7Q!!JuwgMW&srrMli6aB?gu 
zFG!gVP^mOPr4m5p_X|dsC)r|naDCHA{jFty?q;I|YEqVAHH5D2y#wQP=V>wvJSg(Zcz&rM1!E42rb zH_2DHqzWruk+0`5EHnh4(39?K2^i8~OKdwEqSZuYJjo5{1uJY2l+rqGkuRSKxd#){ zEI)94tcj{=S3q4gSWKJaxNhC70~oLhfcl?Zbpe;0d(ti|U1*xN$QGJkYLt|ZuU$QV zTG2lIzt<@x%cV4Rr3|~Gg^{+x#U8G7@saKJ+u(8SjRyV=jIn`hE$|EbGlz-j_w0YK zz0YFX+B)SxTeM|U?zjc`ZP~yAUcLXj2mw2q;?y))RpOOLU}-8@UP+OVNkS&eEtg1< z5Mdo@=JxCSY#)`yS?HSR5GylGMNLzLO|=o?hD_@QOa?29WLA*nXM9CI837%kps_8= zWj8<1=IT0EURBS$aj(m({jtH4NzO2)v|6NQNmuOCtRe{(!C&+ zO1_Y8U>yc!^kYDX!8!{qnWY>=p&M_5cT~o{k!&)Yv^`*QtF|Y}Of$ zWixpn{r-(u6z*Ce54hQW|0*oT;OD?rV2FK=Zvu_B?idAb0uDWZ*WzaY)teirYmQKu zmqBjG$L4xv7bi^=>NZoE%*q2!g5&~KG8LpeGwyqW_VbqX9 z0)8EzE-172X$hM&w#R8~jnfh_Xp0&ySot(TC_|^TAV^7G23bK3iB+#~?Y1Va`rV7v z)w=D{uH6@4wZ8(^Y#W#Kk2ZhC*_Ee}A%f)kvS~D0>?UX)&7>1rdoo+l0x02XDhX@K z0XTuih^z|w`tkv2i--BcC-?IAWiMJ^@(20tr0;Xy@G~gMC_)I0JUy3OJ%_=;f%b7F zhBEO9UmDX@5mzQJuAjlRZ`}c=n;80Qu3PtDF9xL_Pdxh{CQUq;s*#o6w^u2p`t31a zrC|ta8Vv21LqW(A;o*qEpP%@E7iTO5 zAn5mV`L~YbtYbz}oS%Uv1bHEy+>p+oLdzhRQeaAD6K?@ z6ThyWdE-8lSNeW8{ckozIJIC1Fz(fDOSyLJ`RKxmV{MPuxm0n1Cx6P*?blnhDM6U< z0+qyD{lXNAkRD=e_DFK=suQ*0I=(smC%m!#LvUD9(i9(Tn#Y3+pJrwA8VW;sWCucI z=s`~*UV$#Olp4;8?U7nu+w=~n%=|yzSp2HPVT}RbiPHMj(Wa5X3?C#oOh1H@BSp0qu2LmS^qb|2+rz1ms|cu#+eo)$!5U z=i|9+Szo==Gz{a`FZugX5BQ{x7*^7-SH!m1?;r4^dELxNUd2g6fN_gg)^O2jS;fM;96r z#ma`&9P{G;LZlg>6*wD<5%ps%pMUE~^jcR2m5-{R6-G<3E@+cNu^9?KNxr_Q@Z% z=YByYC&BEyF$Rk|zDw|2x3QMTo}Z&Wowri-?b~-{B+^@|X@8JcI~!U9Ge29yX_JSO z70}4f@=@O!t{L-v000{oNkl$&1L&(YEn z=~Q05|H&7;|H&5sJZYp2>rc zyb%8Vudm8H=BNpKn{vl}z|}d~8NB)M6=Vi=OsV+1x|ML;=t9)0w6aT)T){#K!O)T{ zigN?5HTK4*Yq{x9uO-PVZ(dZ{lsoo_ysDmg<9;Kr_GiExZlaE{SmO7CXpI<4fz8;aRb1U zXWz-m1CB#O&~8Rp6<$M&+1`m8Fi|VetFeT%3@S6q>Ep|H|MQx*ZJave25Po_;e7FP z=Cz#5Gqv03=w_BF&rMRvr&BAe!YbFTlfAcbk48Sw@UtJE&n4%b>;l&6bzAtwEq~=3 zCmqF=S6r}A)AaEG|9R_Uratf_C!aWpE3deStkCZDFdR7ah}z||pDNmquEPgP8pld$ z5{X23?v>AY@`VpoTYH$iyu5!mG&KB-WO79B*iYouz!U!P2ch|H0Ql3*=W@a!We5S0 znBN-8=l?t>1yP7gAJy#4|gtVR3s?nhEz# zyi9p6(<}-z<+I+g8-S&0I?1@XG7&bNQwJ 
z!`Ww^NM>fwfNS9JBWjm_`sDruSP|2VH@x=N+bEm_N51myb^oi_dZfK;(*C$c2!BG6zwuXyV*{&*@^%HT7aPN6V!ra>)CddcijEw?^w)f@7xOY zb{iLyovaSu!$UQ1>_S87Y|_1(xr^s!il);$b^%y+3@iNd>+g&EA9^<9rk`ETiN_x1 zez9g-9gjcrDsR0zo6}C2%(pK%ozmhy-2<#)lWUiK^7wuOSRa1&Ie&Td&B(^;tr-Oc z1kMjO`LQ@ild15RFvuV})3DH@4) z@>8E{X|$H)1#xcSAsm%E6+s5T(IbxFo}+$FS*ZIJX-aS^XkP3&*8S% ztJ)Jg9+x$-R?@RgyPE9ICg1F0(#@1jVo^~KbO1{WSob~jJQEKc!?8zAn1$eo>Mc9C z?a%)t9*^_O>o22XNMa4Nwzc!(^mlmr`8PSJs*-cRc_N1$IvQQ?JbR;T*wMAiKK|!E z2dt$lHu9IpXUauOS7~{9d0U#Anyvy~?!}P30#JKYUgeMc<{>!)j+lxN832z61s1lo_iO9^LU`dzdCgZnv(iIJ@91y zvgPZvKmFla`j=X90mCqP>bV&_{lc4^bLR0}c+N@W=j9|FG!l*R$?V0VN001vz^dEX!atw;km)mK5eNjLLWnYb$rdU+PlPWw=qrj!{O84tC# zw*Cg#*^4V*$=K)eDv(#+H-G4(fGS<>7 zul4|7bvB8wO4 z?K#dl^H_9EbB|L{RMB3uVcu6#{C3WQHT?F^)8(}P%o4ipTNRB)FN{PYzrm`6-aEef zm{qU5`kKkB-LbHIyP=tMS4-)xrqbQOsh7D!%EgqsFqwDP4DRke=`^8uH@@?AAEAPR z{MQ;98jrvH(zE!?uFTeHy&%7{`k;BXR?!~7urmCV_KQ^RlO6|h; z{<=pktS{Eo@b{-bmU9=b&~kFJx3{#kUIRQwZvy$gu&{dN)z?T~^|ZZ+=;X0GNH2P* z0mw8y+|)p0x(STiTNJA#eiVpwvBf<5#XYZM_G|Aky%Ul8I(1SdT?Zz+!`w;T=Rgp6 z@kk`{WNvQmz}a(`jNi1mHvgC-$J$m=`r-HaIH;37}M_y#ws~_O^>y#Gv z=|E-oj)LOK_L}u`_K1bGrKX;HAAU#O`^bMpLsM&;lp4y!sl+ zE3q5<_r$-e@8bQgw2jUZ`nsESPKu^NngzG3`~~ksbcX`knNL~LGp5afMz}~jva+&% z(ca#E+o9t|tEsnMDRT3&yZznIS8m|-w?5{BPZra^Um+(Rb126jeJI66$+51AgHEcQ zKl6bvWnncow(;c4b9i~i9HnVmT+_6>B9X}LSnVEq$G!!uUU~I3l2>0Q56e&Y=%C%D zg?5$akS;kdU7#|ZZ&!3N`KkWat`hIMn+-10XeI``X611RCz8qkU{YLG)x2JXsu#|!iXRqSX=Vr<3E!(x+ z+?+Yh%`H~}b9<-fzuH~+Rmm&SO?lTC{8v5C8i!xnk7@Ehi^?ZA(k*)xeD2>G)a$nAQ#DId!9(7F9YI zpw4PuRB$b#V#zW&L+Mm)|Gb=q+0-;X)8Se zFRzx;Yd!7RF*!d!@2SSdra^}tdXPHrtfRyUlgFd!T8|Dg^x)HL=e+snP62D(h8;Zm z!aO;9-U=-%E32-pt?hc?sa|E{fB~_Nefm1er_Pk;bU>x($Wjz3ZotkSp=)+xIsi-8 zCri>fi*dis`4s!vd2|L`yE3W0iz1!o0$uxrJF~P>H5TZ+hL_^&pWM5@4PRzv=1&5F zz^|H`n)((O^_AzGF$Yw?VLC6>;PEXYg_o&%L~+7?=2F6V6Z(F zi~UAQ`2ZH3xOemd8SFY7(G@j*SDv7*+=87c-JLylw(;L4uf%`(V(Dt}bhc1-m8soT zR(J2%f7iq*g@uK`sIRX-MhL-iM<2p@XB;Iaj33dV;H`HIA9rRFU^TTy`1fl|m^Ndc 
z0%Z!JO^wB3e*l_#FLUqs3S@0pVAWmF+SU7C`-|8+ddGk3D9g^yz6#Wpt*vd@%wXEv%XxPCJlP(Oh+r`IuW&g0D`3n2Z|~fL zw5-Yi{`)Sw@a-AfIOZ)4 zZHA^M3@4*WXvLH@D5w-mluI}ZLKy=J6s%u=JkRNUzL$5u{TB9v*nNI8-|YLH_kGWM zzVn_v&v`EA9IsNxaU7?dQ~Rm+O5XXwHKM4rrTo!L)Lr2<{SF=Z3FVv)z!;^)4lqf7#U>EzyCL~uFN=2_p>C72fo~|!RIdX*=vCza-v;K!(ke|S{_{@e_gb0TcAVBSJ!>v-i#LzX(F?e>$oUz-6=iyM zAK>f$3c3YDfjPNc?jGP{ULB5d8n6jjE!_$QD;OF(nP)kDOQ~b{yeh4m(cVl-=LX{svqRJO*qA7L-uVDVpw-KqIiy2(K0Txf9rzAh0e#Y_3ayUzia> z3-CSQ72q;WzeHc!Lf|G%djfb|8^gVtmd3X=cbx9ZUZ7TwTeP!I>mau&L4(QbAa@wB zU(@~w%+}*3bN$7@4^<#kBD+oxH2Hiyp*%BzrRM&h=OlT2JX}2PXNC)=XN9rEP#GG^4Os-;A$g~E(Dh8@t=YBoBPht zk<}nzFz`B2l+b!$h`$cU>2_>2n#w+4fd1cSV;#N~1esX+e!UOv=<`Nvv2#7!w4bs0 zTxZU==zV|EGA+|SDurxaQ)2{HjSAb4mtB{z!W^d(%AAP_wWM+etJ6r;CXWm06Ra^= zJMzgok}1Z0z)yj10(A+$_f4Q)k4LG13Y@CK>K@>3RmBI#Mlg=!bT2j#1y#JH8crIC zAia{pF!Im`*ex|GHIMU+R)h+LR<+>t)=v|K5q)jIT}HE+u6@lS;2NMo`@9OoEH_*8 ztw z9jyKZm|zCFft|7AG~!neKCGJ%KNtGW9#M^HB@#ZLidl$~$N?LHPntn@@L~N9@nbF3 zw3HEo-%vqwH-&DDokfmQTF%A#n^&mD70S7o!-GSp~D)H-$+Wlngx?q>>iAv zvycc#rxWzn&az%r?iSz&MqMu_eN;W}$@E7|u?G>``e(J{zXJ)6J&|bEPgk%+1zop>3GeyCBQq#_9J(8LSPM5jlH%=-c>rf9acnOrIGnN zlXOt$IF8e!8KIr!LnT~08wrF7p|NkoI_y5cKPe2K-GfYM+-lO^20m==I~$39viI#E znok$ljYN1_ma9f?1(wZci#c}*u-%;727I6cfwct*`pF{>@?rvOXzclIYB{j}fei3n zqV32-Ir1v31CtQHOSootLVY-nWxIFuGX@}1=hv-{km8lt5nbg;k+3M%b}TSxoPPteZV?$WyfF=rJEjtQG3_BUy*32AW-sa7$! zi+<-Bh&j$io(A97C?wdY7zea0YmkMSTN3Ig2++7dovs?*He^+S$4V&cA>cuimNI2^ z92ZzlkK}dz`(DI+*B-rymWi?KrTB!`gZej#Un`W!2Ay1*@IH2WGj+NfzT&uZq|_m1vkBUvZbJN8Z<%Y}gUns} z4-%+g7Y)uLbE;k=GP_kFA@Lhbhg*-B`_^k`JPY`h{;fg+K-QDIa(@n4WqY&=g;pd? 
z-UfpVR4u2E3b-}q++bwF?hq}{3luJVN=*3Ga=70M#J4uh{DwfSK6{ux`+1Ugi#=7# z(NF95WvwH7ZC}I$I!dous~V)^IF56i@TG|yr-$M`EXQ#i=Y-(DjF%ajgd}AR00000 LNkvXXu0mjfRJIQ4 diff --git a/doc/source/images/instance-life-3.png b/doc/source/images/instance-life-3.png deleted file mode 100644 index 29883065621c332252eb2795bbac4f3f3c22bf1f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 30176 zcmd3N1ydYd*Ddan;4TU7?k>UI-90!2cMA@|b+EzR8Qk67Ap{-Voy+r9egEOs)O62u z)#>Wqd-Xc2_gOnqSy2iVi2w-#0s>V=T0#{90GKGpleCU21OzhXe_u$5%xpXe2r>v62~l;=wbLAr4q}bPhi;!~n}?0ujaw4w zkWd)0BS~^H1l0u^gx{v(u#`e#2~t?d-*8O#u>BYwO9N@pqo9ISR48t7WNFAdw>LKY zPbRi5NE@uCgbs6kWW*3?8?G}pT#Es#K1+^oOS(SxzbXDme~bM8aMPk|8u*(;oFY*I zLx!TgkYjOaiAjFI)Im*cUUsMP=zoV3CFk<1 z!JpBEA^IX%&#iz1u}y6JGTNiib7U{D9UsW(bJTzbmyO{b`R5g?)sFeiz9GmFlFjx& z`Z=*pK_PY=Lh!ApY>V-uUAe~K9w0P6XLYfBBYb0$qgaxXtjfv7wFMUwNwTe&Rs%u6 z(+D_=4s2S#QoI;7z9Y`Fa&$O7)0i*|Li9Oq;K6PcfU1x#ADJjFE{+K7UR+#Me4Ir! zI;aVFZWDMv>i;TsuS0Nh^7v{cJk*c=YN+q|?jz~wc=2Ldw-N;cfk>f33WwX~|KZ#` zY|yQTLpyT6l+vAgdYGR&9=!AKwR8ig?X74I2#hhDLLuvS;5i{&Q3R`~FoX62bwq3f zZ{p$5xL<~!y#kX0`vY&ewr*f{@aFUBfnTlJqAOjPBT${Xn|e52JqJgk>}_uN@2STK z*44~^ii`JOgndyeyh~ixf75wyIO2LbZ%`DKjr?NTV~F{yqb{fi91~<9`qDZ#tC$ZCE-})7uFv zmjv{2fY8hYzoPOK{oMe-lR)bR30$Jvt*abxTLfzKa0fn~Z}H+lPU~z7!?%VDqB;z za-fZcjq93(A}*L)dQYjbr%gke&gP3T;n#E7b_hq$X(yYSnzrKyO&!=Wl+*NI$=~MK zqi>)0~r1WXaZWs?O9V`hEKZT1|#j z=aN-JFjFl!QPYuLX9Y&yp4&cN0s}I@%Xs55nt_Wy&15FomZc$T(&4)Bct*7C*4!js zRt4-ttV7#KFGF$(TH)|uCfJE@YZ7x;&nPmaDR34R7R-Uk&)Hs9CnY5B(Ob2D>vBLF z&fxeva`8%KV69o@mEkok(mM-QGBTj>4VCHRhXSqK=LieuG}P696ZGPUJ}3fUj-Iw@Tmhpb$Y|sWR89a@vya z%3RZWz6y6FWXlko7Wg!z>SrYU;GN!9qUm}jbqt>NUn-b|-67hu>=;wNr|f|}$V?$F zS>HH>a}~yeyHEcNXwOB^2kaIkhZM-N?j>e#Gp7d`o|tn> z6BF|5rvAkpi*X!e?|9XVJYFDa8$IFkq?2eR``BjcT{3*C6hXxyi&SFz9Z5nP6 zHncwVk?}T1pp72yTiZ&PP3rMb)N>W&QqVs#*Y_9trH{oxGh}G1FACfa;#o4Vb@h?h z)lt&FIJv_ zg?+9L)9zBGq&7iX!eX|z(05tQ|y>#`Mb1#T&OR(>{ zW96C|x@Ym<=Fnp&-Xn8Amas!$n=pPjsoc8qeha{+2heA+W)V7^zL8{z`p$MxFfOC6r?Xc 
z1a(r?R}g$w;{hO?@LRo>%43{kcg!H#_`K@9xVX4}LIT2tmg+Yss}8U^?LhV2(4gs` zn`tlZn<*iM_tUhNt2e>`Rw;xl&rfO<%*n|~+Z`7EhObT1hpyzifEV{YkW_CyMZD8x143Td_5 z4-6rK%~Cu0c-7p}xqlwtvXdEs|Km!FbBhA^8Z%jn$mS z80N3`95BW%gRCBzVBbErN_1}T$2j}e13*l*fQb=IMPMdC;sX-on9+^C(~itMsMOiG z|4ZDl+c@yMapWFcYjck06;c;7SKe^YI4eTbQs%zqd?fD9OPAfm8Ht&T58F<`jW{974QH`m2+P|pKh^BtbYs{CtAyU(TBcz;m53X=GS z#nweYhSjE-_C(nb&D$mzuW$XkQo3uv$i@TU%FOyL3|2SB`Niio`Ef8jW7E3%0{~eg zc_0MnQ9|+RgIF(_aP;r-^fKf+J-z!k{z#tDowXNW)uD#`%N^RWX4))KrGtIqp(ZP< z`S@`9G70|l>q`LZ@FeTyS@W_7J`BHZInFx)%y!WXuGedmnEVTjG$IOlIR`_lFCq+_<39Z0&T5`OSfX|p>(^*WI* zx7x)q)&W@nfbd`oXo~{%8B&&{|BJk8K+h=a^1eIV|0*@HMen~N{Lz_Sj zc(5-L@s(9EO+%YfS@`$Exd+8CN{*+kC9#{LCs|U_$v1TPb=KlDIx8SGo zn_0Uisqxc;g9Fl*mX_%Z9+QsezZ!s~PqzKnc`jvJ)drpp*c&(MH1zOre{lQd&MBeL zy&k+^{7O;p`qPgvemFzW2TF>FBTmLF)23Q(Hivd*E!SjnNHN#Ti!okki&~~`9&cq^ zr@>m(4o0&*o@1vSU+vL<88uvCasa&;&Y4{i@cfI3_YwMZn zorC!-2>z)?${*3d;_Qvh>FZ4zbDUwFpdu9qC#PRcL&Hr>ZwOo&JyW;3p=a;Q@4@WO zi!@moy2XX{^*T|b>tm&*P4|zaCw->0C{T|BqPo@YaCkT*0`22b+pyEYdI?(xufjJf zcn)bc-gwG!y{AP|fcce~#do}Bh_ou5#+{b0+Go}EPJiLALJFJ%Sqn?9*ISGxZ->f$ zFF(;+)D?p01At&3F zw#7iZE5j=bWiZpLaX9*Wa`BM@WUfNjY=OS`gh;ROe<27wX@D`K&Xl^o_HneduBYsH zGfc^ES_!WhZ_o%~%5#UXdf#r8pU0{w*(zf{ocY%wAnbc?S^c72SdOp>_Lw z#`TSO_(_>B=iR}>!(;xq1W?d@J2*aA^!6}CyM$4v3!GiO8z=ors40}OG$u(Kk5Ku` zE>HJ2shri&E$i#;NkdtMvzXvQ%AGZMw6JOajP|9ne_B_Rfy!Eb*hX)(kx{vmSU7Ax-vps1*N2etN))0MByHzRkDpRSvE=v8 z45@|Ki1DqrcHqlgJ(a-uu7Z-&+J_&K0QxHi6{*$xwUK1at{&3`2fn`X69G|hfKsU8T zVZZ5xWVk(GRyEG4^|*ONmoLH-OpYo;po)eN(9M|z;{6!YJ6a+5!O+!ZIU8K@z1G_< z&=$)dXGdA}uEVqU0zdKAPSLUhg>ds7)6}i44FgJ@bEe$&1TPUUcmR^4!e^Ncd>;?v zAd0F#^19z0XE@v39DbY^`NquONZ9#-xWaQaVW@>fDpFEs$)D_i+$k zO9Cx)+5f_x86@1yA!xZk=J0mz#Zmm`ByKfiVduEbIwrPmNH*Sqrmpq4ysV)sO1H~W zKr1S6K%MA*i`DvQ=dtF{Td~BT>$C~putVxTO}5k4xokmb{Y=CPW5o68xv{&j69(H* z(&6^hJE}kHUCda0*~%KTJT0AQR3KB9yPqgM6VVT9sZ6}|V!`cR|A0rb#+rVPgVhP6 zcUip?%1mT^faVcMH?cI?Dq;Kfe;`++0C5=-Tx< z7$gw_a@7QK(D9(lk7 zL3PBqo9YTZ7SE6y%b@lr^~*EJ&1+g)%`qN|DbYUyWrW~kIAi_sfKRT?T5EwH@Ef05 
z%_>;#l-E50gKH)&8%PzXl1$&NgejSx7KUX?9;Q&bJ?pu^2cX4rt&gO!yGA}Cnzm2c z)G@wedIbf)p6HPp->YxG%x7sex*s0b{ON9X(}d7md)7zP)zxE=v(r6ELMe}aQ7bys z7jj#?@Oo;B?LF>_7rs42kX6Gt`|bI7htS>h3C@!5wdH7<9?%z+!~#K!hp&)Qd|2*@ zs85>DQ)w%t!)&qir>z=gb@r)1)Iggm{0ux<@k+>6m3yNKa!-?X=b;@8s(%~#t??yWHWP=jtQD9t>nGN(At6--*EMx_)}|at4y&5(>dKK zB1kxLX)$&2SX*;JZbY;a))x=vQH{SWb?T~f0t7)ZQh@U8i&P^Bz<)EUBa9|xUZ#aL z=LgS20B_Mg83Y%0i}LL?jt^b8pNdJ@N@0Z?tTpV%&(wVzVyLB~e+Vbpy3pEsp4**T zmoCjAH$R9bOuk#Pv{?ubEyWd+Ys6z>t$o=)Rn&+%zfK;O%Nc*XYG`qKxp`R2dPmG| zk8h~#75mLNOzKhjyxRxf4~_J(3I|};I5*uR=`D4W@o_O$TqVOp;(cPB8ftNU;$a|DS5?6H!D zQB7_yf534U{niWl=ML~Gj(XNI0(i=orjg4DL2T=^$$o87o5gC#aXr^UWQ zfd#7}VTa9!zxPd1e?SbG9%5LbO>^s`r9vCTmyLeLyTfz9e6b?6{bJHHRO5GC|Mw7A zf6v>Y%d!TlOj`_@oIOWRv)1A*x+(WC`1*H`Z9II%OHh)cfH&K=o)ZC-DAmPmE^R89 zMnyv<#cv_C3nN^Vwn047LIb|iL_vlC)mTFuP|x9o3ag|>L#GcNW&L{?rmVFmaW0r) zHulW!ino3=5BmAP2u&T^diUcFq8K1a)?C5s2=~4r?IecrhmL2K0w9|0m`3G{B#iA8mpyg z&#-@7ct~h^`Oq25m|jUZGR>FiEejcHG29lob7T!0QW-(uv4b+|#0f=2s7fyDhO~H0 zMOsqP6~)p*v}%QJzogKX&;`OGFj|8r8=AP44CvxiGqQo4llX?~3*#%Iobp7eKV*A-wzwjTK@HkYo20z}q!Ycz!9kB%!X}@(a)KRb zh;6>VVy1JDFghtWR;rRaWRu-pY9Vqk7?FF9jP~MW2|UU7$iAau$PL`PT^NxFJqic{ z`}JkDVUuBux21*xRj+6oL#GqTM+eNi?uZxAFtF46VG|Vs^Dp0W?-LM&1kQ-!RzFI2 zB+?q3{toyVAiK5H$W?l|JInK3ou0ErUodNhcY~h_BD~%bmG9plbYh^;?zz_rkd4TQxV>97G&Du`+5Z3natuxp4l;q- zu`i5Acx$bu)7_DPr824k>YmFu7i{hiZC}9>0$2*k7>$O>4PvgHoYb0 zSV!GEHv?8cq6+83l3F)2m(TMj3NhEq1mce9ju#z4&W*n-<*r2Bxs`XFuJ$c6uM0udaY9lU4J4Y`X zB76~LpMKbn?eWQ~!O4_w4nASrZDf&IIDKx=cgh)Ka5GVcx2_1xb0x2}ZrkZ~$A0YF z%udp@VaOZ-Ax(?F_!qr}0Te{9pbkq-3su;XS%rB-jqHnJs1D7b7q3k)Y$e0xi9bY1 zw~}I3$3f2WtfY6y1Z+fxTyRvB- zOY2u7cm(I*yReWuE4}x7)uq68)d`B42qgi_V@&!))YZh)TUp08jE&WPLOI6{ z(e>B&thIdSmyr~ELX5PnAftnS+TrDDgT&fH38A$cfmHVvw-Gp$zpy|A^qWB%D;fy{ z4C9SmJzPz4<;u5|`eL=FuHiptt>Pj`dFWRDKum2}^`3~UZDR{vb}%IeVR1Rx976|l z0qiZ!gK$!YwIC}o5D>?SSJDhpc<=h~a4nVzj&_lvEs=MrP2D`-1NUZ|3dwR2ehIS9 z!SBy)fjTTV?c$eO2lX|_Ja?;H9uS8`hcG(Be3dSiVwsmM`M81PW0&j0=3G6r!-}X{ 
z{|suNilDS|CH*9NQlFtquRXCXP^3foBvsGRib#K9gFb!^0p*M%O5@;kwGVM9t-+^v za#ET)2V2|qr|Z7BMH^558l6i(q5gD6~Cwhdwl^bbq^O<>fCl7q%|!zE=FCeO?71;vd%5?v4L|c$X%sa-u;Q~nJahPS z5zRl49$0;NudzkZ%7q^Ra@G+s_eW9td}(ZGv#Rdg;qQm5PfhX*qOMzXa%L?pM#)VC z#a$Fh-mROxG}!8t}zBFK&bZp+SWn*f`i8^8Xn!e=}j4! zkX)1IMuyxTkyztunT}MA2^1Te&%6 zN^kKvAOtA6zy@W3m2NLfr-u8!O>VGtPj{!`93d zLTvFknz?i3p$mTL*-1G>BkcZ5cInKGo0ORz?+m?0i_Co$nk+Z%p@m>CiKWQ6teD5z zbip;u+qr{?5rN$)w70N$jJ?yb4#>9VDV9x6P7(Vaq8nUL)CNO<-WN0`mYSTMN~SKu z3>(D+(`6i3kKb{w{aktE)rw?_5zYbhC^LCm3R>)oWPKH_z2F#k3yfFfMIClpj`))= z^M77|f_t&NKYqbM`w76ZR|t$)i~=O%jPg@W>cr$IsRCG4YrFIs5d^i=P*tg~#o~ye z$M9=zajMhxK@}4O^$e+VUcHf{zjg3+8XPdA-`^-meLrkI7a+DZqQhc-Zg*5_QmHO* z$DPVh*?papv8(#E;z}0EKzD}CzFAeTeS}yJCjGX9|Jt}OeNP-nU=MMEWy~~#@L6F( z7s*t`mIRKODy+56K{3lM3R%*r(e(SP=b=>6@ptjDfre%%s>Th>%A*{vh7Ir)DCHK@ zpu{)DBX&!Jkjjtf#^@e7=bZ0dESsGEx)}@k%K(S!?eD~SeCEo!7V}etAGz@GaD#ui zTWTaW;x@N&+=rC^{a8p|(a=fhX34dnGdiLpQwxvY--bdGOeid^#|*6=Q_V^B%hh?i z)e87K{U9{Z`5-*e()|lxjS5|VL(@yrn{R2+A_tGtU7T$+)0@$Pp}0xWfnOSNpO!lf zyeZ4Ho?G3wR@iv!2b~AwlsT(Gt?nTFb4go~*S3E8GKCAynOAvL26GX`&L+m9%>8Kf zD-Ib=f1Vp97!iWj2|avR>xdlOPJ`WZgo%yQJo{5;8Q5QS6Ph1W-*759VY@3~Qu*g8 zT~{}sV(#@J`^IxQG%0FUE(9%K__p>d(oAd}AQ8;aw1L#!qbX3DcXd zfkBZh)k?5VNMv8rcAFWA1$%WoMvGz`NwgWWwUPCa^$9u_Lc&jn9k+i`x`va;iw%wC z7C=SuDJ_kw^l-XER0TDP-9plt>>t=vXjh>e1R#>O$^W+JliB~u))Eti)5o1me$1?=|A?&Ry!IehE$n3C%$>-xV$z5FH; zts#&=TDodyZq|08;kz*xB|^Jq{KZ8I#G0PI`qA~?A>UA5ZZYr8w( z0r3G}1S#bD`1Ja4Dkn2)zM?3pk*9X2e2WBJ^AkgPygY(71cLm#;^sB(%Shxa{As!$f9u7q=?HyJ-K{ie3CiyepPilUZ(3$Df4a8N_r8+McAHzLQ@ZI` z^7j5PZt8zCCYV89_`%pT47r%n;|?Jn!FhdC(;0u&4;FI)(9-ZVFv-%g_jnL)TWu~k zsb)5m*VNZL*`?Xi<*%GK{%-wVs`PyEx4y}~_e61L(O}k+pIM~C>90-CMIa34Btz$= z61lPsR#tS?&#{)wf~xgpqO~>Mwluc+d33j-W$1e1V}$ak;B~_neaJ%0ey13_+lvZZ z_^e#ZcNcC|+S=->9(V57sN602<2KrDXMW2KJkvUu&J3vS=X36rxusDl;X5xy0Sgl0 zu47A3yEhcry#goGHkx!(lOKcWG8@D8r%TSF_k{Yt>Zx4idk(ZFGBp~!KOzrB zl7P>74PGt*oQUC}j+(Yo&6v8qi#^1!e`#+k&7#q|wL-whv+EcgvV0>i33zRN4znDf z>xlVyw`}r*;%%U?Bkgd=>Ks9nIZi!_`L1aa1)nPv?9iG~lIu6x7*bGDh;5)=q{>O; 
z)WZ=P?r7-2c$`U)D)8{l=>6o5$fwUH`_}>QbNlw&d%hl5OYr?OL!bQF8(gY*;C(6Z z9xpF56mT`1m&)hRb?>)8u&n7gWoGXaC#Iqi0B~=E6d?P)OlhXOhh8|l>zwYsn4uX^ zX{5!v)gq;1U(urXVCIYNcuS2vVlH_8R~b+;~CRfu3Kn zb%>_Jp_|6qyTd1wkw<$FAMNiwezSuUyy=f$hHXTpL@<07@JN4_BlvogU($LN;Lxvz zbcS(^P!{BYNsx}x{+ zYR6@toP}{A%425`5KPA+r2-}$rq5` zr-MzsZ$3TK9vUxaG3kq|J^T<3JjY0AHbNsT-)+WGIa2HeW7mCL7ek-?>A2T@Vr9wo~TD&1!a9IP*3D4aj&SkSEJGVn6j)2w#PX>L+dl#l;<*Hw$o{17nlklI*P}GmUN%h zGb#K+BA7wsvEhZ@vh^HMz0_QV3SO?UdV5CI_g=+jcm|Ww$INi+%U!YwOuOS9b)JoDn`u%w#Bv-M;xu-f zTr(ocvNeiix*Fo!e=Tz~XC9$pOJEaS1`FpYL#h#J5e88@-XpDymZ`xR`&qzPL~-|_ zv8FsGod0tl;Z6JaL>vzXzx7`gZn3>wu!VQv*Zb39!1aQfQm2^gadIV;LfQ$*Ut4Zq zlcn_Jm!dj?sPf-)QHF^YQ#MjN(#KYBHg;&VN#^AJF(P>`zYGm{{cslyw$h;a`Y zX0v=9m{TV*$po?7sAS6bZ!OFH**TC4RA9vHz08t3dd z(N~8)AAPJ=q(;XBA@u|}AS9_dKTyHhIe2Sfv`kIO^e7>J{dyzw>>`5H|51ltvoali z1C$2FHJDu7S_DNU?pfAeH&J%H#I(t0a^!{yKVqW`-NIPq2&EAQJQETSoV1<(iD&VY z2GI88F6xpd$x~%;U!WXJ@)s%*8MkLYL2tjR=z=xIQ)`H|XmDo6xDAsvh43SY_}ryo z0iBl=Wk8IEPhOK z@X5O5l3-h=+P2rXC;hI|C)kHC42!u&bSbh#&I4bO&s+E6^d2OE^P6?KzST)rFO#O5 znOds>VnL36dr0T)AEFMOZ_EqJ%PuFQj(c)Ook1WFpV0m6O$L{RY93berwbIds5-4v z&V$*z{3)jdfUWuWezlzrY)9v-CHv;Q7aq%CboT}G2lo$}hNORf?8N~p9uEVJHpo(s z4cte0#ko*_S^viEHDFYIK}tqGU#cQvDGkzRl2TAm7&S%{!?y@2i*crt;L(t}%*i7u zTb-`fZ@3MkB=X#W#6k*F4MUP+Uh%F_W&Y>FJA~p$4w(KbM9eg47d!TNZ#Aoor|Dl~5w$iXgbCPpe z|9d?Dd8;rAF&_soxzCx1X!w{NZ`qmXle8ZDp=fcUDY9V(8^7r!Aq@Wro9~JE?kU%B zvUVz~9R^ANB175a{WoZs_b3n_D|+x!h-et`V)jO$D0#q?m1tOWY|hwx`oQb9R*w~>P`7YY?kW0L2YpNq{yvAB9eaN`O5yd>D%tk-4LugpXA;m)j*r_c zj^}_ru1h)ut$)+|OW6I^BtjV`rz)DL)|)a?^UcagIf5Cpmf0;ASmJn3l)0LkoevEj zl19z(g&k0ogkJ(QpXWGQXLg{Qzll9-?}-J5?cf8b((+LTW$C^{0Vx| zK~M`{&D3G8)MFf@Mk8i3r%__|ikuZU9ojD#(`QZz2=U9+t`FDJ`gyOv{N@d#WRaUk zOG?6SE85X6hs2>OEi7biNg{<%geAQj8eZItBxeW2-(U-k(5kT*IC8eYEQ3>nGMPNE zqp!$2@s<%xS%yZHKeGaO{)R4504md{FCyxKm9-Gb#SADzd)y7e(TInVlmz>K&+eh} z(|P$ePNoOX;)AfhHzXA|?Mt~Bq|Lg+W@crHIVPDELn5BbAbhQEp{7Q4^D{Fu1GTrm zC^2_E{yoUqtggvIecW)J&fzc{ib)59c0+Ci(5G*l3~*^FuRMupE%1GU=G>y 
z2d7hlBm2hI=J!|qg0@qlVuZRKQdhrs#Ie25yR0&(4Jmy~8VwaGYTAbA4T&4xvp_cr zYw&q~$dIQv)^2OE!b4^E;irRUD} z$EeZsD)UH3qgVuJbvZJS$_bA(sI3gWO8#vh!|2pDf#21s=YGoQ0`{$n^ zpX;x4r!cGCUH<;AKJ6%|v!;+W2~yb62-A{uu({D%3?lizU80rURKlQInN+7xgWdKV`vhe4N6GqLSj~06Jsu#+Fs*%#;_M}~M zvR+VstmybbdTJIIt;01h)q1aD6f0&BKxyoRA-P%4c!YiNGqvgUi^XI8CggTBGromk z=|07K`1-H3FfOyG2`7cAVnsDXE!2DVn0Pr`anl zF@w;>e_j>=Fg-}d-_2%`r4^WNDT2BzFPi^Fjlhta9+RkqBU7AYazjTCyY&k)+(EdC zu~ZcYGo)FHo9(4Xd@MJSG#g6;U<7nrdBvd(P0y94=z9Ab&3w|SK72b~**C3PAb`%j zEHi%jH73i6EQi>BVX2tsb|xd+^L5r5#$&tvcS)|HDtD1ZLOPc{#<*7XUv!U+!FeU& z;h_1FKk})TMlwYw?1IGLZDm-bD$dO?j{a~|^w@9h2KGN3+r5LQWohBiNe2E#y!V%y zxPNW%Hme+Kb9xSHK|eo<3E>zDtqCC#X78gQx9&zrT(VY5@%kBh`Jyv9L;mxt%*hFO zd^KeLP zjl@g+y47}#UrfpZJk1U6BE-|W7u48FyHZ9Rb}qJTSiNA-Yo1uou;(Xhc#7lvG~7wrV73KTD#JWU(ehUGh65xAQ_}$WKJGr2p)f7 z%a9+rhVuTTyb5z*FPrPZMEapuiNwiBYa096@4q;4Jl8yNJkJukx}Sy(TOQo(4@3X^ zJx9m)fng>ry|uNcYpt$HkFJ-GqobofdK|BlE2}TM5z*VNC#Z~{Dq*(=+VB39N6|VK z&Tqo0V8Dsgbl(_L@DhhDcP>}3B{7-*LC6`Ushz!VTY!K!WeH5sI9VL`?tuHYP zJ1tI!E=3=D$VLU&16q+b;r-zZmrWbeytB}z{IMT4oRSK|^m=Z|PXVNOJ2%eDuirNn^_Qy!= z@usty!F}3htuAc2cf3BCNGCkPzb2OzL#m-nishEky^A2xA&@(FD%o44!Yp zmU345pK=bx5$jpztV~b;QqdIZ-r4#yN>L3~H8KJSUG_jT$1fyKT4N1PP@SkUMNILA z^G1+P*Y5V0IhKNX_6%H~-ur4?E;hUOk&&5z?IjVUTeouCDL>mTPRXv~nCO>(DQjtw z63B|S!@ZTy(Ue&-*i>Z*nZZ>odHg_qf%}*IM&(btwTQWFo$Ml`&7ft6u5s&|@JT{6 z!G}TM0zuqk5D$`D-S4qOlf56|d{M!AjZrx2@C+6Y-w7M(o|m6FE7JKake+;n%vFON zP&Wco@|0~lhW4C|0$E9SWEY8iFME-H(P~fT@ISba;hX1CB*_pB)B5ZNlNxWk^{Cp~+K%kI&;Umc zZ(>LUqJDY|$*R$`%|IDn(v_>g^x(uH=P@^Uzh52Vb1f(#Paocp)4&KVk45}!H!Vso zQl)KN!a{=8U$%ILgs*>nTCSX0^6+%l7X5s`OH*BX>lTj+nNYd~0L~(+VW!6pKh2=h zCG+MDI?1Y^!_avDC48Ua*^9B>u?h2Z2r@0kmO)RVBF#hR|9iIgBaT3dEitdHEOGu* z>2bgB5N^2banF{sSpv6}E9uh4*}c{nKJifM1clrn46A=)Xu{i+Nuatv)M@M?s(l$Lm?5MSwPga!-$m7P_B(#3f%u}l~QKFp~XV-%pJZx zp7(lcE9plOF$mL0sT13STfk=OsSEkjI#>XhFn_ME5~p8P2fkO9mF1U|P%mgSSWaeg zIc{|z@;CUIL36ymvs+CO88YC=P$m7l@%o(BWhgKxl15p`d)^^Msq`QSKKoI2#p4D| z<#YH_B1rr~#SfuS 
zL3T5cr!oA??#E|mgYE{QUlx{@Fye=wriF3Y9iBtWE6a6FC4s=j#kJ9x(RpaOkWZzm zq=bIuDd6k>D^?nxwL7?NHmz+x3pIC`)I2r%~UK0UiO*)7Gt^?9t27tz>^aZNj-)~ zAO1ZMP-luqfSsw6wHF*6i%;A_q->8a&{6A7p#7!~_tD#CuJhszL7)pYcWV&jUYf2Lw=m_QgW@-YLM- zPv4;7@wyaYqG&fbwkp$v{&qavS@(b5!Sl%$F|a2#u+O0~P=D+dg4VW-GlG^5SIIDs zslQS`h__Nzh<<{?98y9sgB5jJ@0!k=Jwp*)_pbXaH^ctD;&HEAbAiK#QV*zHE_@3%T#qs@>?+w}QYNEnDUYR;A+3$&E5wILgr@6`3ds?QOj zR+Y}ogu{%FEiQJzZu?mxEYi{!Ti0Ky$W_p^9Vhw%oVN-6@~Q4;D79^3;h3AipnND7 zqKMHX>YJUncs9F}yi(SdWd#Zgd#WrA+A5-aNu6F>j@DM-PoavlCQNZ2v^5g4Q|Sa& z&{B23`yb0n>`!-SVT>Eg<#M@hUKkf;x&*#97LwPGcoIprjN6LEg@t5FwSO*cY@Ovz zO!NOK;}WRx~@V)ot1vLe*-u#nxaHKed%PaHsyI1vh6bq9 z?F`*%AgFNx)V(j7d@r}>Cf4|2TV^zBTV<6R6nTH?y*@fSWfCEex|f5~`h>mWA#&}N zRyJRpG=12=QUC^hc8Wv@y{gk`a5*q3vpxCd=H`;T?4#}JDNfS7(oTN+T}+gmO8gu=zDOdEOM~P?BuXg_ zv%pn4y$E=jCLy#OE^|ZwKioqBa!{lyhf&-`W|L3ZbA_|Ava;|4grB8Gz1)0J*l-+}B$AIv2-k z_mMNU@A|LaKYJx|@b_>w+fMyvcb)cxQn^TeiU>bOK4*2=x^;ZnwH!_4m%8M^`cees zWau=F2K1xzbIVWp`Z+8988siur7r(JFF;0DeTSY2UY-k)U0w~;gPu-s7)?;>1xd@$ zZ;e_y#@B7EY{&Uxh5u6fU;%!Sb(AnO`~KwHg+2qfI^9lC`5HdGe{^#4Ek`j;a3ipO zjX5VL3|BziCq}Jw2esl4O(@q=Bugyw7@TTKdaBKx>cki*{QH$a{Qdo}x#Q2y6i$mB z+Ygco0cp*vn9DRSAm!yC3BN!cz z#}(BeRGIO!&1P?jS^`rF3OjQc^_q7;4mzMC2$SKi;7p{x1>E0N3PYLrq!Yt7)ngK24LA(zXQS&;Tk4pgAk{sDmxR!SMx&XTs3 zD@s#>1fUhfxPrbAOfeDwsZt6pH8nLgOqw()%<2msX0`jOj8*h`(4JorTS{R8m9jF( zbT~_K8N1RNS9=t4d32q>-_W5$89sbCg+hTsp-|T7{XM_R&sGPBL0QZ;5?{7zt!M2h z=Q8%Wk9sgX4y zcqmWdc_yZ=%Ac?6x_HVh#fLo!G0)H;g;E;Nb#W}uMliTmQvxkf0Z=6uq480h%w8=e zwY9YzeDJ}zt{bMYR~j(=>40jW;1xYY%n5-?>2miU&}Y1tUu|8J%Vtrk0f1q{hB0>R z*icsm{WGYix}CZzZg_=yQ^BfOEcUvtD*~2MNkaom(jF@m_~NToWC&eYF@a}hXr-*! 
zqc(0qVcdPzJXVOX0B-0T)+O4V%jIYq)>M|MV!Ky@R8PC2KxO9$QjUXr_3+S2;W;t{ zs&Ha7E2+W;vA|I%G{ll|iba?D`ugy`Dy?XHOir-a9mIZq@pv4-%Vx71BfV5ro=5BB zb_jHK5FAucRUHwg6<4}TDj9vX0;O-ZMY@lOr>g%G70qh?ksADm3V(r~gxOEU|t!|D|h zaGV%jU0v)qabj6Icq)~uTCkwnI96Mac%DZhk-$?16jeo>%5wjCLHu8rrULQ1t{|Om zpr@ys!Gi{c*G?vreeR?39)lXJ!G6JgCX-2u#bPm&$t>HMMszZcudHW?5EM5QGTWsxHlAC438}eo#Fxv6MB>t^rKY()Fig@CVL${+JjlTry?U zJ)p}z7|NJr+DX$PUIA3WVB}!*R~7f4heAsjF=I6Uy!Szz9{iE>O5u6dz)%wfRi36+ z`dLA3*>+kpTX2-@smrHdG5uZ%A(Rlp&1SQ!3WdTqBUa_hbwBtyD|))Xb@{?Y7Y?7i z-}oJia_m6ry3(3lHisAYkkX;PzMhtrma;6sz@W2V=FpNi>uf99R!l|LBNnC7Y?s`3 z-+f!}yYIfSHrp~fE^d}25Raz^rv}gb(5=FW&qhd2IbjB$JO4PE8dAh!evGTP0G+)B zG6iGAn)h31ftIE0?C4SrVm^7ZY!x~h$s@HRBct@leQJt?ItrF0#~DSDo_KAleAxHEN?*n$0} z=>+!Tx5q`phA}n$JEWL}fMKI1bN4CVWI}ozR$G!)Ir1t`PUR`;0(tGCv`1;;#nRj_ zr5v8})OFF%t%sAAdB&CJxwxKZJ}dlomWY^N3{pzRUnfOBF)C$^PFH26pG*pdKVaE> z^C%VA4KmL>->wVCZg|S2s6D)*VpwbtvukG{rQnMHT+baZ-w#6kFP~rj;hjaF_YfHb zv|WDb`#JaBbHm9OrF4)^%>V2-2pLDp7=M58G1kZ$Qb$hakxeVv^rJht{o7v;80a?3 z;NeqRSH7@lhbCVv6kY!Q=sIQ|G}9O$I(*UXk~N-*dnl!pR!VxI^{o5}K+TjPe!QSS zu5%^F9y&aleDO_`F>|l41%3;h3DDG3$G5IHox`RLFO~Td0?$+Ai<(?fQS=n9r*S<+ zQB{^2>N9~v9~B#dScS&2Jt@91>rZkhFyQ1!q?Tx>6c!1y{;U{P;NA3sIF)CWg0ozI zemq{3m61|md8kq%h*Ewr2IXlyrpHeDY}7?WHWHBJWq8b;?5u; z{NrPeAeQhw5020{4KQ@dC?Aj_dDCRgq6NKU=3f79`~mj!J34y!_*J(t_tozl$*z^I+HB|NLeF2G zVd2F002c#a0|sCF;7wfn;7uGhVGjRw@OexhG=-MrAmRiS1$>JkMvSQkM-qP`W=k0@=AkSL|6_Xku8)hF4)Gnf*ju6vfNFz|<~(iOt9 z1T3!6X#_OgZV&Ws4Q_jNq)rBwn%Wf4tY5*P#&JxqY0Rw{H#)W9kyYCte=>}DM((94r6L{)&Q=!e0+L47}=!!SuTHU$;Q6eea> z2~=fqD&zhO0L(5J9)cl3Ml&eOo!D3tM~QF)SsXchDr4$LFd{vK;Wa}TQq#hix{*jB84(}Ka(6XCXur(A z)$n$=sO$ojQ_gE`Z>t0;%GeZTwF&JC7P>D`*>S20pwh-t35xNe6-p@MCKMqUHac|x ztj(0=O+;YzIx_d@!?@(SA28yx@1x5tr0C7#IC8szN7H|T#k0@9$}7uOGw-Yu-|+oj zd&A9KcJceEP1hU=EQ9~w-g(DIQRn~vIkTJHP4A(%p!8}13o0sN!}HFL{immQr&zF; zuf3ll7WBk=78E@Z6_p|i3QDiikx&98kdU5ivSsG?$9!gYcCx$Kg#ZEO^?1xPbdjTw zlaa_2!!aW7DL7f8iKGH`|I|S8Z%8nO5}Bwl5S2er@PNSV1Bzi10>wDc7sQt8vLmUm zqon$v$g2IDWjeZ`9Jr0K`HupZ1J?pt(x%_f;%!S@yUJR>>}$00dLknMOC 
zFUC9uHH@Yes!4_JQw{S{8R<+XDH>^0q1E3*Ny|u7YJjL3n+ZdwF8AxvjjAdl$W&5f z7fzd21a5ipO8s%Yz%omOL`P~$GIw2dDfd13cD>TQohy5L1*4vOe6KQmBL!F#ZZBrz zrfqu_fMLd*-p7WOcX_3|bAuJy- zuEVew-T+P$vg%?W4WPQ(&5D&}hwe{k?DTy|0b}ij#*}5J2 zA+^Aw!-K3sN$V}(Eym_|1davzi6tL!phfHC0GTf={e%&nwLfICoX8%(mWLoqhC(N* zPF+$6A%&O(RW*#hB_>V`rxFG|arLqKXZhw$rs! z8*+0p1ILw=ma~3SA+7VWII2r~Hf$=Scy}2c+vm}_Lz{5-8VsxkdT-uZL`i8GZg)dq z-lh9TT|`QX6IJn3Qd-T1f*l;yDVOZbq>w>bWp$m^37Vg@RA4qRyTh;-ZUVX)WL0+{ zQOK&UK>mbpmvY&ey+D#kcG_t0`VGTssgac@oSLPYwg*gnCQS-8NKzqYKT%RgCo09{ zqcReeDu_x^gVvuw5^)6cpI?Pu?NX6c@GA;mO$&^rKP;Sif7RhdR-weTO{k8O8|@pL ze+}@_>Yb}-P&_zg2M*a0XiUFqvaWe z2O)D#1p}ZF#+OE#R3Jb?HJkkm)pVi|RcJtk(N7iG{FjlGK&x~uAa3cpA_}*c>wk{d z4rF_@v|@7$^p*043R=7Ja7YgPl47X8r3P}aB6a{NC~TBQBV{!tA~0BjBvhLU23mxM zsT+M%MxxRKDs(@UV(?QL&42p+PDmSoERvA#n>Q}_`$rcnu*5{nG~;WIERINI6-k=Ay~p#wH<^1NGB;t2@;WhyxHs* zSdT_jN|2~@Kb4}WLDDmr|71zxxcu~h1?c7XXX{Tsy>RCJ>ks4$Z)s_V8-X%_qUvH) zz$rTdFV8Q`Y9m@S;Re#OX)+VJslNTP`Zp|+m>2dRs5({Rdv9=&hi^dN|E>PC@!H}*AA$vO4rWq z*j1v$(Yb#I;P*kE>!kl(FkgudlqPD0))J0K)vpMJ~G>V@P zQ=VU3N0&=F$r!6rC@Lf-*n&AzCLdKqqSEha7@+yp;4~vA9GyY3Q`2c?EZoeWTS^X^ ztZG?gr9}}g1Xw5n4(!-TlpW}m_!QOOCwwB?Ey3a;5}HB}fVMPU^wn z^kX>DZQKD>nqr zS+sO5J$rQI*@tiB+AD@5DQe(as^1qlEI!Rqartddm{oti07FWBx^wEBCB8oC6wi{o^1GRi8y za671BNJDwulK}TiMzjFqia+9#j!C(qSjZ9R!MyDvj`?wr7PV1RN zcB)pfVtrvbkB*rV`0oMJ5A@5nx6lr7&eMNP=BB<^At8}&OQYKF4zO!5gwAbU39k}EK((P$n$HmUi~;TToS%henU&k>~zPZgvLAi4GnZeHHI~{5}7? 
zXDn6Kb@b`k8C6v|XV_p~eDiajdGN--^`Cm^|8f80Z+H8oLo0PxNx5|SUqee9jvfCU z+l%&4TjS=!vyTaEPfc<0_?;JU^MjwDn*RhA4z#Sg@T;jS`RB#QAxjb&NeNWDeE}^X z;_P#JV4z}>SGoyL2^D01;~ZH5lOMFoaMCe5F;F!_0umEs5;I-2$#7CyU%fSCOe4+pURrTsz!DzNez`*sv&$m)Uh7iWK6@+Q$=->k|dSZAJwK_321fsxw6PxjhQiZXeb4KJC-($L;mww8_{LWRemjT4%V(Np)biNhoM_ z2^Xv_=B{Ta;c)?h)Ua>?F;y8{<*j$7iqR+D*QE~oQY~+ z+wh_5p;3XVp%QYa7NwJ$giMd5e6kX>Y6P`jH_!a`DsQivq@9@Hg*$aCc) zNitdX3^MKMfjjwCKYo;eMrNiQsuVnB5mjV-H1P)pAK#DOJwnow z)~l-8y7`o}gI?JxA80@;v})nd!9u;=W-Z>#mBV}Ev`J*9I4H07gw$7RYM!-Fatb9V zmCy)1r(LFd+cXlL8XJ~Z)pOsN?^wKYd*JM0Lwj=V1;^5}Qw}y6k{mLL4hMPZjt~K< zhW$TCxEs8@GkF;kCe733trz(F!6GWqiqN(!OfRqJoXmV+z_-PJaA&{Eut_!&Z7z0s zO9SEH5{CRJ!z{guaa$lb!l+#-7^_fyWl!EH5WG1GN;z+_}a|TQ8N;0-av8pP28p@cl^H(nV@ku5x|5^|6@d7t8 zy5JS-KM!d6EBBu?s5gCkbw!e8BuSgu_kqXW0HA;0?nsgiNx}naHpbbR_i{sWaDQe}0zKNL;Y`q?w>Ir8=i2)xvT5rcfGRYX zg0o;tDU)ZcWcs{~$o2%Bjs%huHLWQUQF;6-8+KIi>9kcmFy=d!t=OXPssR2{IP?Cm z59a%BnfC6n`JI9FfaAS$pW?!<=YT4)$-S8pUwLe!WdlV8M+^i_fFfV2l_Qs~u1?q_ z83mPf~XYCY^OucoB5h9!S&p|-l-{D>2P2McH3zx`mp>z3=B9h*N6 zxD{6#+phnPBzq!0)z8Yhb=3RniR892H+mo>Q?D;#AhYg?`LuH6Y8sZR^6i!x-1Pk; z0pS6RV06KJ!!xA-R|6LTLxeUE4^{zBFuLGN>u(-AS?NBjPyaWKw?Fu*e#~?CxCR~9 zuUT%|E`LzT+J&Fxwvcx)TT%Q&BazjvU1fam?LygPXr<0s6~ zPg@V1EyC&bXOe*7XeFbD0bSzwNwsLve$N%oynk^^dCx7Gtl9vZfW-eh>1OUc<|Y8@ z6%Wg6R^!DNTkD&&Ed`ZSIhpQ>`6Su3@-+>94^J%_$J+}h>W6Lv&SrGMx+u@*1Wpzm zVIa^}EZO3{?h&J%LSPo~4WkP-T7T{M$m;D6zv09|{W*3(Z)BTIS-rNP?5P*t&S}#+ zhx)#xw1@4lY@>nE*YN%bXsT2e$~^Ri-E_Y;ieQ3Z)P0Ey8B)@V0AWB}#&+q2~t#eE1>FJ-BmzUoI?6A5^i;$bLSZwXjcvQ|4+ z@%>b`cG3-XMN4Z4?+#N8pSrvj4z72eYX=!R{#DTvb&^A z&dSP~RaRCu3Rq)xlNKYVLS&XJyIWFD|LdnovaK7yOAlYf8OL@ET&&XVqhMDpZjTbt z_><(6X_f9IGsQt-LNHLP*6roPDa&|w;_m?g=l4{9Ry_aJrxZ`MuZaP*5=K@XlJZJA z0N1yfmcVEvO5+EY%W`^gLfXejq+oY2@h{Iaw9Uz)0=pI|Q>WBZ=dYtmgtj|m2Tmyg zr{pBXo{VgW&{I{FWhH-b(X3~wD=E-6`q{%q_bZGqn$|?aML-KKyJ7Gk%`Wd1nvd>dVfjbzW9%WOekAl2yNb z*kZ_P&Vn_({{FQ3ZG}ay%*@Qy<>lr70DiN&#{seC7-dx4@+zg{$!#)@I)8#B+uCRv z)>&M3Mo%2}p#1UqRq8x`Y8tcwvcoQuV3Tm!?OKGN0Uocy^!b1C{97||yFHrW$Jem2 
zwrKIcE7pIzvI$Kp7FiWZMj1bSp+?HXhpcpRvYQ&*+zI3e+aeLx@tlx021(098QOCQ zV^6%7c1dkwez00^9e8Fe4c= zfmvi_BCFTN|BvI3?Zas&A2$z)6So!?^VAFP;P)#$eD4h$mER$7ftuPnCVlf0pZ;$; zeS3A~;tS7Y(6PO-*&=6XbU6Byl2vm*IOt@xYTY*8`0z(%*~$%adV2bf%F4=7z}Hwf z5abj|PKA?M(d3marPC>0GP<7onk3t@0XlbS#bdXeMZd1uffSc$P*j!TJ+=I{Y#VP* zoJ(bSofu>*b@k80eK02u z?vK+E>f7qr?Ua%~<{Uh-+Pb}j@e_VlXU$nArKBWRx!v{m`u+a*2uUfm&`hw!D5LgC zW;wF*vvT@edB4q(+E>5vz`mUreriv8cFm=2b_!{U2`Gv}gU3&;$46Cl154KK;PWX< z*j^MGH{-3{{d?KkFDG~^wpPTUNyTdEiAGG37PHJcAjBo@!+Az>GIoGSox!MWHKlol z?G$pBJxRH~^_8@D3`H_k-;g9oZLKBT*6s-`I)yL3n#rYq z8^+}qokd1^YREe&6%l9hAbJK$usr>uq#CjP?L z)8^rDI2t5LddBT`zXZ4~PHA=-7fn7zlUI@ClRXScg&ZTvc6(O8tIto&I{G|Gwx`DX z(2B32!c((zMeWYTi|TeR-fEBoO=(hlBcqfs@+cZPg*(=}zh%|lkd?VHBYoPMAhNyH z#$-A>vpGNOOgg!aCqYVy-sy4CWW%5_Sa&mEwyrZuTpH6wRtOq}#1lc&sL$cg>A=8E&_(y49Z zv5KlTsw>&4$Iy}$zrK6G$f~xsflt5worz!1QGGtY;&Qph*VNQJik9kNp?P7CQ(oDl z$fj5Z;f-XL>`ZZ`AAR1*u8gk7*q!MeMOl^z1m!OGRBT;STfBU2-LB<340XWN7zrvC zw?j1YX^N11G0Lj_LRwOs$Hvm=@qEUkoi*0XB<%D`YDJI4&NyV3R#Tr~ef?UcL0VkB zO|9~KL-ecgjUP{A`(q%RSQ|K=PxWvotJ`n>8`qCKFNCZl*|rMk1F*ZKj2B+{h@C~H zJb2gj^y?F>8R+qP`C z#D`zZQ7WsdrPS2a$yHTVccE1ru(1EMsr@alni_xyzRQkOS5oVKon^bzsVY8I@%vO& z@v8WJir-UR-%$2vxk)BRl2x(znBpOwc;rwl$Ltp^D;~0n)PVLT8hBqiM^hssxe0Y$ z4~3T+>099&KHBIaQsaB~fqdfKcQ|WUw)C{LZ4I7=HqSkJvov(bK%K0%N|M~keDLCB zYkB_V59xk%NAA3JB)Qp{A^YbqTE%zM=CEMN8v6C>${D8}&(M)8jFIoP} z`29v!s;ctiud8_XvtN{*MI~}ZM#kKVii%Naa{^h||GuecGAbS!)Lfs@tVgf(_Avkw?7&+9v@Y-2pKz#osM=QN7F>J6Pp`0A;SIyvDgjk4zV}^mspa- z;xZhkUt0>(QKT@0ZYDf6!gtj)Dr$~>M?$`q+R)H2AvGoSFW*j`o0FZHM(>{aNRqr9 z)XwHXX4}>|Tyo**l$KTU$kXH5U0O!xj;+yV-c@PeHkUI_J&r3c9Zp7iD!(mU$;)p} z$Xd;VI|9Ez1ggq)rLQd82{N^Wzo{L za%yVIuIlQVJAf~+lEYh?W_2JntZ3SmY#KOY68vU)6^jKZUd_r!^IC%rnr0PA%P^K5 zX-Xq$YD5r@{iE4!!;y{Gur#2R%WnYc4U5~b-e5Y;E7tYJD_*+Su+9=3NlQy>KY!651GjD8opI_({e?CZ9tn0k{d#xhii?LK0k6OFCEt8M zhlB(ht@E;?`m8eZy4DqKUf3*}RZ(#{ufF$_`pUaMNad9^wThzNU0>gD6|mCckAtRV zNpZ@fNS|s9R**>1#Nh_TUW2@8Ff4A9yfVwG`YTXm(qB6DqY#WPen+1U=*EL^eIUY9hy2a}@SA{~(&SW(8L?f#l@m2kPqTo;!9x 
zFJSC-`$>+9>E2Wl+7Xi*wgEKz{5L^8*kn-wjQSTb3Cq$u)OiJ)mI z5eDy(eDP+|i8UH9-k`CA9t~(dFCor`Z7NhfmWmgRGR94|+;F}pDS{0*lKnQG1mIYc zf1~9uH03ZoqLV}IcKcBak}TbQ%VqM2b54r;8f(^V;iq30GiTmPK6(3590|b` zy{`SvE?M&9YkNgj^$lLW{C+8)emzfVXz<{2xjv|^t$i5FC)LultEM9Mq79;D(|WAc zF3jta!PF<2&;9 zg&$`9!H55wr<9df$|)%+Q>v@0?*jg`nySTXT6?P{?F~6a)1>4mrb=RzSFy;dXhjVo z$t+(Cn%My_y(BgBwf4F?MU-_Z0zpO7q$FaGl2N0m+OcMe$I>a98rfI`)kObFNJ#j5 zLPEl;9*@U$-NtlWYzE7lEpucjhU=|U9f@ozgXbkR#;?9OG{l?RaHF- zSY$QNA*XG{Bco!GRc3h=izX$-U;>P!VZ|z|{BaJp59`{1kq~b&NLv${QzV~MtQu7~ zpH-}8!FY;9##%SfDqg#H$jQjac)hZ+@`^UCbCvsUyIel)r2cz-v9A5kD_Jz7X|h_m zW*hH*zDQZUu0T#rP1#gkU2_{S)oPBzj=Z7~l08N%Pox2LID(2L1idM1(7wv5Sme}U zS9Lw!PGL7SrY#CVNkk5yh*hPIrd_EqsM_&XH#Vxzme$m~7?P2ZKB1zbvg7f`_E9hW z%Mj^|Ap?V3#X5XNLE|6mGb_iMau6B*2u}p$)&ZmwRZs%tbxW6MPiZC zVdBxGBFQMJ37M?Ms9~`WvdU*V?ueYvD&FUiBal{8s&qJwDpsEr#Gd39t%l$sI|Jt- z40~c?;_VKHKezmZ?*J zS}Hl5jyjLW`>3KQqRaRa+IDqd?uw*eP~kX3bU17Cc< zl1V=-QdL#;OOia+3lL)3>xU#Wia?*FM}ySMJX z_aDR6R(aVzKuu0go(A-^UY`Y1U&?NQ&((iFDI7fsEvR6@f`tQULFg+<(k*#;c~jEU z)5~@8aySxHKut+WSq7YJy($Y~-~`iTuPwxiu%E7*DHaR+*lC`Mvj248y8U$TRNxV- z2@V^y2d+p=One2n(0Uz5ES>>IM%n+BNy|t8QqZcS8{rT#LQf30na(o?TG*m!E|CJo zjKLc^T)@r<=Na#7JWnRPkIei$*8@*j-DB@@AMk%s_Wu)jZa?0CI&i$z3>GXL7U%{P zpiLhX3B620-64o7SCE!Y8oA;+DxrBONh#d(fVL(#P>r$NxGM4yEs*6YU#buXShmi-cN8!*15G}fWyMAJt>HG+I* z0xt?e3O5DAKr|o3zko?%+YP|0Xy(H2flI{p5x{T2`2{khU2ag+lQ&R zvbxnUGKfT0Z-|l0`=VpnOk~whjHD3SnsJ7{3xVyxM8l%bR=Z3zyafvu4sC2TXef1P zekU``g5PG^?gnzidY)nX(}Y%)@C)rtPx!S1cNzB27yDKS`SPt{09`}S{ef7ojzCuS zFwbl>QA(!cqdjM5p+&X`97<-ul|T=%{yUoKuDw{CV!PMSnXVDX+yLxpq7$wGh8VuD z736iJAjvC4$Cl81VcV_$xrKv>WJ8~~u=iL)Fo0r}&}=|+A>10_9P|8gtAR7c`uT?K zJAu~;1~!ZoGDMd&J+5<_OtRD7HI+_k=w9NoX*V$r2O}iNHqSBEvS_XEj$4*kprsM-N?pRLIA%;xpPa z8`r}ZA6ccM`K#s~;hi`eEh4EaT3G9| zXwKTfg6t0^=tMQH8Lrh17zTU+TwpR6r2yS64Yb9_tEc!6>86CW1>v3w$v~B2}T~glsizs{qb5oHqo`nQT0-m|)JFjtaCKIU}YK z>X@Ewy3Q8Ez7ar?VP7F|QWIpg1uZB?CDi8-j;z`NGYr@FqLu!!o5=ooF*qFae)wqP1VvHgcbxa8?A!KU(igbbp%6&GsLz2jh4&SLuffF 
z@Bo^FyRSHJr$LD8g8xc1C-YTk(V;3@vUj{>wHVDlKY(Cl*imR9$)BM4EKf&^MlB$+ z4S%A6?A}G2|Efy_vXar9^1IQ(mwTa^RQ-h3pS(ky_n5d=U$lS#qh^(cHjT+ILdzV0 zmM~w-p=Xx0V8McgX2Cr|Q1))(xNn7~V4OUvmpJz|6ItCEMguv+u~UiiDxsD)p_FkU8ZaNDo92r=FMn98-#gB z`tQG0d^c7!I9_bK$?CEe4i{LCMGNsj4_Ikx`hb2yP#b60U4v%AidL>lpXksychJr- z+syC@ZDM3I{mwpUCOBi1sqX8_M+;MqhI;WVi_i)$&kyr{bn-Nl=_x|9D@04m{BI1; z>I2?3Y?BPnYQcg6bn|Nne?7TOSv$=AHuw&YQuhe9M7uPaWf`-&O^%=vS7g?D+@;) zmY{|5-vPWu<7|E%YtejFPPFJa(ELli2qsuJHwK{1C8P`NBDBDRXs9K8iJgsTrnGn2 zw`wIX0~e$DoXqbB;B0XZ9mCOleH+kxX2mp}*S8VPk2agodYKk199FQVF18R4s)2ja zf*+<6%<=~<1Y0jR^Ez6X_={*x{?1}uAz0sWUvUD#`FXBI%W-Th{-`W*~CVCsO z8O@BQ*L~Crc#ap>9)lKAkGM~~GqWmaek1e!r=UeV-i{U_=|GDjJX4UD+wdOGMvDmS z7~vit2`%smF+k9@(vDVlwP3-*k&f$tGJ;{!HDbNopshb8G%q)rDX$)_(6Ekh0Tw;P zwkr(Vh7uZ_pa&Ru#kH%%_X&iW7~7!*IjCs9GyQpuroCQhffNk{!|dCLeWS%Qt3au^ z*RN6Di7~*!jAv;ax;_}K2AgX54buo#W uVBsLZs*PvC!Vw6|XJx^H1q&8hJpMlrYRy?zTt+wm0000 - - - - - - - - - - - - - - - - - - - - - - - - - Keystone - - - - - - - - - - Folder 2 - - - - - - - - - - - - - - - - API - - - - - - - - - - - - - - - - - - - - - - Glance - - - - - - - - - - Folder 3 - - - - - - - - - - - - - - - - REST API - - - - - - - - - - - - - - - - - Glance DB - - - - - - - - - - - - - - - - - - - - Database -Abstraction -Layer - - - - - - - - - - - - - - - - - Glance -Domain -Controller - Auth -Notifier -Policy -Quota -Location -DB - - - - - - - - - - - - - - - - - - - AuthZ -Middleware - - - - - - - - - - - - - - - - - Registry -Layer - - - - - - - - - - - - - - - - - - - - - - Glance Store - - - - - - - - - - Folder 4 - - - - - - - - - - - - - - - - Glance Store Drivers - - - - - - - - - - - - - - - - - AuthN - - - - - - - - - - - - - - - - - - - - - - Supported Storages - - - - - - - - - - Folder 5 - - - - - - - - - - - - - - - - Swift - - - - - - - - - - - - - - - - - - - - Ceph - - - - - - - - - - - - - - - - - - - - Sheepdog - - - - - - - - - - - - - - - - - - - - ... 
- - - - - - - - - - - - - - - - - - - - Filesystem - - - - - - - - - - - - - - - - - - - - - - - - - A client - - - - - - - - - - Folder 7 - - - - - - - - - - - - - - - - AuthN - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - <?xml version="1.0" encoding="utf-8"?> -<svg version="1.1" - xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" - x="0px" y="0px" width="40px" height="48px" viewBox="0 0 40 48" enable-background="new 0 0 40 48" xml:space="preserve"> -<defs> -</defs> -<linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="655.0938" x2="409.4502" y2="655.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> - <stop offset="0" style="stop-color:#4D4D4D"/> - <stop offset="0.0558" style="stop-color:#5F5F5F"/> - <stop offset="0.2103" style="stop-color:#8D8D8D"/> - <stop offset="0.3479" style="stop-color:#AEAEAE"/> - <stop offset="0.4623" style="stop-color:#C2C2C2"/> - <stop offset="0.5394" style="stop-color:#C9C9C9"/> - <stop offset="0.6247" style="stop-color:#C5C5C5"/> - <stop offset="0.7072" style="stop-color:#BABABA"/> - <stop offset="0.7885" style="stop-color:#A6A6A6"/> - <stop offset="0.869" style="stop-color:#8B8B8B"/> - <stop offset="0.9484" style="stop-color:#686868"/> - <stop offset="1" style="stop-color:#4D4D4D"/> -</linearGradient> -<path fill="url(#SVGID_1_)" d="M19.625,37.613C8.787,37.613,0,35.738,0,33.425v10c0,2.313,8.787,4.188,19.625,4.188 - c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,35.738,30.464,37.613,19.625,37.613z"/> -<linearGradient id="SVGID_2_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="649.0938" 
x2="409.4502" y2="649.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> - <stop offset="0" style="stop-color:#B3B3B3"/> - <stop offset="0.0171" style="stop-color:#B6B6B6"/> - <stop offset="0.235" style="stop-color:#D7D7D7"/> - <stop offset="0.4168" style="stop-color:#EBEBEB"/> - <stop offset="0.5394" style="stop-color:#F2F2F2"/> - <stop offset="0.6579" style="stop-color:#EEEEEE"/> - <stop offset="0.7724" style="stop-color:#E3E3E3"/> - <stop offset="0.8853" style="stop-color:#CFCFCF"/> - <stop offset="0.9965" style="stop-color:#B4B4B4"/> - <stop offset="1" style="stop-color:#B3B3B3"/> -</linearGradient> -<path fill="url(#SVGID_2_)" d="M19.625,37.613c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.927-18.396,3.927 - c-9.481,0-17.396-1.959-18.396-3.927l-1.229,2C0,35.738,8.787,37.613,19.625,37.613z"/> -<linearGradient id="SVGID_3_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="646" x2="408.2217" y2="646" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> - <stop offset="0" style="stop-color:#C9C9C9"/> - <stop offset="1" style="stop-color:#808080"/> -</linearGradient> -<ellipse fill="url(#SVGID_3_)" cx="19.625" cy="31.425" rx="18.396" ry="3.926"/> -<linearGradient id="SVGID_4_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="641.0938" x2="409.4502" y2="641.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> - <stop offset="0" style="stop-color:#4D4D4D"/> - <stop offset="0.0558" style="stop-color:#5F5F5F"/> - <stop offset="0.2103" style="stop-color:#8D8D8D"/> - <stop offset="0.3479" style="stop-color:#AEAEAE"/> - <stop offset="0.4623" style="stop-color:#C2C2C2"/> - <stop offset="0.5394" style="stop-color:#C9C9C9"/> - <stop offset="0.6247" style="stop-color:#C5C5C5"/> - <stop offset="0.7072" style="stop-color:#BABABA"/> - <stop offset="0.7885" style="stop-color:#A6A6A6"/> - <stop offset="0.869" style="stop-color:#8B8B8B"/> - <stop offset="0.9484" style="stop-color:#686868"/> - <stop offset="1" style="stop-color:#4D4D4D"/> 
-</linearGradient> -<path fill="url(#SVGID_4_)" d="M19.625,23.613C8.787,23.613,0,21.738,0,19.425v10c0,2.313,8.787,4.188,19.625,4.188 - c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,21.738,30.464,23.613,19.625,23.613z"/> -<linearGradient id="SVGID_5_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="635.0938" x2="409.4502" y2="635.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> - <stop offset="0" style="stop-color:#B3B3B3"/> - <stop offset="0.0171" style="stop-color:#B6B6B6"/> - <stop offset="0.235" style="stop-color:#D7D7D7"/> - <stop offset="0.4168" style="stop-color:#EBEBEB"/> - <stop offset="0.5394" style="stop-color:#F2F2F2"/> - <stop offset="0.6579" style="stop-color:#EEEEEE"/> - <stop offset="0.7724" style="stop-color:#E3E3E3"/> - <stop offset="0.8853" style="stop-color:#CFCFCF"/> - <stop offset="0.9965" style="stop-color:#B4B4B4"/> - <stop offset="1" style="stop-color:#B3B3B3"/> -</linearGradient> -<path fill="url(#SVGID_5_)" d="M19.625,23.613c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 - c-9.481,0-17.396-1.959-18.396-3.926l-1.229,2C0,21.738,8.787,23.613,19.625,23.613z"/> -<linearGradient id="SVGID_6_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="632" x2="408.2217" y2="632" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> - <stop offset="0" style="stop-color:#C9C9C9"/> - <stop offset="1" style="stop-color:#808080"/> -</linearGradient> -<ellipse fill="url(#SVGID_6_)" cx="19.625" cy="17.426" rx="18.396" ry="3.926"/> -<linearGradient id="SVGID_7_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="627.5938" x2="409.4502" y2="627.5938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> - <stop offset="0" style="stop-color:#4D4D4D"/> - <stop offset="0.0558" style="stop-color:#5F5F5F"/> - <stop offset="0.2103" style="stop-color:#8D8D8D"/> - <stop offset="0.3479" style="stop-color:#AEAEAE"/> - <stop offset="0.4623" style="stop-color:#C2C2C2"/> - <stop offset="0.5394" style="stop-color:#C9C9C9"/> - 
<stop offset="0.6247" style="stop-color:#C5C5C5"/> - <stop offset="0.7072" style="stop-color:#BABABA"/> - <stop offset="0.7885" style="stop-color:#A6A6A6"/> - <stop offset="0.869" style="stop-color:#8B8B8B"/> - <stop offset="0.9484" style="stop-color:#686868"/> - <stop offset="1" style="stop-color:#4D4D4D"/> -</linearGradient> -<path fill="url(#SVGID_7_)" d="M19.625,10.113C8.787,10.113,0,8.238,0,5.925v10c0,2.313,8.787,4.188,19.625,4.188 - c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,8.238,30.464,10.113,19.625,10.113z"/> -<linearGradient id="SVGID_8_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="621.5938" x2="409.4502" y2="621.5938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> - <stop offset="0" style="stop-color:#B3B3B3"/> - <stop offset="0.0171" style="stop-color:#B6B6B6"/> - <stop offset="0.235" style="stop-color:#D7D7D7"/> - <stop offset="0.4168" style="stop-color:#EBEBEB"/> - <stop offset="0.5394" style="stop-color:#F2F2F2"/> - <stop offset="0.6579" style="stop-color:#EEEEEE"/> - <stop offset="0.7724" style="stop-color:#E3E3E3"/> - <stop offset="0.8853" style="stop-color:#CFCFCF"/> - <stop offset="0.9965" style="stop-color:#B4B4B4"/> - <stop offset="1" style="stop-color:#B3B3B3"/> -</linearGradient> -<path fill="url(#SVGID_8_)" d="M19.625,10.113c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 - c-9.481,0-17.396-1.959-18.396-3.926L0,5.925C0,8.238,8.787,10.113,19.625,10.113z"/> -<linearGradient id="SVGID_9_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="618.5" x2="408.2217" y2="618.5" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> - <stop offset="0" style="stop-color:#C9C9C9"/> - <stop offset="1" style="stop-color:#808080"/> -</linearGradient> -<ellipse fill="url(#SVGID_9_)" cx="19.625" cy="3.926" rx="18.396" ry="3.926"/> -<path opacity="0.24" fill="#FFFFFF" enable-background="new " d="M31.291,46.792c0,0-4.313,0.578-7.249,0.694 - 
C20.917,47.613,15,47.613,15,47.613l-2.443-10.279l-0.119-2.283l-1.231-1.842L9.789,23.024l-0.082-0.119L9.3,20.715l-1.45-1.44 - L5.329,8.793c0,0,5.296,0.882,7.234,1.07s8.375,0.25,8.375,0.25l3,9.875l-0.25,1.313l1.063,2.168l2.312,9.644l-0.375,1.875 - l1.627,2.193L31.291,46.792z"/> -</svg> - - <?xml version="1.0" encoding="utf-8"?> -<svg version="1.1" - xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" - x="0px" y="0px" width="41px" height="48px" viewBox="-0.875 -0.887 41 48" enable-background="new -0.875 -0.887 41 48" - xml:space="preserve"> -<defs> -</defs> -<linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-979.1445" x2="682.0508" y2="-979.1445" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> - <stop offset="0" style="stop-color:#3C89C9"/> - <stop offset="0.1482" style="stop-color:#60A6DD"/> - <stop offset="0.3113" style="stop-color:#81C1F0"/> - <stop offset="0.4476" style="stop-color:#95D1FB"/> - <stop offset="0.5394" style="stop-color:#9CD7FF"/> - <stop offset="0.636" style="stop-color:#98D4FD"/> - <stop offset="0.7293" style="stop-color:#8DCAF6"/> - <stop offset="0.8214" style="stop-color:#79BBEB"/> - <stop offset="0.912" style="stop-color:#5EA5DC"/> - <stop offset="1" style="stop-color:#3C89C9"/> -</linearGradient> -<path fill="url(#SVGID_1_)" d="M19.625,36.763C8.787,36.763,0,34.888,0,32.575v10c0,2.313,8.787,4.188,19.625,4.188 - c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,34.888,30.464,36.763,19.625,36.763z"/> -<linearGradient id="SVGID_2_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-973.1445" x2="682.0508" y2="-973.1445" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> - <stop offset="0" style="stop-color:#9CD7FF"/> - <stop offset="0.0039" style="stop-color:#9DD7FF"/> - <stop offset="0.2273" style="stop-color:#BDE5FF"/> - <stop offset="0.4138" style="stop-color:#D1EEFF"/> - <stop offset="0.5394" style="stop-color:#D9F1FF"/> - <stop offset="0.6155" style="stop-color:#D5EFFE"/> - 
<stop offset="0.6891" style="stop-color:#C9E7FA"/> - <stop offset="0.7617" style="stop-color:#B6DAF3"/> - <stop offset="0.8337" style="stop-color:#9AC8EA"/> - <stop offset="0.9052" style="stop-color:#77B0DD"/> - <stop offset="0.9754" style="stop-color:#4D94CF"/> - <stop offset="1" style="stop-color:#3C89C9"/> -</linearGradient> -<path fill="url(#SVGID_2_)" d="M19.625,36.763c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.927-18.396,3.927 - c-9.481,0-17.396-1.959-18.396-3.927l-1.229,2C0,34.888,8.787,36.763,19.625,36.763z"/> -<path fill="#3C89C9" d="M19.625,26.468c10.16,0,19.625,2.775,19.625,2.775c-0.375,2.721-5.367,5.438-19.554,5.438 - c-12.125,0-18.467-2.484-19.541-4.918C-0.127,29.125,9.465,26.468,19.625,26.468z"/> -<linearGradient id="SVGID_3_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-965.6948" x2="682.0508" y2="-965.6948" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> - <stop offset="0" style="stop-color:#3C89C9"/> - <stop offset="0.1482" style="stop-color:#60A6DD"/> - <stop offset="0.3113" style="stop-color:#81C1F0"/> - <stop offset="0.4476" style="stop-color:#95D1FB"/> - <stop offset="0.5394" style="stop-color:#9CD7FF"/> - <stop offset="0.636" style="stop-color:#98D4FD"/> - <stop offset="0.7293" style="stop-color:#8DCAF6"/> - <stop offset="0.8214" style="stop-color:#79BBEB"/> - <stop offset="0.912" style="stop-color:#5EA5DC"/> - <stop offset="1" style="stop-color:#3C89C9"/> -</linearGradient> -<path fill="url(#SVGID_3_)" d="M19.625,23.313C8.787,23.313,0,21.438,0,19.125v10c0,2.313,8.787,4.188,19.625,4.188 - c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,21.438,30.464,23.313,19.625,23.313z"/> -<linearGradient id="SVGID_4_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-959.6948" x2="682.0508" y2="-959.6948" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> - <stop offset="0" style="stop-color:#9CD7FF"/> - <stop offset="0.0039" style="stop-color:#9DD7FF"/> - <stop offset="0.2273" style="stop-color:#BDE5FF"/> - <stop 
offset="0.4138" style="stop-color:#D1EEFF"/> - <stop offset="0.5394" style="stop-color:#D9F1FF"/> - <stop offset="0.6155" style="stop-color:#D5EFFE"/> - <stop offset="0.6891" style="stop-color:#C9E7FA"/> - <stop offset="0.7617" style="stop-color:#B6DAF3"/> - <stop offset="0.8337" style="stop-color:#9AC8EA"/> - <stop offset="0.9052" style="stop-color:#77B0DD"/> - <stop offset="0.9754" style="stop-color:#4D94CF"/> - <stop offset="1" style="stop-color:#3C89C9"/> -</linearGradient> -<path fill="url(#SVGID_4_)" d="M19.625,23.313c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 - c-9.481,0-17.396-1.959-18.396-3.926l-1.229,2C0,21.438,8.787,23.313,19.625,23.313z"/> -<path fill="#3C89C9" d="M19.476,13.019c10.161,0,19.625,2.775,19.625,2.775c-0.375,2.721-5.367,5.438-19.555,5.438 - c-12.125,0-18.467-2.485-19.541-4.918C-0.277,15.674,9.316,13.019,19.476,13.019z"/> -<linearGradient id="SVGID_5_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-952.4946" x2="682.0508" y2="-952.4946" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> - <stop offset="0" style="stop-color:#3C89C9"/> - <stop offset="0.1482" style="stop-color:#60A6DD"/> - <stop offset="0.3113" style="stop-color:#81C1F0"/> - <stop offset="0.4476" style="stop-color:#95D1FB"/> - <stop offset="0.5394" style="stop-color:#9CD7FF"/> - <stop offset="0.636" style="stop-color:#98D4FD"/> - <stop offset="0.7293" style="stop-color:#8DCAF6"/> - <stop offset="0.8214" style="stop-color:#79BBEB"/> - <stop offset="0.912" style="stop-color:#5EA5DC"/> - <stop offset="1" style="stop-color:#3C89C9"/> -</linearGradient> -<path fill="url(#SVGID_5_)" d="M19.625,10.113C8.787,10.113,0,8.238,0,5.925v10c0,2.313,8.787,4.188,19.625,4.188 - c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,8.238,30.464,10.113,19.625,10.113z"/> -<linearGradient id="SVGID_6_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-946.4946" x2="682.0508" y2="-946.4946" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> - <stop 
offset="0" style="stop-color:#9CD7FF"/> - <stop offset="0.0039" style="stop-color:#9DD7FF"/> - <stop offset="0.2273" style="stop-color:#BDE5FF"/> - <stop offset="0.4138" style="stop-color:#D1EEFF"/> - <stop offset="0.5394" style="stop-color:#D9F1FF"/> - <stop offset="0.6155" style="stop-color:#D5EFFE"/> - <stop offset="0.6891" style="stop-color:#C9E7FA"/> - <stop offset="0.7617" style="stop-color:#B6DAF3"/> - <stop offset="0.8337" style="stop-color:#9AC8EA"/> - <stop offset="0.9052" style="stop-color:#77B0DD"/> - <stop offset="0.9754" style="stop-color:#4D94CF"/> - <stop offset="1" style="stop-color:#3C89C9"/> -</linearGradient> -<path fill="url(#SVGID_6_)" d="M19.625,10.113c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 - c-9.481,0-17.396-1.959-18.396-3.926L0,5.925C0,8.238,8.787,10.113,19.625,10.113z"/> -<linearGradient id="SVGID_7_" gradientUnits="userSpaceOnUse" x1="644.0293" y1="-943.4014" x2="680.8223" y2="-943.4014" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> - <stop offset="0" style="stop-color:#9CD7FF"/> - <stop offset="1" style="stop-color:#3C89C9"/> -</linearGradient> -<ellipse fill="url(#SVGID_7_)" cx="19.625" cy="3.926" rx="18.396" ry="3.926"/> -<path opacity="0.24" fill="#FFFFFF" enable-background="new " d="M31.04,45.982c0,0-4.354,0.664-7.29,0.781 - c-3.125,0.125-8.952,0-8.952,0l-2.384-10.292l0.044-2.108l-1.251-1.154L9.789,23.024l-0.082-0.119L9.5,20.529l-1.65-1.254 - L5.329,8.793c0,0,4.213,0.903,7.234,1.07s8.375,0.25,8.375,0.25l3,9.875l-0.25,1.313l1.063,2.168l2.312,9.645l-0.521,1.416 - l1.46,1.834L31.04,45.982z"/> -</svg> - - - - diff --git a/doc/source/images_src/glance_db.graphml b/doc/source/images_src/glance_db.graphml deleted file mode 100644 index 64c1c964..00000000 --- a/doc/source/images_src/glance_db.graphml +++ /dev/null @@ -1,217 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - Images - id: varchar(36), primary -name: varchar(255), nullable -size: bigint(20), nullable -status: varchar(30) 
-is_public: tinyint(1) -created_at: datetime -updated_at: datetime, nullable -deleted_at: datetime, nullable -deleted: tinyint(1) -disk_format: varchar(20), nullable -container_format: varchar(20), nullable -checksum: varchar(32), nullable -owner: varchar(255), nullable -min_disk: int(11) -min_ram: int(11) -protected: tinyint(1) -virtual_size: bigint(20), nullable - - - - - - - - - - - - - - - - - - - image_locations - id: int(11), primary -image_id: varchar(36) -value: text -created_at: datetime -updated_at: datetime, nullable -deleted_at: datetime, nullable -deleted: tinyint(1) -meta_data: text, nullable -status: varchar(30) - - - - - - - - - - - - - - - - - - - image_members - id: int(11), primary -image_id: varchar(36) -member: varchar(255) -can_share: tiny_int(1) -created_at: datetime -updated_at: datetime, nullable -deleted_at: datetime, nullable -deleted: tinyint(1) -status: varchar(20) - - - - - - - - - - - - - - - - - - - image_properties - id: int(11), primary -image_id: varchar(36) -name: varchar(255) -value: text, nullable -created_at: datetime -updated_at: datetime, nullable -deleted_at: datetime, nullable -deleted: tinyint(1) - - - - - - - - - - - - - - - - - - - image_tags - id: int(11), primary -image_id: varchar(36) -value: varchar(255) -created_at: datetime -updated_at: datetime, nullable -deleted_at: datetime, nullable -deleted: tinyint(1) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/source/images_src/glance_layers.graphml b/doc/source/images_src/glance_layers.graphml deleted file mode 100644 index 5598c458..00000000 --- a/doc/source/images_src/glance_layers.graphml +++ /dev/null @@ -1,363 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - Domain - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Router -api/v2/router.py - - - - - - - - - - - - - - - - - - - REST API -api/v2/* - - - - - - - - - - - - - - - - - - - Auth 
-api/authorization.py - - - - - - - - - - - - - - - - - - - Notifier -notifier.py - - - - - - - - - - - - - - - - - - - Policy -api/policy.py - - - - - - - - - - - - - - - - - - - Quota -quota/__init__.py - - - - - - - - - - - - - - - - - - - Location -location.py - - - - - - - - - - - - - - - - - - - DB -db/__init__.py - - - - - - - - - - - - - - - - - - - Registry (optional) -registry/v2/* - - - - - - - - - - - - - - - - - - - Data Access -db/sqlalchemy/api.py - - - - - - - - - - - - - - - - - - - A Client - - - - - - - - - - - - - - - - - - - Glance Store - - - - - - - - - - - - - - - - - - - DBMS - - - - - - - - - - - - - - - - - - - Property protection (optional) -api/property_protections.py - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/source/images_src/image_status_transition.dot b/doc/source/images_src/image_status_transition.dot deleted file mode 100644 index 036b3040..00000000 --- a/doc/source/images_src/image_status_transition.dot +++ /dev/null @@ -1,51 +0,0 @@ -/* -# All Rights Reserved. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -*/ - -/* -This file can be compiled by graphviz with issuing the following command: - - dot -Tpng -oimage_status_transition.png image_status_transition.dot - -See http://www.graphviz.org to get more info. 
-*/ - -digraph { - node [shape="doublecircle" color="#006699" style="filled" - fillcolor="#33CCFF" fixedsize="True" width="1.5" height="1.5"]; - - "" -> "queued" [label="create image"]; - - "queued" -> "active" [label="add location*"]; - "queued" -> "saving" [label="upload"]; - "queued" -> "deleted" [label="delete"]; - - "saving" -> "active" [label="upload succeed"]; - "saving" -> "killed" [label="[v1] upload fail"]; - "saving" -> "queued" [label="[v2] upload fail"]; - "saving" -> "deleted" [label="delete"]; - - "active" -> "pending_delete" [label="delayed delete"]; - "active" -> "deleted" [label="delete"]; - "active" -> "deactivated" [label="deactivate"]; - - "deactivated" -> "active" [label="reactivate"]; - "deactivated" -> "deleted" [label="delete"]; - - "killed" -> "deleted" [label="delete"]; - - "pending_delete" -> "deleted" [label="after scrub time"]; -} diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 22ca7f8d..00000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,48 +0,0 @@ -.. - Copyright 2010 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -================================== -Welcome to Glance's documentation! -================================== - -The Image service (glance) project provides a service where users can upload -and discover data assets that are meant to be used with other services. -This currently includes images and metadata definitions. 
- -Glance image services include discovering, registering, and -retrieving virtual machine (VM) images. Glance has a RESTful API that allows -querying of VM image metadata as well as retrieval of the actual image. - -.. include:: deprecation-note.inc - -VM images made available through Glance can be stored in a variety of -locations from simple filesystems to object-storage systems like the -OpenStack Swift project. - -.. toctree:: - :maxdepth: 2 - - user/index - admin/index - install/index - configuration/index - cli/index - contributor/index - -.. toctree:: - :maxdepth: 1 - - glossary diff --git a/doc/source/install/get-started.rst b/doc/source/install/get-started.rst deleted file mode 100644 index 078bc505..00000000 --- a/doc/source/install/get-started.rst +++ /dev/null @@ -1,71 +0,0 @@ -====================== -Image service overview -====================== - -The Image service (glance) enables users to discover, -register, and retrieve virtual machine images. It offers a -:term:`REST ` API that enables you to query virtual -machine image metadata and retrieve an actual image. -You can store virtual machine images made available through -the Image service in a variety of locations, from simple file -systems to object-storage systems like OpenStack Object Storage. - -.. important:: - - For simplicity, this guide describes configuring the Image service to - use the ``file`` back end, which uploads and stores in a - directory on the controller node hosting the Image service. By - default, this directory is ``/var/lib/glance/images/``. - - Before you proceed, ensure that the controller node has at least - several gigabytes of space available in this directory. Keep in - mind that since the ``file`` back end is often local to a controller - node, it is not typically suitable for a multi-node glance deployment. - - For information on requirements for other back ends, see - `Configuration Reference - `__. 
- -The OpenStack Image service is central to Infrastructure-as-a-Service -(IaaS). It accepts API requests for disk or server images, and -metadata definitions from end users or OpenStack Compute -components. It also supports the storage of disk or server images on -various repository types, including OpenStack Object Storage. - -A number of periodic processes run on the OpenStack Image service to -support caching. Replication services ensure consistency and -availability through the cluster. Other periodic processes include -auditors, updaters, and reapers. - -The OpenStack Image service includes the following components: - -glance-api - Accepts Image API calls for image discovery, retrieval, and storage. - -glance-registry - Stores, processes, and retrieves metadata about images. Metadata - includes items such as size and type. - - .. warning:: - - The registry is a private internal service meant for use by - OpenStack Image service. Do not expose this service to users. - -Database - Stores image metadata and you can choose your database depending on - your preference. Most deployments use MySQL or SQLite. - -Storage repository for image files - Various repository types are supported including normal file - systems (or any filesystem mounted on the glance-api controller - node), Object Storage, RADOS block devices, VMware datastore, - and HTTP. Note that some repositories will only support read-only - usage. - -Metadata definition service - A common API for vendors, admins, services, and users to meaningfully - define their own custom metadata. This metadata can be used on - different types of resources like images, artifacts, volumes, - flavors, and aggregates. A definition includes the new property's key, - description, constraints, and the resource types which it can be - associated with. 
diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst deleted file mode 100644 index 75a1aaec..00000000 --- a/doc/source/install/index.rst +++ /dev/null @@ -1,43 +0,0 @@ -.. - Copyright 2011 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -============== - Installation -============== - -.. toctree:: - - get-started - install.rst - verify.rst - -Ocata -~~~~~ - -To install Glance, see the Ocata Image service install guide for each distribution: - -- `Ubuntu `__ -- `CentOS and RHEL `__ -- `openSUSE and SUSE Linux Enterprise `__ - -Newton -~~~~~~ - -To install Glance, see the Newton Image service install guide for each distribution: - -- `Ubuntu `__ -- `CentOS and RHEL `__ -- `openSUSE and SUSE Linux Enterprise `__ diff --git a/doc/source/install/install-debian.rst b/doc/source/install/install-debian.rst deleted file mode 100644 index ec37cbcd..00000000 --- a/doc/source/install/install-debian.rst +++ /dev/null @@ -1,329 +0,0 @@ -Install and configure (Debian) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Image service, -code-named glance, on the controller node. For simplicity, this -configuration stores images on the local file system. - -Prerequisites -------------- - -Before you install and configure the Image service, you must -create a database, service credentials, and API endpoints. - -#. 
To create the database, complete these steps: - - - -* Use the database access client to connect to the database - server as the ``root`` user: - - .. code-block:: console - - $ mysql -u root -p - - .. end - - - * Create the ``glance`` database: - - .. code-block:: console - - MariaDB [(none)]> CREATE DATABASE glance; - - .. end - - * Grant proper access to the ``glance`` database: - - .. code-block:: console - - MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \ - IDENTIFIED BY 'GLANCE_DBPASS'; - MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \ - IDENTIFIED BY 'GLANCE_DBPASS'; - - .. end - - Replace ``GLANCE_DBPASS`` with a suitable password. - - * Exit the database access client. - -#. Source the ``admin`` credentials to gain access to - admin-only CLI commands: - - .. code-block:: console - - $ . admin-openrc - - .. end - -#. To create the service credentials, complete these steps: - - * Create the ``glance`` user: - - .. code-block:: console - - $ openstack user create --domain default --password-prompt glance - - User Password: - Repeat User Password: - +---------------------+----------------------------------+ - | Field | Value | - +---------------------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | 3f4e777c4062483ab8d9edd7dff829df | - | name | glance | - | options | {} | - | password_expires_at | None | - +---------------------+----------------------------------+ - - .. end - - * Add the ``admin`` role to the ``glance`` user and - ``service`` project: - - .. code-block:: console - - $ openstack role add --project service --user glance admin - - .. end - - .. note:: - - This command provides no output. - - * Create the ``glance`` service entity: - - .. 
code-block:: console - - $ openstack service create --name glance \ - --description "OpenStack Image" image - - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | OpenStack Image | - | enabled | True | - | id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | name | glance | - | type | image | - +-------------+----------------------------------+ - - .. end - -#. Create the Image service API endpoints: - - .. code-block:: console - - $ openstack endpoint create --region RegionOne \ - image public http://controller:9292 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 340be3625e9b4239a6415d034e98aace | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | service_name | glance | - | service_type | image | - | url | http://controller:9292 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - image internal http://controller:9292 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | a6e4b153c2ae4c919eccfdbb7dceb5d2 | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | service_name | glance | - | service_type | image | - | url | http://controller:9292 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - image admin http://controller:9292 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 0c37ed58103f4300a84ff125a539032d | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | 
- | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | service_name | glance | - | service_type | image | - | url | http://controller:9292 | - +--------------+----------------------------------+ - - .. end - -Install and configure components --------------------------------- - -.. include:: note_configuration_vary_by_distribution.txt - - - - - -#. Install the packages: - - .. code-block:: console - - # apt install glance - - .. end - - -2. Edit the ``/etc/glance/glance-api.conf`` file and complete the - following actions: - - * In the ``[database]`` section, configure database access: - - .. path /etc/glance/glance.conf - .. code-block:: ini - - [database] - # ... - connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance - - .. end - - Replace ``GLANCE_DBPASS`` with the password you chose for the - Image service database. - - * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections, - configure Identity service access: - - .. path /etc/glance/glance.conf - .. code-block:: ini - - [keystone_authtoken] - # ... - auth_uri = http://controller:5000 - auth_url = http://controller:35357 - memcached_servers = controller:11211 - auth_type = password - project_domain_name = default - user_domain_name = default - project_name = service - username = glance - password = GLANCE_PASS - - [paste_deploy] - # ... - flavor = keystone - - .. end - - Replace ``GLANCE_PASS`` with the password you chose for the - ``glance`` user in the Identity service. - - .. note:: - - Comment out or remove any other options in the - ``[keystone_authtoken]`` section. - - * In the ``[glance_store]`` section, configure the local file - system store and location of image files: - - .. path /etc/glance/glance.conf - .. code-block:: ini - - [glance_store] - # ... - stores = file,http - default_store = file - filesystem_store_datadir = /var/lib/glance/images/ - - .. end - -3. 
Edit the ``/etc/glance/glance-registry.conf`` file and complete - the following actions: - - * In the ``[database]`` section, configure database access: - - .. path /etc/glance/glance-registry.conf - .. code-block:: ini - - [database] - # ... - connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance - - .. end - - Replace ``GLANCE_DBPASS`` with the password you chose for the - Image service database. - - * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections, - configure Identity service access: - - .. path /etc/glance/glance-registry.conf - .. code-block:: ini - - [keystone_authtoken] - # ... - auth_uri = http://controller:5000 - auth_url = http://controller:35357 - memcached_servers = controller:11211 - auth_type = password - project_domain_name = default - user_domain_name = default - project_name = service - username = glance - password = GLANCE_PASS - - [paste_deploy] - # ... - flavor = keystone - - .. end - - Replace ``GLANCE_PASS`` with the password you chose for the - ``glance`` user in the Identity service. - - .. note:: - - Comment out or remove any other options in the - ``[keystone_authtoken]`` section. - - -4. Populate the Image service database: - - .. code-block:: console - - # su -s /bin/sh -c "glance-manage db_sync" glance - - .. end - - .. note:: - - Ignore any deprecation messages in this output. - - -Finalize installation ---------------------- - - - -#. Restart the Image services: - - .. code-block:: console - - # service glance-registry restart - # service glance-api restart - - .. end - diff --git a/doc/source/install/install-obs.rst b/doc/source/install/install-obs.rst deleted file mode 100644 index 3ad682e4..00000000 --- a/doc/source/install/install-obs.rst +++ /dev/null @@ -1,333 +0,0 @@ -Install and configure (SUSE) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Image service, -code-named glance, on the controller node. 
For simplicity, this -configuration stores images on the local file system. - -Prerequisites -------------- - -Before you install and configure the Image service, you must -create a database, service credentials, and API endpoints. - -#. To create the database, complete these steps: - - - -* Use the database access client to connect to the database - server as the ``root`` user: - - .. code-block:: console - - $ mysql -u root -p - - .. end - - - * Create the ``glance`` database: - - .. code-block:: console - - MariaDB [(none)]> CREATE DATABASE glance; - - .. end - - * Grant proper access to the ``glance`` database: - - .. code-block:: console - - MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \ - IDENTIFIED BY 'GLANCE_DBPASS'; - MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \ - IDENTIFIED BY 'GLANCE_DBPASS'; - - .. end - - Replace ``GLANCE_DBPASS`` with a suitable password. - - * Exit the database access client. - -#. Source the ``admin`` credentials to gain access to - admin-only CLI commands: - - .. code-block:: console - - $ . admin-openrc - - .. end - -#. To create the service credentials, complete these steps: - - * Create the ``glance`` user: - - .. code-block:: console - - $ openstack user create --domain default --password-prompt glance - - User Password: - Repeat User Password: - +---------------------+----------------------------------+ - | Field | Value | - +---------------------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | 3f4e777c4062483ab8d9edd7dff829df | - | name | glance | - | options | {} | - | password_expires_at | None | - +---------------------+----------------------------------+ - - .. end - - * Add the ``admin`` role to the ``glance`` user and - ``service`` project: - - .. code-block:: console - - $ openstack role add --project service --user glance admin - - .. end - - .. note:: - - This command provides no output. 
- - * Create the ``glance`` service entity: - - .. code-block:: console - - $ openstack service create --name glance \ - --description "OpenStack Image" image - - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | OpenStack Image | - | enabled | True | - | id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | name | glance | - | type | image | - +-------------+----------------------------------+ - - .. end - -#. Create the Image service API endpoints: - - .. code-block:: console - - $ openstack endpoint create --region RegionOne \ - image public http://controller:9292 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 340be3625e9b4239a6415d034e98aace | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | service_name | glance | - | service_type | image | - | url | http://controller:9292 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - image internal http://controller:9292 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | a6e4b153c2ae4c919eccfdbb7dceb5d2 | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | service_name | glance | - | service_type | image | - | url | http://controller:9292 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - image admin http://controller:9292 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 0c37ed58103f4300a84ff125a539032d | - | interface | admin | - 
| region | RegionOne | - | region_id | RegionOne | - | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | service_name | glance | - | service_type | image | - | url | http://controller:9292 | - +--------------+----------------------------------+ - - .. end - -Install and configure components --------------------------------- - -.. include:: note_configuration_vary_by_distribution.txt - - -.. note:: - - Starting with the Newton release, SUSE OpenStack packages are shipping - with the upstream default configuration files. For example - ``/etc/glance/glance-api.conf`` or - ``/etc/glance/glance-registry.conf``, with customizations in - ``/etc/glance/glance-api.conf.d/`` or - ``/etc/glance/glance-registry.conf.d/``. While the following - instructions modify the default configuration files, adding new files - in ``/etc/glance/glance-api.conf.d`` or - ``/etc/glance/glance-registry.conf.d`` achieves the same result. - - - -#. Install the packages: - - .. code-block:: console - - # zypper install openstack-glance \ - openstack-glance-api openstack-glance-registry - - .. end - - - - -2. Edit the ``/etc/glance/glance-api.conf`` file and complete the - following actions: - - * In the ``[database]`` section, configure database access: - - .. path /etc/glance/glance.conf - .. code-block:: ini - - [database] - # ... - connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance - - .. end - - Replace ``GLANCE_DBPASS`` with the password you chose for the - Image service database. - - * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections, - configure Identity service access: - - .. path /etc/glance/glance.conf - .. code-block:: ini - - [keystone_authtoken] - # ... - auth_uri = http://controller:5000 - auth_url = http://controller:35357 - memcached_servers = controller:11211 - auth_type = password - project_domain_name = default - user_domain_name = default - project_name = service - username = glance - password = GLANCE_PASS - - [paste_deploy] - # ... 
- flavor = keystone - - .. end - - Replace ``GLANCE_PASS`` with the password you chose for the - ``glance`` user in the Identity service. - - .. note:: - - Comment out or remove any other options in the - ``[keystone_authtoken]`` section. - - * In the ``[glance_store]`` section, configure the local file - system store and location of image files: - - .. path /etc/glance/glance.conf - .. code-block:: ini - - [glance_store] - # ... - stores = file,http - default_store = file - filesystem_store_datadir = /var/lib/glance/images/ - - .. end - -3. Edit the ``/etc/glance/glance-registry.conf`` file and complete - the following actions: - - * In the ``[database]`` section, configure database access: - - .. path /etc/glance/glance-registry.conf - .. code-block:: ini - - [database] - # ... - connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance - - .. end - - Replace ``GLANCE_DBPASS`` with the password you chose for the - Image service database. - - * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections, - configure Identity service access: - - .. path /etc/glance/glance-registry.conf - .. code-block:: ini - - [keystone_authtoken] - # ... - auth_uri = http://controller:5000 - auth_url = http://controller:35357 - memcached_servers = controller:11211 - auth_type = password - project_domain_name = default - user_domain_name = default - project_name = service - username = glance - password = GLANCE_PASS - - [paste_deploy] - # ... - flavor = keystone - - .. end - - Replace ``GLANCE_PASS`` with the password you chose for the - ``glance`` user in the Identity service. - - .. note:: - - Comment out or remove any other options in the - ``[keystone_authtoken]`` section. - - -Finalize installation ---------------------- - - -* Start the Image services and configure them to start when - the system boots: - - .. 
code-block:: console - - # systemctl enable openstack-glance-api.service \ - openstack-glance-registry.service - # systemctl start openstack-glance-api.service \ - openstack-glance-registry.service - - .. end - - diff --git a/doc/source/install/install-rdo.rst b/doc/source/install/install-rdo.rst deleted file mode 100644 index f88ebe47..00000000 --- a/doc/source/install/install-rdo.rst +++ /dev/null @@ -1,332 +0,0 @@ -Install and configure (Red Hat) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Image service, -code-named glance, on the controller node. For simplicity, this -configuration stores images on the local file system. - -Prerequisites -------------- - -Before you install and configure the Image service, you must -create a database, service credentials, and API endpoints. - -#. To create the database, complete these steps: - - - -* Use the database access client to connect to the database - server as the ``root`` user: - - .. code-block:: console - - $ mysql -u root -p - - .. end - - - * Create the ``glance`` database: - - .. code-block:: console - - MariaDB [(none)]> CREATE DATABASE glance; - - .. end - - * Grant proper access to the ``glance`` database: - - .. code-block:: console - - MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \ - IDENTIFIED BY 'GLANCE_DBPASS'; - MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \ - IDENTIFIED BY 'GLANCE_DBPASS'; - - .. end - - Replace ``GLANCE_DBPASS`` with a suitable password. - - * Exit the database access client. - -#. Source the ``admin`` credentials to gain access to - admin-only CLI commands: - - .. code-block:: console - - $ . admin-openrc - - .. end - -#. To create the service credentials, complete these steps: - - * Create the ``glance`` user: - - .. 
code-block:: console - - $ openstack user create --domain default --password-prompt glance - - User Password: - Repeat User Password: - +---------------------+----------------------------------+ - | Field | Value | - +---------------------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | 3f4e777c4062483ab8d9edd7dff829df | - | name | glance | - | options | {} | - | password_expires_at | None | - +---------------------+----------------------------------+ - - .. end - - * Add the ``admin`` role to the ``glance`` user and - ``service`` project: - - .. code-block:: console - - $ openstack role add --project service --user glance admin - - .. end - - .. note:: - - This command provides no output. - - * Create the ``glance`` service entity: - - .. code-block:: console - - $ openstack service create --name glance \ - --description "OpenStack Image" image - - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | OpenStack Image | - | enabled | True | - | id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | name | glance | - | type | image | - +-------------+----------------------------------+ - - .. end - -#. Create the Image service API endpoints: - - .. 
code-block:: console - - $ openstack endpoint create --region RegionOne \ - image public http://controller:9292 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 340be3625e9b4239a6415d034e98aace | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | service_name | glance | - | service_type | image | - | url | http://controller:9292 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - image internal http://controller:9292 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | a6e4b153c2ae4c919eccfdbb7dceb5d2 | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | service_name | glance | - | service_type | image | - | url | http://controller:9292 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - image admin http://controller:9292 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 0c37ed58103f4300a84ff125a539032d | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | service_name | glance | - | service_type | image | - | url | http://controller:9292 | - +--------------+----------------------------------+ - - .. end - -Install and configure components --------------------------------- - -.. include:: note_configuration_vary_by_distribution.txt - - - - -#. Install the packages: - - .. code-block:: console - - # yum install openstack-glance - - .. end - - - -2. 
Edit the ``/etc/glance/glance-api.conf`` file and complete the - following actions: - - * In the ``[database]`` section, configure database access: - - .. path /etc/glance/glance.conf - .. code-block:: ini - - [database] - # ... - connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance - - .. end - - Replace ``GLANCE_DBPASS`` with the password you chose for the - Image service database. - - * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections, - configure Identity service access: - - .. path /etc/glance/glance.conf - .. code-block:: ini - - [keystone_authtoken] - # ... - auth_uri = http://controller:5000 - auth_url = http://controller:35357 - memcached_servers = controller:11211 - auth_type = password - project_domain_name = default - user_domain_name = default - project_name = service - username = glance - password = GLANCE_PASS - - [paste_deploy] - # ... - flavor = keystone - - .. end - - Replace ``GLANCE_PASS`` with the password you chose for the - ``glance`` user in the Identity service. - - .. note:: - - Comment out or remove any other options in the - ``[keystone_authtoken]`` section. - - * In the ``[glance_store]`` section, configure the local file - system store and location of image files: - - .. path /etc/glance/glance.conf - .. code-block:: ini - - [glance_store] - # ... - stores = file,http - default_store = file - filesystem_store_datadir = /var/lib/glance/images/ - - .. end - -3. Edit the ``/etc/glance/glance-registry.conf`` file and complete - the following actions: - - * In the ``[database]`` section, configure database access: - - .. path /etc/glance/glance-registry.conf - .. code-block:: ini - - [database] - # ... - connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance - - .. end - - Replace ``GLANCE_DBPASS`` with the password you chose for the - Image service database. - - * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections, - configure Identity service access: - - .. 
path /etc/glance/glance-registry.conf - .. code-block:: ini - - [keystone_authtoken] - # ... - auth_uri = http://controller:5000 - auth_url = http://controller:35357 - memcached_servers = controller:11211 - auth_type = password - project_domain_name = default - user_domain_name = default - project_name = service - username = glance - password = GLANCE_PASS - - [paste_deploy] - # ... - flavor = keystone - - .. end - - Replace ``GLANCE_PASS`` with the password you chose for the - ``glance`` user in the Identity service. - - .. note:: - - Comment out or remove any other options in the - ``[keystone_authtoken]`` section. - - -4. Populate the Image service database: - - .. code-block:: console - - # su -s /bin/sh -c "glance-manage db_sync" glance - - .. end - - .. note:: - - Ignore any deprecation messages in this output. - - -Finalize installation ---------------------- - - -* Start the Image services and configure them to start when - the system boots: - - .. code-block:: console - - # systemctl enable openstack-glance-api.service \ - openstack-glance-registry.service - # systemctl start openstack-glance-api.service \ - openstack-glance-registry.service - - .. end - - diff --git a/doc/source/install/install-ubuntu.rst b/doc/source/install/install-ubuntu.rst deleted file mode 100644 index 992a5b64..00000000 --- a/doc/source/install/install-ubuntu.rst +++ /dev/null @@ -1,329 +0,0 @@ -Install and configure (Ubuntu) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Image service, -code-named glance, on the controller node. For simplicity, this -configuration stores images on the local file system. - -Prerequisites -------------- - -Before you install and configure the Image service, you must -create a database, service credentials, and API endpoints. - -#. To create the database, complete these steps: - - -* Use the database access client to connect to the database - server as the ``root`` user: - - .. 
code-block:: console - - # mysql - - .. end - - - - * Create the ``glance`` database: - - .. code-block:: console - - MariaDB [(none)]> CREATE DATABASE glance; - - .. end - - * Grant proper access to the ``glance`` database: - - .. code-block:: console - - MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \ - IDENTIFIED BY 'GLANCE_DBPASS'; - MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \ - IDENTIFIED BY 'GLANCE_DBPASS'; - - .. end - - Replace ``GLANCE_DBPASS`` with a suitable password. - - * Exit the database access client. - -#. Source the ``admin`` credentials to gain access to - admin-only CLI commands: - - .. code-block:: console - - $ . admin-openrc - - .. end - -#. To create the service credentials, complete these steps: - - * Create the ``glance`` user: - - .. code-block:: console - - $ openstack user create --domain default --password-prompt glance - - User Password: - Repeat User Password: - +---------------------+----------------------------------+ - | Field | Value | - +---------------------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | 3f4e777c4062483ab8d9edd7dff829df | - | name | glance | - | options | {} | - | password_expires_at | None | - +---------------------+----------------------------------+ - - .. end - - * Add the ``admin`` role to the ``glance`` user and - ``service`` project: - - .. code-block:: console - - $ openstack role add --project service --user glance admin - - .. end - - .. note:: - - This command provides no output. - - * Create the ``glance`` service entity: - - .. 
code-block:: console - - $ openstack service create --name glance \ - --description "OpenStack Image" image - - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | OpenStack Image | - | enabled | True | - | id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | name | glance | - | type | image | - +-------------+----------------------------------+ - - .. end - -#. Create the Image service API endpoints: - - .. code-block:: console - - $ openstack endpoint create --region RegionOne \ - image public http://controller:9292 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 340be3625e9b4239a6415d034e98aace | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | service_name | glance | - | service_type | image | - | url | http://controller:9292 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - image internal http://controller:9292 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | a6e4b153c2ae4c919eccfdbb7dceb5d2 | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | service_name | glance | - | service_type | image | - | url | http://controller:9292 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - image admin http://controller:9292 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 0c37ed58103f4300a84ff125a539032d | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | 
- | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | service_name | glance | - | service_type | image | - | url | http://controller:9292 | - +--------------+----------------------------------+ - - .. end - -Install and configure components --------------------------------- - -.. include:: note_configuration_vary_by_distribution.txt - - - - - -#. Install the packages: - - .. code-block:: console - - # apt install glance - - .. end - - -2. Edit the ``/etc/glance/glance-api.conf`` file and complete the - following actions: - - * In the ``[database]`` section, configure database access: - - .. path /etc/glance/glance.conf - .. code-block:: ini - - [database] - # ... - connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance - - .. end - - Replace ``GLANCE_DBPASS`` with the password you chose for the - Image service database. - - * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections, - configure Identity service access: - - .. path /etc/glance/glance.conf - .. code-block:: ini - - [keystone_authtoken] - # ... - auth_uri = http://controller:5000 - auth_url = http://controller:35357 - memcached_servers = controller:11211 - auth_type = password - project_domain_name = default - user_domain_name = default - project_name = service - username = glance - password = GLANCE_PASS - - [paste_deploy] - # ... - flavor = keystone - - .. end - - Replace ``GLANCE_PASS`` with the password you chose for the - ``glance`` user in the Identity service. - - .. note:: - - Comment out or remove any other options in the - ``[keystone_authtoken]`` section. - - * In the ``[glance_store]`` section, configure the local file - system store and location of image files: - - .. path /etc/glance/glance.conf - .. code-block:: ini - - [glance_store] - # ... - stores = file,http - default_store = file - filesystem_store_datadir = /var/lib/glance/images/ - - .. end - -3. 
Edit the ``/etc/glance/glance-registry.conf`` file and complete - the following actions: - - * In the ``[database]`` section, configure database access: - - .. path /etc/glance/glance-registry.conf - .. code-block:: ini - - [database] - # ... - connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance - - .. end - - Replace ``GLANCE_DBPASS`` with the password you chose for the - Image service database. - - * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections, - configure Identity service access: - - .. path /etc/glance/glance-registry.conf - .. code-block:: ini - - [keystone_authtoken] - # ... - auth_uri = http://controller:5000 - auth_url = http://controller:35357 - memcached_servers = controller:11211 - auth_type = password - project_domain_name = default - user_domain_name = default - project_name = service - username = glance - password = GLANCE_PASS - - [paste_deploy] - # ... - flavor = keystone - - .. end - - Replace ``GLANCE_PASS`` with the password you chose for the - ``glance`` user in the Identity service. - - .. note:: - - Comment out or remove any other options in the - ``[keystone_authtoken]`` section. - - -4. Populate the Image service database: - - .. code-block:: console - - # su -s /bin/sh -c "glance-manage db_sync" glance - - .. end - - .. note:: - - Ignore any deprecation messages in this output. - - -Finalize installation ---------------------- - - - -#. Restart the Image services: - - .. code-block:: console - - # service glance-registry restart - # service glance-api restart - - .. end - diff --git a/doc/source/install/install.rst b/doc/source/install/install.rst deleted file mode 100644 index 26c17716..00000000 --- a/doc/source/install/install.rst +++ /dev/null @@ -1,11 +0,0 @@ -Install and configure -~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Image service, -code-named glance, on the controller node. For simplicity, this -configuration stores images on the local file system. - -.. 
toctree:: - :glob: - - install-* diff --git a/doc/source/install/note_configuration_vary_by_distribution.txt b/doc/source/install/note_configuration_vary_by_distribution.txt deleted file mode 100644 index 337c5696..00000000 --- a/doc/source/install/note_configuration_vary_by_distribution.txt +++ /dev/null @@ -1,7 +0,0 @@ -.. note:: - - Default configuration files vary by distribution. You might need - to add these sections and options rather than modifying existing - sections and options. Also, an ellipsis (``...``) in the configuration - snippets indicates potential default configuration options that you - should retain. diff --git a/doc/source/install/verify.rst b/doc/source/install/verify.rst deleted file mode 100644 index 686e279d..00000000 --- a/doc/source/install/verify.rst +++ /dev/null @@ -1,103 +0,0 @@ -Verify operation -~~~~~~~~~~~~~~~~ - -Verify operation of the Image service using -`CirrOS `__, a small -Linux image that helps you test your OpenStack deployment. - -For more information about how to download and build images, see -`OpenStack Virtual Machine Image Guide -`__. -For information about how to manage images, see the -`OpenStack End User Guide -`__. - -.. note:: - - Perform these commands on the controller node. - -#. Source the ``admin`` credentials to gain access to - admin-only CLI commands: - - .. code-block:: console - - $ . admin-openrc - - .. end - -#. Download the source image: - - .. code-block:: console - - $ wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img - - .. end - - .. note:: - - Install ``wget`` if your distribution does not include it. - -#. Upload the image to the Image service using the - :term:`QCOW2 ` disk format, :term:`bare` - container format, and public visibility so all projects can access it: - - .. 
code-block:: console - - $ openstack image create "cirros" \ - --file cirros-0.3.5-x86_64-disk.img \ - --disk-format qcow2 --container-format bare \ - --public - - +------------------+------------------------------------------------------+ - | Field | Value | - +------------------+------------------------------------------------------+ - | checksum | 133eae9fb1c98f45894a4e60d8736619 | - | container_format | bare | - | created_at | 2015-03-26T16:52:10Z | - | disk_format | qcow2 | - | file | /v2/images/cc5c6982-4910-471e-b864-1098015901b5/file | - | id | cc5c6982-4910-471e-b864-1098015901b5 | - | min_disk | 0 | - | min_ram | 0 | - | name | cirros | - | owner | ae7a98326b9c455588edd2656d723b9d | - | protected | False | - | schema | /v2/schemas/image | - | size | 13200896 | - | status | active | - | tags | | - | updated_at | 2015-03-26T16:52:10Z | - | virtual_size | None | - | visibility | public | - +------------------+------------------------------------------------------+ - - .. end - - For information about the :command:`openstack image create` parameters, - see `Create or update an image (glance) - `__ - in the ``OpenStack User Guide``. - - For information about disk and container formats for images, see - `Disk and container formats for images - `__ - in the ``OpenStack Virtual Machine Image Guide``. - - .. note:: - - OpenStack generates IDs dynamically, so you will see - different values in the example command output. - -#. Confirm upload of the image and validate attributes: - - .. code-block:: console - - $ openstack image list - - +--------------------------------------+--------+--------+ - | ID | Name | Status | - +--------------------------------------+--------+--------+ - | 38047887-61a7-41ea-9b49-27987d5e8bb9 | cirros | active | - +--------------------------------------+--------+--------+ - - .. 
end diff --git a/doc/source/user/common-image-properties.rst b/doc/source/user/common-image-properties.rst deleted file mode 100644 index 65d07618..00000000 --- a/doc/source/user/common-image-properties.rst +++ /dev/null @@ -1,62 +0,0 @@ -.. - Copyright 2013 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Common Image Properties -======================= - -When adding an image to Glance, you may specify some common image properties -that may prove useful to consumers of your image. - -This document explains the names of these properties and the expected values. - -The common image properties are also described in a JSON schema, found in -etc/schema-image.json in the Glance source code. - -**architecture** ----------------- - -Operating system architecture as specified in -http://docs.openstack.org/cli-reference/glance-property-keys.html - - -**instance_uuid** ------------------ - -Metadata which can be used to record which instance this image is associated -with. (Informational only, does not create an instance snapshot.) - -**kernel_id** -------------- - -The ID of image stored in Glance that should be used as the kernel when booting -an AMI-style image. - -**ramdisk_id** --------------- - -The ID of image stored in Glance that should be used as the ramdisk when -booting an AMI-style image. 
- -**os_distro** -------------- - -The common name of the operating system distribution as specified in -http://docs.openstack.org/cli-reference/glance-property-keys.html - -**os_version** --------------- - -The operating system version as specified by the distributor. diff --git a/doc/source/user/formats.rst b/doc/source/user/formats.rst deleted file mode 100644 index 719de9ab..00000000 --- a/doc/source/user/formats.rst +++ /dev/null @@ -1,124 +0,0 @@ -.. - Copyright 2011 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _formats: - -Disk and Container Formats -========================== - -When adding an image to Glance, you must specify what the virtual -machine image's *disk format* and *container format* are. Disk and container -formats are configurable on a per-deployment basis. This document intends to -establish a global convention for what specific values of *disk_format* and -*container_format* mean. - -Disk Format ------------ - -The disk format of a virtual machine image is the format of the underlying -disk image. Virtual appliance vendors have different formats for laying out -the information contained in a virtual machine disk image. 
- -You can set your image's disk format to one of the following: - -* **raw** - - This is an unstructured disk image format - -* **vhd** - - This is the VHD disk format, a common disk format used by virtual machine - monitors from VMware, Xen, Microsoft, VirtualBox, and others - -* **vhdx** - - This is the VHDX disk format, an enhanced version of the vhd format which - supports larger disk sizes among other features. - -* **vmdk** - - Another common disk format supported by many common virtual machine monitors - -* **vdi** - - A disk format supported by VirtualBox virtual machine monitor and the QEMU - emulator - -* **iso** - - An archive format for the data contents of an optical disc (e.g. CDROM). - -* **ploop** - - A disk format supported and used by Virtuozzo to run OS Containers - -* **qcow2** - - A disk format supported by the QEMU emulator that can expand dynamically and - supports Copy on Write - -* **aki** - - This indicates what is stored in Glance is an Amazon kernel image - -* **ari** - - This indicates what is stored in Glance is an Amazon ramdisk image - -* **ami** - - This indicates what is stored in Glance is an Amazon machine image - -Container Format ----------------- - -The container format refers to whether the virtual machine image is in a -file format that also contains metadata about the actual virtual machine. - -Note that the container format string is not currently used by Glance or -other OpenStack components, so it is safe to simply specify **bare** as -the container format if you are unsure. 
- -You can set your image's container format to one of the following: - -* **bare** - - This indicates there is no container or metadata envelope for the image - -* **ovf** - - This is the OVF container format - -* **aki** - - This indicates what is stored in Glance is an Amazon kernel image - -* **ari** - - This indicates what is stored in Glance is an Amazon ramdisk image - -* **ami** - - This indicates what is stored in Glance is an Amazon machine image - -* **ova** - - This indicates what is stored in Glance is an OVA tar archive file - -* **docker** - - This indicates what is stored in Glance is a Docker tar archive of - the container filesystem diff --git a/doc/source/user/glanceapi.rst b/doc/source/user/glanceapi.rst deleted file mode 100644 index c3b3a30a..00000000 --- a/doc/source/user/glanceapi.rst +++ /dev/null @@ -1,891 +0,0 @@ -.. - Copyright 2010 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Using Glance's Image Public APIs -================================ - -Glance is the reference implementation of the OpenStack Images API. As such, -Glance fully implements versions 1 and 2 of the Images API. - -.. include:: ../deprecation-note.inc - -There used to be a sentence here saying, "The Images API specification is -developed alongside Glance, but is not considered part of the Glance project." -That's only partially true (or completely false, depending upon how strict you -are about these things). 
Conceptually, the OpenStack Images API is an -independent definition of a REST API. In practice, however, the only way -to participate in the evolution of the Images API is to work with the Glance -community to define the new functionality and provide its reference -implementation. Further, Glance falls under the "designated sections" provision -of the OpenStack DefCore Guidelines, which basically means that in order to -qualify as "OpenStack", a cloud exposing an OpenStack Images API must include -the Glance Images API implementation code. Thus, although conceptually -independent, the OpenStack Images APIs are intimately associated with Glance. - -**References** - -* `Designated sections (definition) `_ - -* `2014-04-02 DefCore Designated Sections Guidelines `_ - -* `OpenStack Core Definition `_ - -* `DefCore Guidelines Repository `_ - -Glance and the Images APIs: Past, Present, and Future ------------------------------------------------------ - -Here's a quick summary of the Images APIs that have been implemented by Glance. -If you're interested in more details, you can consult the Release Notes for all -the OpenStack releases (beginning with "Bexar") to follow the evolution of -features in Glance and the Images APIs. - -Images v1 API -************* - -The v1 API was originally designed as a service API for use by Nova and other -OpenStack services. In the Kilo release, the v1.1 API was downgraded from -CURRENT to SUPPORTED. In the Newton release, the version 1 API is officially -declared DEPRECATED. - -During the deprecation period, the Images v1 API is closed to further -development. The Glance code implementing the v1 API accepts only serious -bugfixes. - -Since Folsom, it has been possible to deploy OpenStack without exposing the -Images v1 API to end users. The Compute v2 API contains image-related API -calls allowing users to list images, list images details, show image details -for a specific image, delete images, and manipulate image metadata. 
Nova acts -as a proxy to Glance for these image-related calls. It's important to note -that the image-related calls in the Compute v2 API are a proper subset of the -calls available in the Images APIs. - -In the Newton release, Nova (and other OpenStack services that consume images) -have been modified to use the Images v2 API by default. - -**Reference** - -* `OpenStack Standard Deprecation Requirements `_ - -Images v2 API -************* - -The v2 API is the CURRENT OpenStack Images API. It provides a more friendly -interface to consumers than did the v1 API, as it was specifically designed to -expose images-related functionality as a public-facing endpoint. It's the -version that's currently open to development. - -A common strategy is to deploy multiple Glance nodes: internal-facing nodes -providing the Images APIs for internal consumers like Nova, and external-facing -nodes providing the Images v2 API for public use. - -The Future -********** - -During the long and tumultuous design phase of what has since become an -independent service named "Glare" (the Glance Artifacts Repository), the Glance -community loosely spoke about the Artifacts API being "Glance v3". This, -however, was only a shorthand way of speaking of the Artifacts effort. The -Artifacts API can't be the Images v3 API since Artifacts are not the same as -Images. Conceptually, a virtual machine image could be an Artifact, and the -Glare code has been designed to be compatible with the Images v2 API. But at -this time, there are no plans to implement an Images v3 API. - -During the Newton development cycle, Glare became an independent OpenStack -project. While it's evident that there's a need for an Artifact Repository in -OpenStack, whether it will be as ubiquitous as the need for an Images -Repository isn't clear. On the other hand, industry trends could go in the -opposite direction where everyone needs Artifacts and deployers view images as -simply another type of digital artifact. 
As Yogi Berra, an experienced -manager, once said, "It's tough to make predictions, especially about the -future." - -Authentication --------------- - -Glance depends on Keystone and the OpenStack Identity API to handle -authentication of clients. You must obtain an authentication token from -Keystone using and send it along with all API requests to Glance through -the ``X-Auth-Token`` header. Glance will communicate back to Keystone to -verify the token validity and obtain your identity credentials. - -See :ref:`authentication` for more information on integrating with Keystone. - -Using v1.X ----------- - -.. include:: ../deprecation-note.inc - -For the purpose of examples, assume there is a Glance API server running -at the URL ``http://glance.openstack.example.org`` on the default port 80. - -List Available Images -********************* - -We want to see a list of available images that the authenticated user has -access to. This includes images owned by the user, images shared with the user -and public images. - -We issue a ``GET`` request to ``http://glance.openstack.example.org/v1/images`` to -retrieve this list of available images. The data is returned as a JSON-encoded -mapping in the following format:: - - {'images': [ - {'uri': 'http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9', - 'name': 'Ubuntu 10.04 Plain', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'size': '5368709120'} - ...]} - - -List Available Images in More Detail -************************************ - -We want to see a more detailed list of available images that the authenticated -user has access to. This includes images owned by the user, images shared with -the user and public images. - -We issue a ``GET`` request to ``http://glance.openstack.example.org/v1/images/detail`` to -retrieve this list of available images. 
The data is returned as a -JSON-encoded mapping in the following format:: - - {'images': [ - {'uri': 'http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9', - 'name': 'Ubuntu 10.04 Plain 5GB', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'size': '5368709120', - 'checksum': 'c2e5db72bd7fd153f53ede5da5a06de3', - 'created_at': '2010-02-03 09:34:01', - 'updated_at': '2010-02-03 09:34:01', - 'deleted_at': '', - 'status': 'active', - 'is_public': true, - 'min_ram': 256, - 'min_disk': 5, - 'owner': null, - 'properties': {'distro': 'Ubuntu 10.04 LTS'}}, - ...]} - -.. note:: - - All timestamps returned are in UTC. - - The `updated_at` timestamp is the timestamp when an image's metadata - was last updated, not its image data, as all image data is immutable - once stored in Glance. - - The `properties` field is a mapping of free-form key/value pairs that - have been saved with the image metadata. - - The `checksum` field is an MD5 checksum of the image file data. - - The `is_public` field is a boolean indicating whether the image is - publicly available. - - The `min_ram` field is an integer specifying the minimum amount of - RAM needed to run this image on an instance, in megabytes. - - The `min_disk` field is an integer specifying the minimum amount of - disk space needed to run this image on an instance, in gigabytes. - - The `owner` field is a string which may either be null or which will - indicate the owner of the image. - -Filtering Images Lists -********************** - -Both the ``GET /v1/images`` and ``GET /v1/images/detail`` requests take query -parameters that serve to filter the returned list of images. The following -list details these query parameters. - -* ``name=NAME`` - - Filters images having a ``name`` attribute matching ``NAME``. 
- -* ``container_format=FORMAT`` - - Filters images having a ``container_format`` attribute matching ``FORMAT`` - - For more information, see :ref:`formats` - -* ``disk_format=FORMAT`` - - Filters images having a ``disk_format`` attribute matching ``FORMAT`` - - For more information, see :ref:`formats` - -* ``status=STATUS`` - - Filters images having a ``status`` attribute matching ``STATUS`` - - For more information, see :ref:`image-statuses` - -* ``size_min=BYTES`` - - Filters images having a ``size`` attribute greater than or equal to ``BYTES`` - -* ``size_max=BYTES`` - - Filters images having a ``size`` attribute less than or equal to ``BYTES`` - -These two resources also accept additional query parameters: - -* ``sort_key=KEY`` - - Results will be ordered by the specified image attribute ``KEY``. Accepted - values include ``id``, ``name``, ``status``, ``disk_format``, - ``container_format``, ``size``, ``created_at`` (default) and ``updated_at``. - -* ``sort_dir=DIR`` - - Results will be sorted in the direction ``DIR``. Accepted values are ``asc`` - for ascending or ``desc`` (default) for descending. - -* ``marker=ID`` - - An image identifier marker may be specified. When present, only images which - occur after the identifier ``ID`` will be listed. (These are the images that - have a `sort_key` later than that of the marker ``ID`` in the `sort_dir` - direction.) - -* ``limit=LIMIT`` - - When present, the maximum number of results returned will not exceed ``LIMIT``. - -.. note:: - - If the specified ``LIMIT`` exceeds the operator defined limit (api_limit_max) - then the number of results returned may be less than ``LIMIT``. - -* ``is_public=PUBLIC`` - - An admin user may use the `is_public` parameter to control which results are - returned. - - When the `is_public` parameter is absent or set to `True` the following images - will be listed: Images whose `is_public` field is `True`, owned images and - shared images. 
- - When the `is_public` parameter is set to `False` the following images will be - listed: Images (owned, shared, or non-owned) whose `is_public` field is `False`. - - When the `is_public` parameter is set to `None` all images will be listed - irrespective of owner, shared status or the `is_public` field. - -.. note:: - - Use of the `is_public` parameter is restricted to admin users. For all other - users it will be ignored. - -Retrieve Image Metadata -*********************** - -We want to see detailed information for a specific virtual machine image -that the Glance server knows about. - -We have queried the Glance server for a list of images and the -data returned includes the `uri` field for each available image. This -`uri` field value contains the exact location needed to get the metadata -for a specific image. - -Continuing the example from above, in order to get metadata about the -first image returned, we can issue a ``HEAD`` request to the Glance -server for the image's URI. - -We issue a ``HEAD`` request to -``http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9`` to -retrieve complete metadata for that image. The metadata is returned as a -set of HTTP headers that begin with the prefix ``x-image-meta-``. The -following shows an example of the HTTP headers returned from the above -``HEAD`` request:: - - x-image-meta-uri http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9 - x-image-meta-name Ubuntu 10.04 Plain 5GB - x-image-meta-disk_format vhd - x-image-meta-container_format ovf - x-image-meta-size 5368709120 - x-image-meta-checksum c2e5db72bd7fd153f53ede5da5a06de3 - x-image-meta-created_at 2010-02-03 09:34:01 - x-image-meta-updated_at 2010-02-03 09:34:01 - x-image-meta-deleted_at - x-image-meta-status available - x-image-meta-is_public true - x-image-meta-min_ram 256 - x-image-meta-min_disk 0 - x-image-meta-owner null - x-image-meta-property-distro Ubuntu 10.04 LTS - -.. 
note:: - - All timestamps returned are in UTC. - - The `x-image-meta-updated_at` timestamp is the timestamp when an - image's metadata was last updated, not its image data, as all - image data is immutable once stored in Glance. - - There may be multiple headers that begin with the prefix - `x-image-meta-property-`. These headers are free-form key/value pairs - that have been saved with the image metadata. The key is the string - after `x-image-meta-property-` and the value is the value of the header. - - The response's `ETag` header will always be equal to the - `x-image-meta-checksum` value. - - The response's `x-image-meta-is_public` value is a boolean indicating - whether the image is publicly available. - - The response's `x-image-meta-owner` value is a string which may either - be null or which will indicate the owner of the image. - - -Retrieve Raw Image Data -*********************** - -We want to retrieve that actual raw data for a specific virtual machine image -that the Glance server knows about. - -We have queried the Glance server for a list of images and the -data returned includes the `uri` field for each available image. This -`uri` field value contains the exact location needed to get the metadata -for a specific image. - -Continuing the example from above, in order to get metadata about the -first image returned, we can issue a ``HEAD`` request to the Glance -server for the image's URI. - -We issue a ``GET`` request to -``http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9`` to -retrieve metadata for that image as well as the image itself encoded -into the response body. - -The metadata is returned as a set of HTTP headers that begin with the -prefix ``x-image-meta-``. 
The following shows an example of the HTTP headers -returned from the above ``GET`` request:: - - x-image-meta-uri http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9 - x-image-meta-name Ubuntu 10.04 Plain 5GB - x-image-meta-disk_format vhd - x-image-meta-container_format ovf - x-image-meta-size 5368709120 - x-image-meta-checksum c2e5db72bd7fd153f53ede5da5a06de3 - x-image-meta-created_at 2010-02-03 09:34:01 - x-image-meta-updated_at 2010-02-03 09:34:01 - x-image-meta-deleted_at - x-image-meta-status available - x-image-meta-is_public true - x-image-meta-min_ram 256 - x-image-meta-min_disk 5 - x-image-meta-owner null - x-image-meta-property-distro Ubuntu 10.04 LTS - -.. note:: - - All timestamps returned are in UTC. - - The `x-image-meta-updated_at` timestamp is the timestamp when an - image's metadata was last updated, not its image data, as all - image data is immutable once stored in Glance. - - There may be multiple headers that begin with the prefix - `x-image-meta-property-`. These headers are free-form key/value pairs - that have been saved with the image metadata. The key is the string - after `x-image-meta-property-` and the value is the value of the header. - - The response's `Content-Length` header shall be equal to the value of - the `x-image-meta-size` header. - - The response's `ETag` header will always be equal to the - `x-image-meta-checksum` value. - - The response's `x-image-meta-is_public` value is a boolean indicating - whether the image is publicly available. - - The response's `x-image-meta-owner` value is a string which may either - be null or which will indicate the owner of the image. - - The image data itself will be the body of the HTTP response returned - from the request, which will have content-type of - `application/octet-stream`. 
- - -Add a New Image -*************** - -We have created a new virtual machine image in some way (created a -"golden image" or snapshotted/backed up an existing image) and we -wish to do two things: - - * Store the disk image data in Glance - * Store metadata about this image in Glance - -We can do the above two activities in a single call to the Glance API. -Assuming, like in the examples above, that a Glance API server is running -at ``http://glance.openstack.example.org``, we issue a ``POST`` request to add an image to -Glance:: - - POST http://glance.openstack.example.org/v1/images - -The metadata about the image is sent to Glance in HTTP headers. The body -of the HTTP request to the Glance API will be the MIME-encoded disk -image data. - - -Reserve a New Image -******************* - -We can also perform the activities described in `Add a New Image`_ using two -separate calls to the Image API; the first to register the image metadata, and -the second to add the image disk data. This is known as "reserving" an image. - -The first call should be a ``POST`` to ``http://glance.openstack.example.org/v1/images``, -which will result in a new image id being registered with a status of -``queued``:: - - {'image': - {'status': 'queued', - 'id': '71c675ab-d94f-49cd-a114-e12490b328d9', - ...} - ...} - -The image data can then be added using a ``PUT`` to -``http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9``. -The image status will then be set to ``active`` by Glance. - - -**Image Metadata in HTTP Headers** - -Glance will view as image metadata any HTTP header that it receives in a -``POST`` request where the header key is prefixed with the strings -``x-image-meta-`` and ``x-image-meta-property-``. - -The list of metadata headers that Glance accepts are listed below. - -* ``x-image-meta-name`` - - This header is required, unless reserving an image. Its value should be the - name of the image. 
- - Note that the name of an image *is not unique to a Glance node*. It - would be an unrealistic expectation of users to know all the unique - names of all other user's images. - -* ``x-image-meta-id`` - - This header is optional. - - When present, Glance will use the supplied identifier for the image. - If the identifier already exists in that Glance node, then a - **409 Conflict** will be returned by Glance. The value of the header - must be a uuid in hexadecimal string notation - (that is 71c675ab-d94f-49cd-a114-e12490b328d9). - - When this header is *not* present, Glance will generate an identifier - for the image and return this identifier in the response (see below). - -* ``x-image-meta-store`` - - This header is optional. Valid values are one of ``file``, ``rbd``, - ``swift``, ``cinder``, ``sheepdog`` or ``vsphere``. - - When present, Glance will attempt to store the disk image data in the - backing store indicated by the value of the header. If the Glance node - does not support the backing store, Glance will return a **400 Bad Request**. - - When not present, Glance will store the disk image data in the backing - store that is marked as default. See the configuration option ``default_store`` - for more information. - -* ``x-image-meta-disk_format`` - - This header is required, unless reserving an image. Valid values are one of - ``aki``, ``ari``, ``ami``, ``raw``, ``iso``, ``vhd``, ``vhdx``, ``vdi``, - ``qcow2``, ``vmdk`` or ``ploop``. - - For more information, see :ref:`formats`. - -* ``x-image-meta-container_format`` - - This header is required, unless reserving an image. Valid values are one of - ``aki``, ``ari``, ``ami``, ``bare``, ``ova``, ``ovf``, or ``docker``. - - For more information, see :ref:`formats`. - -* ``x-image-meta-size`` - - This header is optional. - - When present, Glance assumes that the expected size of the request body - will be the value of this header. 
If the length in bytes of the request - body *does not match* the value of this header, Glance will return a - **400 Bad Request**. - - When not present, Glance will calculate the image's size based on the size - of the request body. - -* ``x-image-meta-checksum`` - - This header is optional. When present, it specifies the **MD5** checksum - of the image file data. - - When present, Glance will verify the checksum generated from the back-end - store while storing your image against this value and return a - **400 Bad Request** if the values do not match. - -* ``x-image-meta-is_public`` - - This header is optional. - - When Glance finds the string "true" (case-insensitive), the image is marked as - a public one, meaning that any user may view its metadata and may read - the disk image from Glance. - - When not present, the image is assumed to be *not public* and owned by - a user. - -* ``x-image-meta-min_ram`` - - This header is optional. When present, it specifies the minimum amount of - RAM in megabytes required to run this image on a server. - - When not present, the image is assumed to have a minimum RAM requirement of 0. - -* ``x-image-meta-min_disk`` - - This header is optional. When present, it specifies the expected minimum disk - space in gigabytes required to run this image on a server. - - When not present, the image is assumed to have a minimum disk space - requirement of 0. - -* ``x-image-meta-owner`` - - This header is optional and only meaningful for admins. - - Glance normally sets the owner of an image to be the tenant or user - (depending on the "owner_is_tenant" configuration option) of the - authenticated user issuing the request. However, if the authenticated user - has the Admin role, this default may be overridden by setting this header to - null or to a string identifying the owner of the image. 
- -* ``x-image-meta-property-*`` - - When Glance receives any HTTP header whose key begins with the string prefix - ``x-image-meta-property-``, Glance adds the key and value to a set of custom, - free-form image properties stored with the image. The key is a - lower-cased string following the prefix ``x-image-meta-property-`` with dashes - and punctuation replaced with underscores. - - For example, if the following HTTP header were sent:: - - x-image-meta-property-distro Ubuntu 10.10 - - then a key/value pair of "distro"/"Ubuntu 10.10" will be stored with the - image in Glance. - - There is no limit on the number of free-form key/value attributes that can - be attached to the image. However, keep in mind that the 8K limit on the - size of all the HTTP headers sent in a request will effectively limit the - number of image properties. - - -Update an Image -*************** - -Glance will consider any HTTP header that it receives in a ``PUT`` request -as an instance of image metadata. In this case, the header key should be -prefixed with the strings ``x-image-meta-`` and ``x-image-meta-property-``. - -If an image was previously reserved, and thus is in the ``queued`` state, then -image data can be added by including it as the request body. If the image -already has data associated with it (for example, it is not in the ``queued`` -state), then including a request body will result in a **409 Conflict** -exception. - -On success, the ``PUT`` request will return the image metadata encoded as HTTP -headers. - -See more about image statuses here: :ref:`image-statuses` - - -List Image Memberships -********************** - -We want to see a list of the other system tenants (or users, if -"owner_is_tenant" is False) that may access a given virtual machine image that -the Glance server knows about. We take the `uri` field of the image data, -append ``/members`` to it, and issue a ``GET`` request on the resulting URL. 
- -Continuing from the example above, in order to get the memberships for the -first image returned, we can issue a ``GET`` request to the Glance -server for -``http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9/members``. -And we will get back JSON data such as the following:: - - {'members': [ - {'member_id': 'tenant1', - 'can_share': false} - ...]} - -The `member_id` field identifies a tenant with which the image is shared. If -that tenant is authorized to further share the image, the `can_share` field is -`true`. - - -List Shared Images -****************** - -We want to see a list of images which are shared with a given tenant. We issue -a ``GET`` request to ``http://glance.openstack.example.org/v1/shared-images/tenant1``. We -will get back JSON data such as the following:: - - {'shared_images': [ - {'image_id': '71c675ab-d94f-49cd-a114-e12490b328d9', - 'can_share': false} - ...]} - -The `image_id` field identifies an image shared with the tenant named by -*member_id*. If the tenant is authorized to further share the image, the -`can_share` field is `true`. - - -Add a Member to an Image -************************ - -We want to authorize a tenant to access a private image. We issue a ``PUT`` -request to -``http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9/members/tenant1``. -With no body, this will add the membership to the image, leaving existing -memberships unmodified and defaulting new memberships to have `can_share` -set to `false`. We may also optionally attach a body of the following form:: - - {'member': - {'can_share': true} - } - -If such a body is provided, both existing and new memberships will have -`can_share` set to the provided value (either `true` or `false`). This query -will return a 204 ("No Content") status code. - - -Remove a Member from an Image -***************************** - -We want to revoke a tenant's right to access a private image. 
We issue a -``DELETE`` request to ``http://glance.openstack.example.org/v1/images/1/members/tenant1``. -This query will return a 204 ("No Content") status code. - - -Replace a Membership List for an Image -************************************** - -The full membership list for a given image may be replaced. We issue a ``PUT`` -request to -``http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9/members`` -with a body of the following form:: - - {'memberships': [ - {'member_id': 'tenant1', - 'can_share': false} - ...]} - -All existing memberships which are not named in the replacement body are -removed, and those which are named have their `can_share` settings changed as -specified. (The `can_share` setting may be omitted, which will cause that -setting to remain unchanged in the existing memberships.) All new memberships -will be created, with `can_share` defaulting to `false` unless it is specified -otherwise. - - -Image Membership Changes in Version 2.0 ---------------------------------------- - -Version 2.0 of the Images API eliminates the ``can_share`` attribute of image -membership. In the version 2.0 model, image sharing is not transitive. - -In version 2.0, image members have a ``status`` attribute that reflects -how the image should be treated with respect to that image member's image-list. - -* The ``status`` attribute may have one of three values: ``pending``, - ``accepted``, or ``rejected``. - -* By default, only those shared images with status ``accepted`` are included in - an image member's image-list. - -* Only an image member may change his/her own membership status. - -* Only an image owner may create members on an image. The status of a newly - created image member is ``pending``. The image owner cannot change the - status of a member. - - -Distinctions from Version 1.x API Calls -*************************************** - -* The response to a request to list the members of an image has changed. 
- - call: ``GET`` on ``/v2/images/{imageId}/members`` - - response: see the JSON schema at ``/v2/schemas/members`` - -* The request body in the call to create an image member has changed. - - call: ``POST`` to ``/v2/images/{imageId}/members`` - - request body:: - - { "member": "" } - - where the {memberId} is the tenant ID of the image member. - - The member status of a newly created image member is ``pending``. - -New API Calls -************* - -* Change the status of an image member - - call: ``PUT`` on ``/v2/images/{imageId}/members/{memberId}`` - - request body:: - - { "status": "" } - - where is ``pending``, ``accepted``, or ``rejected``. - The {memberId} is the tenant ID of the image member. - -Images v2 Tasks API -------------------- - -Version 2 of the OpenStack Images API introduces a Task resource that is used -to create and monitor long-running asynchronous image-related processes. See -the :ref:`tasks` section of the Glance documentation for more -information. - -The following Task calls are available: - -Create a Task -************* - -A user wants to initiate a task. The user issues a ``POST`` request to -``/v2/tasks``. The request body is of Content-type ``application/json`` and -must contain the following fields: - -* ``type``: a string specified by the enumeration defined in the Task schema - -* ``input``: a JSON object. The content is defined by the cloud provider who - has exposed the endpoint being contacted - -The response is a Task entity as defined by the Task schema. It includes an -``id`` field that can be used in a subsequent call to poll the task for status -changes. - -A task is created in ``pending`` status. - -Show a Task -*********** - -A user wants to see detailed information about a task the user owns. The user -issues a ``GET`` request to ``/v2/tasks/{taskId}``. - -The response is in ``application/json`` format. The exact structure is given -by the task schema located at ``/v2/schemas/task``. 
- -List Tasks -********** - -A user wants to see what tasks have been created in his or her project. The -user issues a ``GET`` request to ``/v2/tasks``. - -The response is in ``application/json`` format. The exact structure is given -by the task schema located at ``/v2/schemas/tasks``. - -Note that, as indicated by the schema, the list of tasks is provided in a -sparse format. To see more information about a particular task in the list, -the user would use the show task call described above. - -Filtering and Sorting the Tasks List -************************************ - -The ``GET /v2/tasks`` request takes query parameters that server to filter the -returned list of tasks. The following list details these query parameters. - -* ``status={status}`` - - Filters the list to display only those tasks in the specified status. See - the task schema or the :ref:`task-statuses` section of this - documentation for the legal values to use for ``{status}``. - - For example, a request to ``GET /v2/tasks?status=pending`` would return only - those tasks whose current status is ``pending``. - -* ``type={type}`` - - Filters the list to display only those tasks of the specified type. See the - enumeration defined in the task schema for the legal values to use for - ``{type}``. - - For example, a request to ``GET /v2/tasks?type=import`` would return only - import tasks. - -* ``sort_dir={direction}`` - - Sorts the list of tasks according to ``updated_at`` datetime. Legal values - are ``asc`` (ascending) and ``desc`` (descending). By default, the task list - is sorted by ``created_at`` time in descending chronological order. - - - - -API Message Localization ------------------------- -Glance supports HTTP message localization. For example, an HTTP client can -receive API messages in Chinese even if the locale language of the server is -English. 
- -How to use it -************* -To receive localized API messages, the HTTP client needs to specify the -**Accept-Language** header to indicate the language that will translate the -message. For more information about Accept-Language, please refer to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -A typical curl API request will be like below:: - - curl -i -X GET -H 'Accept-Language: zh' -H 'Content-Type: application/json' - http://glance.openstack.example.org/v2/images/aaa - -Then the response will be like the following:: - - HTTP/1.1 404 Not Found - Content-Length: 234 - Content-Type: text/html; charset=UTF-8 - X-Openstack-Request-Id: req-54d403a0-064e-4544-8faf-4aeef086f45a - Date: Sat, 22 Feb 2014 06:26:26 GMT - - - - 404 Not Found - - -

404 Not Found

- 找不到任何具有标识 aaa 的映像

- - - -.. note:: - Make sure to have a language package under /usr/share/locale-langpack/ on - the target Glance server. diff --git a/doc/source/user/glanceclient.rst b/doc/source/user/glanceclient.rst deleted file mode 100644 index ad90b1a7..00000000 --- a/doc/source/user/glanceclient.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. - Copyright 2011-2012 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Using Glance's Client Tools -=========================== - -The command-line tool and python library for Glance are both installed -through the python-glanceclient project. Explore the following resources -for more information: - -* `Official Docs `_ -* `Pypi Page `_ -* `GitHub Project `_ diff --git a/doc/source/user/glancemetadefcatalogapi.rst b/doc/source/user/glancemetadefcatalogapi.rst deleted file mode 100644 index cf3eaab8..00000000 --- a/doc/source/user/glancemetadefcatalogapi.rst +++ /dev/null @@ -1,605 +0,0 @@ -.. - Copyright (c) 2014 Hewlett-Packard Development Company, L.P. - - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied. 
- See the License for the specific language governing permissions and - limitations under the License. - -Using Glance's Metadata Definitions Catalog Public APIs -======================================================= - -A common API hosted by the Glance service for vendors, admins, services, and -users to meaningfully define available key / value pair and tag metadata. -The intent is to enable better metadata collaboration across artifacts, -services, and projects for OpenStack users. - -This is about the definition of the available metadata that can be used on -different types of resources (images, artifacts, volumes, flavors, aggregates, -etc). A definition includes the properties type, its key, it's description, -and it's constraints. This catalog will not store the values for specific -instance properties. - -For example, a definition of a virtual CPU topology property for number of -cores will include the key to use, a description, and value constraints like -requiring it to be an integer. So, a user, potentially through Horizon, would -be able to search this catalog to list the available properties they can add to -a flavor or image. They will see the virtual CPU topology property in the list -and know that it must be an integer. In the Horizon example, when the user adds -the property, its key and value will be stored in the service that owns that -resource (Nova for flavors and in Glance for images). - -Diagram: https://wiki.openstack.org/w/images/b/bb/Glance-Metadata-API.png - -Glance Metadata Definitions Catalog implementation started with API version v2. - -Authentication --------------- - -Glance depends on Keystone and the OpenStack Identity API to handle -authentication of clients. You must obtain an authentication token from -Keystone send it along with all API requests to Glance through the -``X-Auth-Token`` header. Glance will communicate back to Keystone to verify -the token validity and obtain your identity credentials. 
- -See :ref:`authentication` for more information on integrating with Keystone. - -Using v2.X ----------- - -For the purpose of examples, assume there is a Glance API server running -at the URL ``http://glance.openstack.example.org`` on the default port 80. - -List Available Namespaces -************************* - -We want to see a list of available namespaces that the authenticated user -has access to. This includes namespaces owned by the user, -namespaces shared with the user and public namespaces. - -We issue a ``GET`` request to ``http://glance.openstack.example.org/v2/metadefs/namespaces`` -to retrieve this list of available namespaces. -The data is returned as a JSON-encoded mapping in the following format:: - - { - "namespaces": [ - { - "namespace": "MyNamespace", - "display_name": "My User Friendly Namespace", - "description": "My description", - "visibility": "public", - "protected": true, - "owner": "The Test Owner", - "self": "/v2/metadefs/namespaces/MyNamespace", - "schema": "/v2/schemas/metadefs/namespace", - "created_at": "2014-08-28T17:13:06Z", - "updated_at": "2014-08-28T17:13:06Z", - "resource_type_associations": [ - { - "name": "OS::Nova::Aggregate", - "created_at": "2014-08-28T17:13:06Z", - "updated_at": "2014-08-28T17:13:06Z" - }, - { - "name": "OS::Nova::Flavor", - "prefix": "aggregate_instance_extra_specs:", - "created_at": "2014-08-28T17:13:06Z", - "updated_at": "2014-08-28T17:13:06Z" - } - ] - } - ], - "first": "/v2/metadefs/namespaces?sort_key=created_at&sort_dir=asc", - "schema": "/v2/schemas/metadefs/namespaces" - } - - -.. note:: - Listing namespaces will only show the summary of each namespace including - counts and resource type associations. Detailed response including all its - objects definitions, property definitions etc. will only be available on - each individual GET namespace request. 
- -Filtering Namespaces Lists -************************** - -``GET /v2/metadefs/namespaces`` requests take query parameters that serve to -filter the returned list of namespaces. The following -list details these query parameters. - -* ``resource_types=RESOURCE_TYPES`` - - Filters namespaces having a ``resource_types`` within the list of - comma separated ``RESOURCE_TYPES``. - -GET resource also accepts additional query parameters: - -* ``sort_key=KEY`` - - Results will be ordered by the specified sort attribute ``KEY``. Accepted - values include ``namespace``, ``created_at`` (default) and ``updated_at``. - -* ``sort_dir=DIR`` - - Results will be sorted in the direction ``DIR``. Accepted values are ``asc`` - for ascending or ``desc`` (default) for descending. - -* ``marker=NAMESPACE`` - - A namespace identifier marker may be specified. When present only - namespaces which occur after the identifier ``NAMESPACE`` will be listed, - i.e. the namespaces which have a `sort_key` later than that of the marker - ``NAMESPACE`` in the `sort_dir` direction. - -* ``limit=LIMIT`` - - When present the maximum number of results returned will not exceed ``LIMIT``. - -.. note:: - - If the specified ``LIMIT`` exceeds the operator defined limit (api_limit_max) - then the number of results returned may be less than ``LIMIT``. - -* ``visibility=PUBLIC`` - - An admin user may use the `visibility` parameter to control which results are - returned (PRIVATE or PUBLIC). - - -Retrieve Namespace -****************** - -We want to see a more detailed information about a namespace that the -authenticated user has access to. The detail includes the properties, objects, -and resource type associations. - -We issue a ``GET`` request to ``http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}`` -to retrieve the namespace details. 
-The data is returned as a JSON-encoded mapping in the following format:: - - { - "namespace": "MyNamespace", - "display_name": "My User Friendly Namespace", - "description": "My description", - "visibility": "public", - "protected": true, - "owner": "The Test Owner", - "schema": "/v2/schemas/metadefs/namespace", - "resource_type_associations": [ - { - "name": "OS::Glance::Image", - "prefix": "hw_", - "created_at": "2014-08-28T17:13:06Z", - "updated_at": "2014-08-28T17:13:06Z" - }, - { - "name": "OS::Cinder::Volume", - "prefix": "hw_", - "properties_target": "image", - "created_at": "2014-08-28T17:13:06Z", - "updated_at": "2014-08-28T17:13:06Z" - }, - { - "name": "OS::Nova::Flavor", - "prefix": "filter1:", - "created_at": "2014-08-28T17:13:06Z", - "updated_at": "2014-08-28T17:13:06Z" - } - ], - "properties": { - "nsprop1": { - "title": "My namespace property1", - "description": "More info here", - "type": "boolean", - "default": true - }, - "nsprop2": { - "title": "My namespace property2", - "description": "More info here", - "type": "string", - "default": "value1" - } - }, - "objects": [ - { - "name": "object1", - "description": "my-description", - "self": "/v2/metadefs/namespaces/MyNamespace/objects/object1", - "schema": "/v2/schemas/metadefs/object", - "created_at": "2014-08-28T17:13:06Z", - "updated_at": "2014-08-28T17:13:06Z", - "required": [], - "properties": { - "prop1": { - "title": "My object1 property1", - "description": "More info here", - "type": "array", - "items": { - "type": "string" - } - } - } - }, - { - "name": "object2", - "description": "my-description", - "self": "/v2/metadefs/namespaces/MyNamespace/objects/object2", - "schema": "/v2/schemas/metadefs/object", - "created_at": "2014-08-28T17:13:06Z", - "updated_at": "2014-08-28T17:13:06Z", - "properties": { - "prop1": { - "title": "My object2 property1", - "description": "More info here", - "type": "integer", - "default": 20 - } - } - } - ] - } - -Retrieve available Resource Types 
-********************************* - -We want to see the list of all resource types that are available in Glance - -We issue a ``GET`` request to ``http://glance.openstack.example.org/v2/metadefs/resource_types`` -to retrieve all resource types. - -The data is returned as a JSON-encoded mapping in the following format:: - - { - "resource_types": [ - { - "created_at": "2014-08-28T17:13:04Z", - "name": "OS::Glance::Image", - "updated_at": "2014-08-28T17:13:04Z" - }, - { - "created_at": "2014-08-28T17:13:04Z", - "name": "OS::Cinder::Volume", - "updated_at": "2014-08-28T17:13:04Z" - }, - { - "created_at": "2014-08-28T17:13:04Z", - "name": "OS::Nova::Flavor", - "updated_at": "2014-08-28T17:13:04Z" - }, - { - "created_at": "2014-08-28T17:13:04Z", - "name": "OS::Nova::Aggregate", - "updated_at": "2014-08-28T17:13:04Z" - }, - { - "created_at": "2014-08-28T17:13:04Z", - "name": "OS::Nova::Server", - "updated_at": "2014-08-28T17:13:04Z" - } - ] - } - - -Retrieve Resource Types associated with a Namespace -*************************************************** - -We want to see the list of resource types that are associated for a specific -namespace - -We issue a ``GET`` request to ``http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/resource_types`` -to retrieve resource types. - -The data is returned as a JSON-encoded mapping in the following format:: - - { - "resource_type_associations" : [ - { - "name" : "OS::Glance::Image", - "prefix" : "hw_", - "created_at": "2014-08-28T17:13:04Z", - "updated_at": "2014-08-28T17:13:04Z" - }, - { - "name" :"OS::Cinder::Volume", - "prefix" : "hw_", - "properties_target" : "image", - "created_at": "2014-08-28T17:13:04Z", - "updated_at": "2014-08-28T17:13:04Z" - }, - { - "name" : "OS::Nova::Flavor", - "prefix" : "hw:", - "created_at": "2014-08-28T17:13:04Z", - "updated_at": "2014-08-28T17:13:04Z" - } - ] - } - -Add Namespace -************* - -We want to create a new namespace that can contain the properties, objects, -etc. 
- -We issue a ``POST`` request to add an namespace to Glance:: - - POST http://glance.openstack.example.org/v2/metadefs/namespaces/ - -The input data is an JSON-encoded mapping in the following format:: - - { - "namespace": "MyNamespace", - "display_name": "My User Friendly Namespace", - "description": "My description", - "visibility": "public", - "protected": true - } - -.. note:: - Optionally properties, objects and resource type associations could be - added in the same input. See GET Namespace output above(input will be - similar). - -Update Namespace -**************** - -We want to update an existing namespace - -We issue a ``PUT`` request to update an namespace to Glance:: - - PUT http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace} - -The input data is similar to Add Namespace - - -Delete Namespace -**************** - -We want to delete an existing namespace including all its objects, -properties etc. - -We issue a ``DELETE`` request to delete an namespace to Glance:: - - DELETE http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace} - - -Associate Resource Type with Namespace -************************************** - -We want to associate a resource type with an existing namespace - -We issue a ``POST`` request to associate resource type to Glance:: - - POST http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/resource_types - -The input data is an JSON-encoded mapping in the following format:: - - { - "name" :"OS::Cinder::Volume", - "prefix" : "hw_", - "properties_target" : "image", - "created_at": "2014-08-28T17:13:04Z", - "updated_at": "2014-08-28T17:13:04Z" - } - - -Remove Resource Type associated with a Namespace -************************************************ - -We want to de-associate namespace from a resource type - -We issue a ``DELETE`` request to de-associate namespace resource type to -Glance:: - - DELETE 
http://glance.openstack.example.org/v2//metadefs/namespaces/{namespace}/resource_types/{resource_type} - - -List Objects in Namespace -************************* - -We want to see the list of meta definition objects in a specific namespace - -We issue a ``GET`` request to ``http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/objects`` -to retrieve objects. - -The data is returned as a JSON-encoded mapping in the following format:: - - { - "objects": [ - { - "name": "object1", - "description": "my-description", - "self": "/v2/metadefs/namespaces/MyNamespace/objects/object1", - "schema": "/v2/schemas/metadefs/object", - "created_at": "2014-08-28T17:13:06Z", - "updated_at": "2014-08-28T17:13:06Z", - "required": [], - "properties": { - "prop1": { - "title": "My object1 property1", - "description": "More info here", - "type": "array", - "items": { - "type": "string" - } - } - } - }, - { - "name": "object2", - "description": "my-description", - "self": "/v2/metadefs/namespaces/MyNamespace/objects/object2", - "schema": "/v2/schemas/metadefs/object", - "created_at": "2014-08-28T17:13:06Z", - "updated_at": "2014-08-28T17:13:06Z", - "properties": { - "prop1": { - "title": "My object2 property1", - "description": "More info here", - "type": "integer", - "default": 20 - } - } - } - ], - "schema": "/v2/schemas/metadefs/objects" - } - -Add object in a specific namespace -********************************** - -We want to create a new object which can group the properties - -We issue a ``POST`` request to add object to a namespace in Glance:: - - POST http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/objects - - -The input data is an JSON-encoded mapping in the following format:: - - { - "name": "StorageQOS", - "description": "Our available storage QOS.", - "required": [ - "minIOPS" - ], - "properties": { - "minIOPS": { - "type": "integer", - "description": "The minimum IOPs required", - "default": 100, - "minimum": 100, - "maximum": 30000369 
- }, - "burstIOPS": { - "type": "integer", - "description": "The expected burst IOPs", - "default": 1000, - "minimum": 100, - "maximum": 30000377 - } - } - } - -Update Object in a specific namespace -************************************* - -We want to update an existing object - -We issue a ``PUT`` request to update an object to Glance:: - - PUT http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/objects/{object_name} - -The input data is similar to Add Object - - -Delete Object in a specific namespace -************************************* - -We want to delete an existing object. - -We issue a ``DELETE`` request to delete object in a namespace to Glance:: - - DELETE http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/objects/{object_name} - - -Add property definition in a specific namespace -*********************************************** - -We want to create a new property definition in a namespace - -We issue a ``POST`` request to add property definition to a namespace in -Glance:: - - POST http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/properties - - -The input data is an JSON-encoded mapping in the following format:: - - { - "name": "hypervisor_type", - "title" : "Hypervisor", - "type": "array", - "description": "The type of hypervisor required", - "items": { - "type": "string", - "enum": [ - "hyperv", - "qemu", - "kvm" - ] - } - } - - -Update property definition in a specific namespace -************************************************** - -We want to update an existing object - -We issue a ``PUT`` request to update an property definition in a namespace to -Glance:: - - PUT http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/properties/{property_name} - -The input data is similar to Add property definition - - -Delete property definition in a specific namespace -************************************************** - -We want to delete an existing object. 
- -We issue a ``DELETE`` request to delete property definition in a namespace to -Glance:: - - DELETE http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/properties/{property_name} - - -API Message Localization ------------------------- -Glance supports HTTP message localization. For example, an HTTP client can -receive API messages in Chinese even if the locale language of the server is -English. - -How to use it -************* -To receive localized API messages, the HTTP client needs to specify the -**Accept-Language** header to indicate the language to use to translate the -message. For more info about Accept-Language, please refer http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -A typical curl API request will be like below:: - - curl -i -X GET -H 'Accept-Language: zh' -H 'Content-Type: application/json' - http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace} - -Then the response will be like the following:: - - HTTP/1.1 404 Not Found - Content-Length: 234 - Content-Type: text/html; charset=UTF-8 - X-Openstack-Request-Id: req-54d403a0-064e-4544-8faf-4aeef086f45a - Date: Sat, 22 Feb 2014 06:26:26 GMT - - - - 404 Not Found - - -

404 Not Found

- 找不到任何具有标识 aaa 的映像

- - - -.. note:: - Be sure there is the language package under /usr/share/locale-langpack/ on - the target Glance server. diff --git a/doc/source/user/identifiers.rst b/doc/source/user/identifiers.rst deleted file mode 100644 index a6f5f741..00000000 --- a/doc/source/user/identifiers.rst +++ /dev/null @@ -1,27 +0,0 @@ -.. - Copyright 2010 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Image Identifiers -================= - -Images are uniquely identified by way of a URI that -matches the following signature:: - - /v1/images/ - -where `` is the resource location of the Glance service -that knows about an image, and `` is the image's identifier. Image -identifiers in Glance are *uuids*, making them *globally unique*. diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst deleted file mode 100644 index f798f145..00000000 --- a/doc/source/user/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -============ - User guide -============ - -.. toctree:: - :maxdepth: 2 - - identifiers - statuses - formats - common-image-properties - metadefs-concepts - glanceapi - glanceclient - glancemetadefcatalogapi - signature diff --git a/doc/source/user/metadefs-concepts.rst b/doc/source/user/metadefs-concepts.rst deleted file mode 100644 index a2a79b45..00000000 --- a/doc/source/user/metadefs-concepts.rst +++ /dev/null @@ -1,185 +0,0 @@ -.. - Copyright (c) 2014 Hewlett-Packard Development Company, L.P. - All Rights Reserved. 
- - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Metadata Definition Concepts -============================ - -The metadata definition service was added to Glance in the Juno release of -OpenStack. - -It provides a common API for vendors, admins, services, and users to -meaningfully **define** available key / value pair metadata that -can be used on different types of resources (images, artifacts, volumes, -flavors, aggregates, and other resources). A definition includes a property's -key, its description, its constraints, and the resource types to which it -can be associated. - -This catalog does not store the values for specific instance properties. - -For example, a definition of a virtual CPU topology property for the number of -cores will include the base key to use (for example, cpu_cores), a description, -and value constraints like requiring it to be an integer. So, a user, -potentially through Horizon, would be able to search this catalog to list the -available properties they can add to a flavor or image. They will see the -virtual CPU topology property in the list and know that it must be an integer. - -When the user adds the property its key and value will be stored in the -service that owns that resource (for example, Nova for flavors and in Glance -for images). The catalog also includes any additional prefix required when -the property is applied to different types of resources, such as "hw\_" for -images and "hw:" for flavors. 
So, on an image, the user would know to set the -property as "hw_cpu_cores=1". - -Terminology ------------ - -Background -~~~~~~~~~~ -The term *metadata* can become very overloaded and confusing. This -catalog is about the additional metadata that is passed as arbitrary -key / value pairs or tags across various artifacts and OpenStack services. - -Below are a few examples of the various terms used for metadata across -OpenStack services today: - -+-------------------------+---------------------------+----------------------+ -| Nova | Cinder | Glance | -+=========================+===========================+======================+ -| Flavor | Volume & Snapshot | Image & Snapshot | -| + *extra specs* | + *image metadata* | + *properties* | -| Host Aggregate | + *metadata* | + *tags* | -| + *metadata* | VolumeType | | -| Servers | + *extra specs* | | -| + *metadata* | + *qos specs* | | -| + *scheduler_hints* | | | -| + *tags* | | | -+-------------------------+---------------------------+----------------------+ - -Catalog Concepts -~~~~~~~~~~~~~~~~ - -The below figure illustrates the concept terminology used in the metadata -definitions catalog:: - - A namespace is associated with 0 to many resource types, making it visible to - the API / UI for applying to that type of resource. RBAC Permissions are - managed at a namespace level. - - +----------------------------------------------+ - | Namespace | - | | - | +-----------------------------------------+ | - | | Object Definition | | - | | | | +--------------------+ - | | +-------------------------------------+ | | +--> | Resource Type: | - | | | Property Definition A (key=integer) | | | | | e.g. Nova Flavor | - | | +-------------------------------------+ | | | +--------------------+ - | | | | | - | | +-------------------------------------+ | | | - | | | Property Definition B (key=string) | | | | +--------------------+ - | | +-------------------------------------+ | +--+--> | Resource Type: | - | | | | | | e.g. 
Glance Image | - | +-----------------------------------------+ | | +--------------------+ - | | | - | +-------------------------------------+ | | - | | Property Definition C (key=boolean) | | | +--------------------+ - | +-------------------------------------+ | +--> | Resource Type: | - | | | e.g. Cinder Volume | - +----------------------------------------------+ +--------------------+ - - Properties may be defined standalone or within the context of an object. - - -Catalog Terminology -~~~~~~~~~~~~~~~~~~~ - -The following terminology is used within the metadata definition catalog. - -**Namespaces** - -Metadata definitions are contained in namespaces. - -- Specify the access controls (CRUD) for everything defined in it. Allows for - admin only, different projects, or the entire cloud to define and use the - definitions in the namespace -- Associates the contained definitions to different types of resources - -**Properties** - -A property describes a single property and its primitive constraints. Each -property can ONLY be a primitive type: - -* string, integer, number, boolean, array - -Each primitive type is described using simple JSON schema notation. This -means NO nested objects and no definition referencing. - -**Objects** - -An object describes a group of one to many properties and their primitive -constraints. Each property in the group can ONLY be a primitive type: - -* string, integer, number, boolean, array - -Each primitive type is described using simple JSON schema notation. This -means NO nested objects. - -The object may optionally define required properties under the semantic -understanding that a user who uses the object should provide all required -properties. - -**Resource Type Association** - -Resource type association specifies the relationship between resource -types and the namespaces that are applicable to them. This information can be -used to drive UI and CLI views. 
For example, the same namespace of -objects, properties, and tags may be used for images, snapshots, volumes, and -flavors. Or a namespace may only apply to images. - -Resource types should be aligned with Heat resource types whenever possible. -http://docs.openstack.org/developer/heat/template_guide/openstack.html - -It is important to note that the same base property key can require different -prefixes depending on the target resource type. The API provides a way to -retrieve the correct property based on the target resource type. - -Below are a few examples: - -The desired virtual CPU topology can be set on both images and flavors -via metadata. The keys have different prefixes on images than on flavors. -On flavors keys are prefixed with ``hw:``, but on images the keys are prefixed -with ``hw_``. - -For more: https://github.com/openstack/nova-specs/blob/master/specs/juno/implemented/virt-driver-vcpu-topology.rst - -Another example is the AggregateInstanceExtraSpecsFilter and scoped properties -(e.g. properties with something:something=value). For scoped / namespaced -properties, the AggregateInstanceExtraSpecsFilter requires a prefix of -"aggregate_instance_extra_specs:" to be used on flavors but not on the -aggregate itself. Otherwise, the filter will not evaluate the property during -scheduling. - -So, on a host aggregate, you may see: - -companyx:fastio=true - -But then when used on the flavor, the AggregateInstanceExtraSpecsFilter needs: - -aggregate_instance_extra_specs:companyx:fastio=true - -In some cases, there may be multiple different filters that may use -the same property with different prefixes. In this case, the correct prefix -needs to be set based on which filter is enabled. diff --git a/doc/source/user/signature.rst b/doc/source/user/signature.rst deleted file mode 100644 index 2e4dec5c..00000000 --- a/doc/source/user/signature.rst +++ /dev/null @@ -1,184 +0,0 @@ -.. - Copyright 2016 OpenStack Foundation - All Rights Reserved. 
- - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Image Signature Verification -============================= - -Glance has the ability to perform image validation using a digital -signature and asymmetric cryptography. To trigger this, you must define -specific image properties (described below), and have stored a -certificate signed with your private key in a local Barbican installation. - -When the image properties exist on an image, Glance will validate -the uploaded image data against these properties before storing it. -If validation is unsuccessful, the upload will fail and the image will -be deleted. - -Additionally, the image properties may be used by other services (for -example, Nova) to perform data verification when the image is downloaded -from Glance. - -Requirements ------------- -Barbican key manager - See http://docs.openstack.org/developer/barbican/setup/devstack.html - -Configuration -------------- -The etc/glance-api.conf can be modified to change keystone endpoint of -barbican. By default barbican will try to connect to keystone at -http://localhost:5000/v3 but if keystone is on another host then this -should be changed. - -In glance-api.conf find the following lines:: - - [barbican] - auth_endpoint = http://localhost:5000/v3 - -Then replace http://localhost:5000/v3 with the URL of keystone, also adding /v3 -to the end of it. For example, 'https://192.168.245.9:5000/v3'. 
- - -Another option in etc/glance-api.conf which can be configured is which key manager -to use. By default Glance will use the default key manager defined by the Castellan -key manager interface, which is currently the Barbican key manager. - -In glance-api.conf find the following lines:: - - [key_manager] - api_class = castellan.key_manager.barbican_key_manager.BarbicanKeyManager - -Then replace the value with the desired key manager class. - -.. note:: If those lines do not exist then simply add them to the end of the file. - -Using the Signature Verification --------------------------------- - -An image will need a few properties for signature verification to be enabled, -these are:: - - img_signature - img_signature_hash_method - img_signature_key_type - img_signature_certificate_uuid - -Property img_signature -~~~~~~~~~~~~~~~~~~~~~~ -This is the signature of your image. - -.. note:: The max character limit is 255. - -Property img_signature_hash_method -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Hash methods is the method you hash with. - -Current ones you can use are: - -* SHA-224 -* SHA-256 -* SHA-384 -* SHA-512 - -Property img_signature_key_type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -This is the key_types you can use for your image. - -Current ones you can use are: - -* RSA-PSS -* DSA -* ECC-CURVES - - * SECT571K1 - * SECT409K1 - * SECT571R1 - * SECT409R1 - * SECP521R1 - * SECP384R1 - -.. Note:: ECC curves - Only keysizes above 384 are included. - Not all ECC curves may be supported by the back end. - -Property img_signature_certificate_uuid -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -This is the UUID of the certificate that you upload to Barbican. - -Therefore the type passed to glance is: - -* UUID - -.. 
Note:: The supported certificate types are: - - * X_509 - -Example Usage -------------- - -Follow these instructions to create your keys:: - - $ openssl genrsa -out private_key.pem 1024 - Generating RSA private key, 1024 bit long modulus - ...............................................++++++ - ..++++++ - e is 65537 (0x10001) - - $ openssl rsa -pubout -in private_key.pem -out public_key.pem - writing RSA key - - $ openssl req -new -key private_key.pem -out cert_request.csr - You are about to be asked to enter information that will be incorporated - into your certificate request. - - $ openssl x509 -req -days 14 -in cert_request.csr -signkey private_key.pem -out new_cert.crt - Signature ok - subject=/C=AU/ST=Some-State/O=Internet Widgits Pty Ltd - Getting Private key - -Upload your certificate. This only has to be done once as you can use -the same ``Secret href`` for many images until it expires:: - - $ openstack secret store --name test --algorithm RSA --expiration 2016-06-29 --secret-type certificate --payload-content-type "application/octet-stream" --payload-content-encoding base64 --payload "$(base64 new_cert.crt)" - +---------------+-----------------------------------------------------------------------+ - | Field | Value | - +---------------+-----------------------------------------------------------------------+ - | Secret href | http://127.0.0.1:9311/v1/secrets/cd7cc675-e573-419c-8fff-33a72734a243 | - - $ cert_uuid=cd7cc675-e573-419c-8fff-33a72734a243 - -Get an image and create the signature:: - - $ echo This is a dodgy image > myimage - - $ openssl dgst -sha256 -sign private_key.pem -sigopt rsa_padding_mode:pss -out myimage.signature myimage - - $ base64 -w 0 myimage.signature > myimage.signature.b64 - - $ image_signature=$(cat myimage.signature.b64) - -.. note:: Using Glance v1 requires '-w 0' due to not supporting multiline image properties. - Glance v2 does support multiline image properties and does not require '-w 0' but may still be used. 
- -Create the image:: - - $ glance image-create --name mySignedImage --container-format bare --disk-format qcow2 --property img_signature="$image_signature" --property img_signature_certificate_uuid="$cert_uuid" --property img_signature_hash_method='SHA-256' --property img_signature_key_type='RSA-PSS' < myimage - -.. note:: Creating the image can fail if validation does not succeed. - This will cause the image to be deleted. - -Other Links ------------ -* https://etherpad.openstack.org/p/mitaka-glance-image-signing-instructions -* http://docs.openstack.org/ops-guide/ops_user_facing_operations.html diff --git a/doc/source/user/statuses.rst b/doc/source/user/statuses.rst deleted file mode 100644 index 75af7d44..00000000 --- a/doc/source/user/statuses.rst +++ /dev/null @@ -1,136 +0,0 @@ -.. - Copyright 2010 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _image-statuses: - -Image Statuses -============== - -Images in Glance can be in one the following statuses: - -* ``queued`` - - The image identifier has been reserved for an image in the Glance - registry. No image data has been uploaded to Glance and the image - size was not explicitly set to zero on creation. - -* ``saving`` - - Denotes that an image's raw data is currently being uploaded to Glance. 
- When an image is registered with a call to `POST /images` and there - is an `x-image-meta-location` header present, that image will never be in - the `saving` status (as the image data is already available in some other - location). - -* ``active`` - - Denotes an image that is fully available in Glance. This occurs when - the image data is uploaded, or the image size is explicitly set to - zero on creation. - -* ``deactivated`` - - Denotes that access to image data is not allowed to any non-admin user. - Prohibiting downloads of an image also prohibits operations like image - export and image cloning that may require image data. - -* ``killed`` - - Denotes that an error occurred during the uploading of an image's data, - and that the image is not readable. - -* ``deleted`` - - Glance has retained the information about the image, but it is no longer - available to use. An image in this state will be removed automatically - at a later date. - -* ``pending_delete`` - - This is similar to `deleted`, however, Glance has not yet removed the - image data. An image in this state is not recoverable. - - -.. figure:: ../images/image_status_transition.png - :figwidth: 100% - :align: center - :alt: The states consist of: - "queued", "saving", "active", "pending_delete", "deactivated", - "killed", and "deleted". - The transitions consist of: - An initial transition to the "queued" state called "create image". - A transition from the "queued" state to the "active" state - called "add location". - A transition from the "queued" state to the "saving" state - called "upload". - A transition from the "queued" state to the "deleted" state - called "delete". - A transition from the "saving" state to the "active" state - called "upload succeeded". - A transition from the "saving" state to the "deleted" state - called "delete". - A transition from the "saving" state to the "killed" state - called "[v1] upload fail". 
- A transition from the "saving" state to the "queued" state - called "[v2] upload fail". - A transition from the "active" state to the "deleted" state - called "delete". - A transition from the "active" state to the "pending_delete" state - called "delayed delete". - A transition from the "active" state to the "deactivated" state - called "deactivate". - A transition from the "killed" state to the "deleted" state - called "deleted". - A transition from the "pending_delete" state to the "deleted" state - called "after scrub time". - A transition from the "deactivated" state to the "deleted" state - called "delete". - A transition from the "deactivated" state to the "active" state - called "reactivate". - There are no transitions out of the "deleted" state. - - - This is a representation of how the image move from one status to the next. - - * Add location from zero to more than one. - -.. _task-statuses: - -Task Statuses -============= - -Tasks in Glance can be in one the following statuses: - -* ``pending`` - - The task identifier has been reserved for a task in the Glance. - No processing has begun on it yet. - -* ``processing`` - - The task has been picked up by the underlying executor and is being run - using the backend Glance execution logic for that task type. - -* ``success`` - - Denotes that the task has had a successful run within Glance. The ``result`` - field of the task shows more details about the outcome. - -* ``failure`` - - Denotes that an error occurred during the execution of the task and it - cannot continue processing. The ``message`` field of the task shows what the - error was. 
diff --git a/etc/glance-api-paste.ini b/etc/glance-api-paste.ini deleted file mode 100644 index 68790274..00000000 --- a/etc/glance-api-paste.ini +++ /dev/null @@ -1,90 +0,0 @@ -# Use this pipeline for no auth or image caching - DEFAULT -[pipeline:glance-api] -pipeline = cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler unauthenticated-context rootapp - -# Use this pipeline for image caching and no auth -[pipeline:glance-api-caching] -pipeline = cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler unauthenticated-context cache rootapp - -# Use this pipeline for caching w/ management interface but no auth -[pipeline:glance-api-cachemanagement] -pipeline = cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler unauthenticated-context cache cachemanage rootapp - -# Use this pipeline for keystone auth -[pipeline:glance-api-keystone] -pipeline = cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler authtoken context rootapp - -# Use this pipeline for keystone auth with image caching -[pipeline:glance-api-keystone+caching] -pipeline = cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler authtoken context cache rootapp - -# Use this pipeline for keystone auth with caching and cache management -[pipeline:glance-api-keystone+cachemanagement] -pipeline = cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler authtoken context cache cachemanage rootapp - -# Use this pipeline for authZ only. This means that the registry will treat a -# user as authenticated without making requests to keystone to reauthenticate -# the user. -[pipeline:glance-api-trusted-auth] -pipeline = cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler context rootapp - -# Use this pipeline for authZ only. 
This means that the registry will treat a -# user as authenticated without making requests to keystone to reauthenticate -# the user and uses cache management -[pipeline:glance-api-trusted-auth+cachemanagement] -pipeline = cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler context cache cachemanage rootapp - -[composite:rootapp] -paste.composite_factory = glance.api:root_app_factory -/: apiversions -/v1: apiv1app -/v2: apiv2app - -[app:apiversions] -paste.app_factory = glance.api.versions:create_resource - -[app:apiv1app] -paste.app_factory = glance.api.v1.router:API.factory - -[app:apiv2app] -paste.app_factory = glance.api.v2.router:API.factory - -[filter:healthcheck] -paste.filter_factory = oslo_middleware:Healthcheck.factory -backends = disable_by_file -disable_by_file_path = /etc/glance/healthcheck_disable - -[filter:versionnegotiation] -paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory - -[filter:cache] -paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory - -[filter:cachemanage] -paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory - -[filter:context] -paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory - -[filter:unauthenticated-context] -paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory -delay_auth_decision = true - -[filter:gzip] -paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory - -[filter:osprofiler] -paste.filter_factory = osprofiler.web:WsgiMiddleware.factory -hmac_keys = SECRET_KEY #DEPRECATED -enabled = yes #DEPRECATED - -[filter:cors] -paste.filter_factory = oslo_middleware.cors:filter_factory -oslo_config_project = glance -oslo_config_program = glance-api - -[filter:http_proxy_to_wsgi] -paste.filter_factory = oslo_middleware:HTTPProxyToWSGI.factory diff 
--git a/etc/glance-api.conf b/etc/glance-api.conf deleted file mode 100644 index 73b5b80a..00000000 --- a/etc/glance-api.conf +++ /dev/null @@ -1,4568 +0,0 @@ -[DEFAULT] - -# -# From glance.api -# - -# -# Set the image owner to tenant or the authenticated user. -# -# Assign a boolean value to determine the owner of an image. When set to -# True, the owner of the image is the tenant. When set to False, the -# owner of the image will be the authenticated user issuing the request. -# Setting it to False makes the image private to the associated user and -# sharing with other users within the same tenant (or "project") -# requires explicit image sharing via image membership. -# -# Possible values: -# * True -# * False -# -# Related options: -# * None -# -# (boolean value) -#owner_is_tenant = true - -# -# Role used to identify an authenticated user as administrator. -# -# Provide a string value representing a Keystone role to identify an -# administrative user. Users with this role will be granted -# administrative privileges. The default value for this option is -# 'admin'. -# -# Possible values: -# * A string value which is a valid Keystone role -# -# Related options: -# * None -# -# (string value) -#admin_role = admin - -# -# Allow limited access to unauthenticated users. -# -# Assign a boolean to determine API access for unathenticated -# users. When set to False, the API cannot be accessed by -# unauthenticated users. When set to True, unauthenticated users can -# access the API with read-only privileges. This however only applies -# when using ContextMiddleware. -# -# Possible values: -# * True -# * False -# -# Related options: -# * None -# -# (boolean value) -#allow_anonymous_access = false - -# -# Limit the request ID length. -# -# Provide an integer value to limit the length of the request ID to -# the specified length. The default value is 64. 
Users can change this -# to any ineteger value between 0 and 16384 however keeping in mind that -# a larger value may flood the logs. -# -# Possible values: -# * Integer value between 0 and 16384 -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#max_request_id_length = 64 - -# -# Public url endpoint to use for Glance/Glare versions response. -# -# This is the public url endpoint that will appear in the Glance/Glare -# "versions" response. If no value is specified, the endpoint that is -# displayed in the version's response is that of the host running the -# API service. Change the endpoint to represent the proxy URL if the -# API service is running behind a proxy. If the service is running -# behind a load balancer, add the load balancer's URL for this value. -# -# Possible values: -# * None -# * Proxy URL -# * Load balancer URL -# -# Related options: -# * None -# -# (string value) -#public_endpoint = - -# -# Allow users to add additional/custom properties to images. -# -# Glance defines a standard set of properties (in its schema) that -# appear on every image. These properties are also known as -# ``base properties``. In addition to these properties, Glance -# allows users to add custom properties to images. These are known -# as ``additional properties``. -# -# By default, this configuration option is set to ``True`` and users -# are allowed to add additional properties. The number of additional -# properties that can be added to an image can be controlled via -# ``image_property_quota`` configuration option. -# -# Possible values: -# * True -# * False -# -# Related options: -# * image_property_quota -# -# (boolean value) -#allow_additional_image_properties = true - -# -# Maximum number of image members per image. -# -# This limits the maximum of users an image can be shared with. Any negative -# value is interpreted as unlimited. 
-# -# Related options: -# * None -# -# (integer value) -#image_member_quota = 128 - -# -# Maximum number of properties allowed on an image. -# -# This enforces an upper limit on the number of additional properties an image -# can have. Any negative value is interpreted as unlimited. -# -# NOTE: This won't have any impact if additional properties are disabled. Please -# refer to ``allow_additional_image_properties``. -# -# Related options: -# * ``allow_additional_image_properties`` -# -# (integer value) -#image_property_quota = 128 - -# -# Maximum number of tags allowed on an image. -# -# Any negative value is interpreted as unlimited. -# -# Related options: -# * None -# -# (integer value) -#image_tag_quota = 128 - -# -# Maximum number of locations allowed on an image. -# -# Any negative value is interpreted as unlimited. -# -# Related options: -# * None -# -# (integer value) -#image_location_quota = 10 - -# -# Python module path of data access API. -# -# Specifies the path to the API to use for accessing the data model. -# This option determines how the image catalog data will be accessed. -# -# Possible values: -# * glance.db.sqlalchemy.api -# * glance.db.registry.api -# * glance.db.simple.api -# -# If this option is set to ``glance.db.sqlalchemy.api`` then the image -# catalog data is stored in and read from the database via the -# SQLAlchemy Core and ORM APIs. -# -# Setting this option to ``glance.db.registry.api`` will force all -# database access requests to be routed through the Registry service. -# This avoids data access from the Glance API nodes for an added layer -# of security, scalability and manageability. -# -# NOTE: In v2 OpenStack Images API, the registry service is optional. -# In order to use the Registry API in v2, the option -# ``enable_v2_registry`` must be set to ``True``. -# -# Finally, when this configuration option is set to -# ``glance.db.simple.api``, image catalog data is stored in and read -# from an in-memory data structure. 
This is primarily used for testing. -# -# Related options: -# * enable_v2_api -# * enable_v2_registry -# -# (string value) -#data_api = glance.db.sqlalchemy.api - -# -# The default number of results to return for a request. -# -# Responses to certain API requests, like list images, may return -# multiple items. The number of results returned can be explicitly -# controlled by specifying the ``limit`` parameter in the API request. -# However, if a ``limit`` parameter is not specified, this -# configuration value will be used as the default number of results to -# be returned for any API request. -# -# NOTES: -# * The value of this configuration option may not be greater than -# the value specified by ``api_limit_max``. -# * Setting this to a very large value may slow down database -# queries and increase response times. Setting this to a -# very low value may result in poor user experience. -# -# Possible values: -# * Any positive integer -# -# Related options: -# * api_limit_max -# -# (integer value) -# Minimum value: 1 -#limit_param_default = 25 - -# -# Maximum number of results that could be returned by a request. -# -# As described in the help text of ``limit_param_default``, some -# requests may return multiple results. The number of results to be -# returned are governed either by the ``limit`` parameter in the -# request or the ``limit_param_default`` configuration option. -# The value in either case, can't be greater than the absolute maximum -# defined by this configuration option. Anything greater than this -# value is trimmed down to the maximum value defined here. -# -# NOTE: Setting this to a very large value may slow down database -# queries and increase response times. Setting this to a -# very low value may result in poor user experience. 
-# -# Possible values: -# * Any positive integer -# -# Related options: -# * limit_param_default -# -# (integer value) -# Minimum value: 1 -#api_limit_max = 1000 - -# -# Show direct image location when returning an image. -# -# This configuration option indicates whether to show the direct image -# location when returning image details to the user. The direct image -# location is where the image data is stored in backend storage. This -# image location is shown under the image property ``direct_url``. -# -# When multiple image locations exist for an image, the best location -# is displayed based on the location strategy indicated by the -# configuration option ``location_strategy``. -# -# NOTES: -# * Revealing image locations can present a GRAVE SECURITY RISK as -# image locations can sometimes include credentials. Hence, this -# is set to ``False`` by default. Set this to ``True`` with -# EXTREME CAUTION and ONLY IF you know what you are doing! -# * If an operator wishes to avoid showing any image location(s) -# to the user, then both this option and -# ``show_multiple_locations`` MUST be set to ``False``. -# -# Possible values: -# * True -# * False -# -# Related options: -# * show_multiple_locations -# * location_strategy -# -# (boolean value) -#show_image_direct_url = false - -# DEPRECATED: -# Show all image locations when returning an image. -# -# This configuration option indicates whether to show all the image -# locations when returning image details to the user. When multiple -# image locations exist for an image, the locations are ordered based -# on the location strategy indicated by the configuration opt -# ``location_strategy``. The image locations are shown under the -# image property ``locations``. -# -# NOTES: -# * Revealing image locations can present a GRAVE SECURITY RISK as -# image locations can sometimes include credentials. Hence, this -# is set to ``False`` by default. 
Set this to ``True`` with -# EXTREME CAUTION and ONLY IF you know what you are doing! -# * If an operator wishes to avoid showing any image location(s) -# to the user, then both this option and -# ``show_image_direct_url`` MUST be set to ``False``. -# -# Possible values: -# * True -# * False -# -# Related options: -# * show_image_direct_url -# * location_strategy -# -# (boolean value) -# This option is deprecated for removal since Newton. -# Its value may be silently ignored in the future. -# Reason: This option will be removed in the Pike release or later because the -# same functionality can be achieved with greater granularity by using policies. -# Please see the Newton release notes for more information. -#show_multiple_locations = false - -# -# Maximum size of image a user can upload in bytes. -# -# An image upload greater than the size mentioned here would result -# in an image creation failure. This configuration option defaults to -# 1099511627776 bytes (1 TiB). -# -# NOTES: -# * This value should only be increased after careful -# consideration and must be set less than or equal to -# 8 EiB (9223372036854775808). -# * This value must be set with careful consideration of the -# backend storage capacity. Setting this to a very low value -# may result in a large number of image failures. And, setting -# this to a very large value may result in faster consumption -# of storage. Hence, this must be set according to the nature of -# images created and storage capacity available. -# -# Possible values: -# * Any positive number less than or equal to 9223372036854775808 -# -# (integer value) -# Minimum value: 1 -# Maximum value: 9223372036854775808 -#image_size_cap = 1099511627776 - -# -# Maximum amount of image storage per tenant. -# -# This enforces an upper limit on the cumulative storage consumed by all images -# of a tenant across all stores. This is a per-tenant limit. -# -# The default unit for this configuration option is Bytes. 
However, storage -# units can be specified using case-sensitive literals ``B``, ``KB``, ``MB``, -# ``GB`` and ``TB`` representing Bytes, KiloBytes, MegaBytes, GigaBytes and -# TeraBytes respectively. Note that there should not be any space between the -# value and unit. Value ``0`` signifies no quota enforcement. Negative values -# are invalid and result in errors. -# -# Possible values: -# * A string that is a valid concatenation of a non-negative integer -# representing the storage value and an optional string literal -# representing storage units as mentioned above. -# -# Related options: -# * None -# -# (string value) -#user_storage_quota = 0 - -# -# Deploy the v1 OpenStack Images API. -# -# When this option is set to ``True``, Glance service will respond to -# requests on registered endpoints conforming to the v1 OpenStack -# Images API. -# -# NOTES: -# * If this option is enabled, then ``enable_v1_registry`` must -# also be set to ``True`` to enable mandatory usage of Registry -# service with v1 API. -# -# * If this option is disabled, then the ``enable_v1_registry`` -# option, which is enabled by default, is also recommended -# to be disabled. -# -# * This option is separate from ``enable_v2_api``, both v1 and v2 -# OpenStack Images API can be deployed independent of each -# other. -# -# * If deploying only the v2 Images API, this option, which is -# enabled by default, should be disabled. -# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v1_registry -# * enable_v2_api -# -# (boolean value) -#enable_v1_api = true - -# -# Deploy the v2 OpenStack Images API. -# -# When this option is set to ``True``, Glance service will respond -# to requests on registered endpoints conforming to the v2 OpenStack -# Images API. -# -# NOTES: -# * If this option is disabled, then the ``enable_v2_registry`` -# option, which is enabled by default, is also recommended -# to be disabled. 
-# -# * This option is separate from ``enable_v1_api``, both v1 and v2 -# OpenStack Images API can be deployed independent of each -# other. -# -# * If deploying only the v1 Images API, this option, which is -# enabled by default, should be disabled. -# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v2_registry -# * enable_v1_api -# -# (boolean value) -#enable_v2_api = true - -# -# Deploy the v1 API Registry service. -# -# When this option is set to ``True``, the Registry service -# will be enabled in Glance for v1 API requests. -# -# NOTES: -# * Use of Registry is mandatory in v1 API, so this option must -# be set to ``True`` if the ``enable_v1_api`` option is enabled. -# -# * If deploying only the v2 OpenStack Images API, this option, -# which is enabled by default, should be disabled. -# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v1_api -# -# (boolean value) -#enable_v1_registry = true - -# -# Deploy the v2 API Registry service. -# -# When this option is set to ``True``, the Registry service -# will be enabled in Glance for v2 API requests. -# -# NOTES: -# * Use of Registry is optional in v2 API, so this option -# must only be enabled if both ``enable_v2_api`` is set to -# ``True`` and the ``data_api`` option is set to -# ``glance.db.registry.api``. -# -# * If deploying only the v1 OpenStack Images API, this option, -# which is enabled by default, should be disabled. -# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v2_api -# * data_api -# -# (boolean value) -#enable_v2_registry = true - -# -# Host address of the pydev server. -# -# Provide a string value representing the hostname or IP of the -# pydev server to use for debugging. The pydev server listens for -# debug connections on this address, facilitating remote debugging -# in Glance. 
-# -# Possible values: -# * Valid hostname -# * Valid IP address -# -# Related options: -# * None -# -# (string value) -#pydev_worker_debug_host = localhost - -# -# Port number that the pydev server will listen on. -# -# Provide a port number to bind the pydev server to. The pydev -# process accepts debug connections on this port and facilitates -# remote debugging in Glance. -# -# Possible values: -# * A valid port number -# -# Related options: -# * None -# -# (port value) -# Minimum value: 0 -# Maximum value: 65535 -#pydev_worker_debug_port = 5678 - -# -# AES key for encrypting store location metadata. -# -# Provide a string value representing the AES cipher to use for -# encrypting Glance store metadata. -# -# NOTE: The AES key to use must be set to a random string of length -# 16, 24 or 32 bytes. -# -# Possible values: -# * String value representing a valid AES key -# -# Related options: -# * None -# -# (string value) -#metadata_encryption_key = - -# -# Digest algorithm to use for digital signature. -# -# Provide a string value representing the digest algorithm to -# use for generating digital signatures. By default, ``sha256`` -# is used. -# -# To get a list of the available algorithms supported by the version -# of OpenSSL on your platform, run the command: -# ``openssl list-message-digest-algorithms``. -# Examples are 'sha1', 'sha256', and 'sha512'. -# -# NOTE: ``digest_algorithm`` is not related to Glance's image signing -# and verification. It is only used to sign the universally unique -# identifier (UUID) as a part of the certificate file and key file -# validation. -# -# Possible values: -# * An OpenSSL message digest algorithm identifier -# -# Relation options: -# * None -# -# (string value) -#digest_algorithm = sha256 - -# -# Strategy to determine the preference order of image locations. -# -# This configuration option indicates the strategy to determine -# the order in which an image's locations must be accessed to -# serve the image's data. 
Glance then retrieves the image data -# from the first responsive active location it finds in this list. -# -# This option takes one of two possible values ``location_order`` -# and ``store_type``. The default value is ``location_order``, -# which suggests that image data be served by using locations in -# the order they are stored in Glance. The ``store_type`` value -# sets the image location preference based on the order in which -# the storage backends are listed as a comma separated list for -# the configuration option ``store_type_preference``. -# -# Possible values: -# * location_order -# * store_type -# -# Related options: -# * store_type_preference -# -# (string value) -# Allowed values: location_order, store_type -#location_strategy = location_order - -# -# The location of the property protection file. -# -# Provide a valid path to the property protection file which contains -# the rules for property protections and the roles/policies associated -# with them. -# -# A property protection file, when set, restricts the Glance image -# properties to be created, read, updated and/or deleted by a specific -# set of users that are identified by either roles or policies. -# If this configuration option is not set, by default, property -# protections won't be enforced. If a value is specified and the file -# is not found, the glance-api service will fail to start. -# More information on property protections can be found at: -# http://docs.openstack.org/developer/glance/property-protections.html -# -# Possible values: -# * Empty string -# * Valid path to the property protection configuration file -# -# Related options: -# * property_protection_rule_format -# -# (string value) -#property_protection_file = - -# -# Rule format for property protection. -# -# Provide the desired way to set property protection on Glance -# image properties. The two permissible values are ``roles`` -# and ``policies``. The default value is ``roles``. 
-# -# If the value is ``roles``, the property protection file must -# contain a comma separated list of user roles indicating -# permissions for each of the CRUD operations on each property -# being protected. If set to ``policies``, a policy defined in -# policy.json is used to express property protections for each -# of the CRUD operations. Examples of how property protections -# are enforced based on ``roles`` or ``policies`` can be found at: -# http://docs.openstack.org/developer/glance/property-protections.html#examples -# -# Possible values: -# * roles -# * policies -# -# Related options: -# * property_protection_file -# -# (string value) -# Allowed values: roles, policies -#property_protection_rule_format = roles - -# -# List of allowed exception modules to handle RPC exceptions. -# -# Provide a comma separated list of modules whose exceptions are -# permitted to be recreated upon receiving exception data via an RPC -# call made to Glance. The default list includes -# ``glance.common.exception``, ``builtins``, and ``exceptions``. -# -# The RPC protocol permits interaction with Glance via calls across a -# network or within the same system. Including a list of exception -# namespaces with this option enables RPC to propagate the exceptions -# back to the users. -# -# Possible values: -# * A comma separated list of valid exception modules -# -# Related options: -# * None -# (list value) -#allowed_rpc_exception_modules = glance.common.exception,builtins,exceptions - -# -# IP address to bind the glance servers to. -# -# Provide an IP address to bind the glance server to. The default -# value is ``0.0.0.0``. -# -# Edit this option to enable the server to listen on one particular -# IP address on the network card. This facilitates selection of a -# particular network interface for the server. 
-# -# Possible values: -# * A valid IPv4 address -# * A valid IPv6 address -# -# Related options: -# * None -# -# (string value) -#bind_host = 0.0.0.0 - -# -# Port number on which the server will listen. -# -# Provide a valid port number to bind the server's socket to. This -# port is then set to identify processes and forward network messages -# that arrive at the server. The default bind_port value for the API -# server is 9292 and for the registry server is 9191. -# -# Possible values: -# * A valid port number (0 to 65535) -# -# Related options: -# * None -# -# (port value) -# Minimum value: 0 -# Maximum value: 65535 -#bind_port = - -# -# Number of Glance worker processes to start. -# -# Provide a non-negative integer value to set the number of child -# process workers to service requests. By default, the number of CPUs -# available is set as the value for ``workers``. -# -# Each worker process is made to listen on the port set in the -# configuration file and contains a greenthread pool of size 1000. -# -# NOTE: Setting the number of workers to zero, triggers the creation -# of a single API process with a greenthread pool of size 1000. -# -# Possible values: -# * 0 -# * Positive integer value (typically equal to the number of CPUs) -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#workers = - -# -# Maximum line size of message headers. -# -# Provide an integer value representing a length to limit the size of -# message headers. The default value is 16384. -# -# NOTE: ``max_header_line`` may need to be increased when using large -# tokens (typically those generated by the Keystone v3 API with big -# service catalogs). However, it is to be kept in mind that larger -# values for ``max_header_line`` would flood the logs. -# -# Setting ``max_header_line`` to 0 sets no limit for the line size of -# message headers. 
-# -# Possible values: -# * 0 -# * Positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#max_header_line = 16384 - -# -# Set keep alive option for HTTP over TCP. -# -# Provide a boolean value to determine sending of keep alive packets. -# If set to ``False``, the server returns the header -# "Connection: close". If set to ``True``, the server returns a -# "Connection: Keep-Alive" in its responses. This enables retention of -# the same TCP connection for HTTP conversations instead of opening a -# new one with each new request. -# -# This option must be set to ``False`` if the client socket connection -# needs to be closed explicitly after the response is received and -# read successfully by the client. -# -# Possible values: -# * True -# * False -# -# Related options: -# * None -# -# (boolean value) -#http_keepalive = true - -# -# Timeout for client connections' socket operations. -# -# Provide a valid integer value representing time in seconds to set -# the period of wait before an incoming connection can be closed. The -# default value is 900 seconds. -# -# The value zero implies wait forever. -# -# Possible values: -# * Zero -# * Positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#client_socket_timeout = 900 - -# -# Set the number of incoming connection requests. -# -# Provide a positive integer value to limit the number of requests in -# the backlog queue. The default queue size is 4096. -# -# An incoming connection to a TCP listener socket is queued before a -# connection can be established with the server. Setting the backlog -# for a TCP socket ensures a limited queue size for incoming traffic. -# -# Possible values: -# * Positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#backlog = 4096 - -# -# Set the wait time before a connection recheck. 
-# -# Provide a positive integer value representing time in seconds which -# is set as the idle wait time before a TCP keep alive packet can be -# sent to the host. The default value is 600 seconds. -# -# Setting ``tcp_keepidle`` helps verify at regular intervals that a -# connection is intact and prevents frequent TCP connection -# reestablishment. -# -# Possible values: -# * Positive integer value representing time in seconds -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#tcp_keepidle = 600 - -# -# Absolute path to the CA file. -# -# Provide a string value representing a valid absolute path to -# the Certificate Authority file to use for client authentication. -# -# A CA file typically contains necessary trusted certificates to -# use for the client authentication. This is essential to ensure -# that a secure connection is established to the server via the -# internet. -# -# Possible values: -# * Valid absolute path to the CA file -# -# Related options: -# * None -# -# (string value) -#ca_file = /etc/ssl/cafile - -# -# Absolute path to the certificate file. -# -# Provide a string value representing a valid absolute path to the -# certificate file which is required to start the API service -# securely. -# -# A certificate file typically is a public key container and includes -# the server's public key, server name, server information and the -# signature which was a result of the verification process using the -# CA certificate. This is required for a secure connection -# establishment. -# -# Possible values: -# * Valid absolute path to the certificate file -# -# Related options: -# * None -# -# (string value) -#cert_file = /etc/ssl/certs - -# -# Absolute path to a private key file. -# -# Provide a string value representing a valid absolute path to a -# private key file which is required to establish the client-server -# connection. 
-# -# Possible values: -# * Absolute path to the private key file -# -# Related options: -# * None -# -# (string value) -#key_file = /etc/ssl/key/key-file.pem - -# DEPRECATED: The HTTP header used to determine the scheme for the original -# request, even if it was removed by an SSL terminating proxy. Typical value is -# "HTTP_X_FORWARDED_PROTO". (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Use the http_proxy_to_wsgi middleware instead. -#secure_proxy_ssl_header = - -# -# The relative path to sqlite file database that will be used for image cache -# management. -# -# This is a relative path to the sqlite file database that tracks the age and -# usage statistics of image cache. The path is relative to image cache base -# directory, specified by the configuration option ``image_cache_dir``. -# -# This is a lightweight database with just one table. -# -# Possible values: -# * A valid relative path to sqlite file database -# -# Related options: -# * ``image_cache_dir`` -# -# (string value) -#image_cache_sqlite_db = cache.db - -# -# The driver to use for image cache management. -# -# This configuration option provides the flexibility to choose between the -# different image-cache drivers available. An image-cache driver is responsible -# for providing the essential functions of image-cache like write images to/read -# images from cache, track age and usage of cached images, provide a list of -# cached images, fetch size of the cache, queue images for caching and clean up -# the cache, etc. -# -# The essential functions of a driver are defined in the base class -# ``glance.image_cache.drivers.base.Driver``. All image-cache drivers (existing -# and prospective) must implement this interface. Currently available drivers -# are ``sqlite`` and ``xattr``. 
These drivers primarily differ in the way they -# store the information about cached images: -# * The ``sqlite`` driver uses a sqlite database (which sits on every glance -# node locally) to track the usage of cached images. -# * The ``xattr`` driver uses the extended attributes of files to store this -# information. It also requires a filesystem that sets ``atime`` on the -# files -# when accessed. -# -# Possible values: -# * sqlite -# * xattr -# -# Related options: -# * None -# -# (string value) -# Allowed values: sqlite, xattr -#image_cache_driver = sqlite - -# -# The upper limit on cache size, in bytes, after which the cache-pruner cleans -# up the image cache. -# -# NOTE: This is just a threshold for cache-pruner to act upon. It is NOT a -# hard limit beyond which the image cache would never grow. In fact, depending -# on how often the cache-pruner runs and how quickly the cache fills, the image -# cache can far exceed the size specified here very easily. Hence, care must be -# taken to appropriately schedule the cache-pruner and in setting this limit. -# -# Glance caches an image when it is downloaded. Consequently, the size of the -# image cache grows over time as the number of downloads increases. To keep the -# cache size from becoming unmanageable, it is recommended to run the -# cache-pruner as a periodic task. When the cache pruner is kicked off, it -# compares the current size of image cache and triggers a cleanup if the image -# cache grew beyond the size specified here. After the cleanup, the size of -# cache is less than or equal to size specified here. -# -# Possible values: -# * Any non-negative integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#image_cache_max_size = 10737418240 - -# -# The amount of time, in seconds, an incomplete image remains in the cache. -# -# Incomplete images are images for which download is in progress. 
Please see the -# description of configuration option ``image_cache_dir`` for more detail. -# Sometimes, due to various reasons, it is possible the download may hang and -# the incompletely downloaded image remains in the ``incomplete`` directory. -# This configuration option sets a time limit on how long the incomplete images -# should remain in the ``incomplete`` directory before they are cleaned up. -# Once an incomplete image spends more time than is specified here, it'll be -# removed by cache-cleaner on its next run. -# -# It is recommended to run cache-cleaner as a periodic task on the Glance API -# nodes to keep the incomplete images from occupying disk space. -# -# Possible values: -# * Any non-negative integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#image_cache_stall_time = 86400 - -# -# Base directory for image cache. -# -# This is the location where image data is cached and served out of. All cached -# images are stored directly under this directory. This directory also contains -# three subdirectories, namely, ``incomplete``, ``invalid`` and ``queue``. -# -# The ``incomplete`` subdirectory is the staging area for downloading images. An -# image is first downloaded to this directory. When the image download is -# successful it is moved to the base directory. However, if the download fails, -# the partially downloaded image file is moved to the ``invalid`` subdirectory. -# -# The ``queue``subdirectory is used for queuing images for download. This is -# used primarily by the cache-prefetcher, which can be scheduled as a periodic -# task like cache-pruner and cache-cleaner, to cache images ahead of their -# usage. -# Upon receiving the request to cache an image, Glance touches a file in the -# ``queue`` directory with the image id as the file name. The cache-prefetcher, -# when running, polls for the files in ``queue`` directory and starts -# downloading them in the order they were created. 
When the download is -# successful, the zero-sized file is deleted from the ``queue`` directory. -# If the download fails, the zero-sized file remains and it'll be retried the -# next time cache-prefetcher runs. -# -# Possible values: -# * A valid path -# -# Related options: -# * ``image_cache_sqlite_db`` -# -# (string value) -#image_cache_dir = - -# -# Default publisher_id for outgoing Glance notifications. -# -# This is the value that the notification driver will use to identify -# messages for events originating from the Glance service. Typically, -# this is the hostname of the instance that generated the message. -# -# Possible values: -# * Any reasonable instance identifier, for example: image.host1 -# -# Related options: -# * None -# -# (string value) -#default_publisher_id = image.localhost - -# -# List of notifications to be disabled. -# -# Specify a list of notifications that should not be emitted. -# A notification can be given either as a notification type to -# disable a single event notification, or as a notification group -# prefix to disable all event notifications within a group. -# -# Possible values: -# A comma-separated list of individual notification types or -# notification groups to be disabled. Currently supported groups: -# * image -# * image.member -# * task -# * metadef_namespace -# * metadef_object -# * metadef_property -# * metadef_resource_type -# * metadef_tag -# For a complete listing and description of each event refer to: -# http://docs.openstack.org/developer/glance/notifications.html -# -# The values must be specified as: . -# For example: image.create,task.success,metadef_tag -# -# Related options: -# * None -# -# (list value) -#disabled_notifications = - -# -# Address the registry server is hosted on. -# -# Possible values: -# * A valid IP or hostname -# -# Related options: -# * None -# -# (string value) -#registry_host = 0.0.0.0 - -# -# Port the registry server is listening on. 
-# -# Possible values: -# * A valid port number -# -# Related options: -# * None -# -# (port value) -# Minimum value: 0 -# Maximum value: 65535 -#registry_port = 9191 - -# DEPRECATED: Whether to pass through the user token when making requests to the -# registry. To prevent failures with token expiration during big files upload, -# it is recommended to set this parameter to False.If "use_user_token" is not in -# effect, then admin credentials can be specified. (boolean value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#use_user_token = true - -# DEPRECATED: The administrators user name. If "use_user_token" is not in -# effect, then admin credentials can be specified. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#admin_user = - -# DEPRECATED: The administrators password. If "use_user_token" is not in effect, -# then admin credentials can be specified. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#admin_password = - -# DEPRECATED: The tenant name of the administrative user. 
If "use_user_token" is -# not in effect, then admin tenant name can be specified. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#admin_tenant_name = - -# DEPRECATED: The URL to the keystone service. If "use_user_token" is not in -# effect and using keystone auth, then URL of keystone can be specified. (string -# value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#auth_url = - -# DEPRECATED: The strategy to use for authentication. If "use_user_token" is not -# in effect, then auth strategy can be specified. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#auth_strategy = noauth - -# DEPRECATED: The region for the authentication service. If "use_user_token" is -# not in effect and using keystone auth, then region name can be specified. -# (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. 
-# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#auth_region = - -# -# Protocol to use for communication with the registry server. -# -# Provide a string value representing the protocol to use for -# communication with the registry server. By default, this option is -# set to ``http`` and the connection is not secure. -# -# This option can be set to ``https`` to establish a secure connection -# to the registry server. In this case, provide a key to use for the -# SSL connection using the ``registry_client_key_file`` option. Also -# include the CA file and cert file using the options -# ``registry_client_ca_file`` and ``registry_client_cert_file`` -# respectively. -# -# Possible values: -# * http -# * https -# -# Related options: -# * registry_client_key_file -# * registry_client_cert_file -# * registry_client_ca_file -# -# (string value) -# Allowed values: http, https -#registry_client_protocol = http - -# -# Absolute path to the private key file. -# -# Provide a string value representing a valid absolute path to the -# private key file to use for establishing a secure connection to -# the registry server. -# -# NOTE: This option must be set if ``registry_client_protocol`` is -# set to ``https``. Alternatively, the GLANCE_CLIENT_KEY_FILE -# environment variable may be set to a filepath of the key file. -# -# Possible values: -# * String value representing a valid absolute path to the key -# file. -# -# Related options: -# * registry_client_protocol -# -# (string value) -#registry_client_key_file = /etc/ssl/key/key-file.pem - -# -# Absolute path to the certificate file. -# -# Provide a string value representing a valid absolute path to the -# certificate file to use for establishing a secure connection to -# the registry server. -# -# NOTE: This option must be set if ``registry_client_protocol`` is -# set to ``https``. 
Alternatively, the GLANCE_CLIENT_CERT_FILE -# environment variable may be set to a filepath of the certificate -# file. -# -# Possible values: -# * String value representing a valid absolute path to the -# certificate file. -# -# Related options: -# * registry_client_protocol -# -# (string value) -#registry_client_cert_file = /etc/ssl/certs/file.crt - -# -# Absolute path to the Certificate Authority file. -# -# Provide a string value representing a valid absolute path to the -# certificate authority file to use for establishing a secure -# connection to the registry server. -# -# NOTE: This option must be set if ``registry_client_protocol`` is -# set to ``https``. Alternatively, the GLANCE_CLIENT_CA_FILE -# environment variable may be set to a filepath of the CA file. -# This option is ignored if the ``registry_client_insecure`` option -# is set to ``True``. -# -# Possible values: -# * String value representing a valid absolute path to the CA -# file. -# -# Related options: -# * registry_client_protocol -# * registry_client_insecure -# -# (string value) -#registry_client_ca_file = /etc/ssl/cafile/file.ca - -# -# Set verification of the registry server certificate. -# -# Provide a boolean value to determine whether or not to validate -# SSL connections to the registry server. By default, this option -# is set to ``False`` and the SSL connections are validated. -# -# If set to ``True``, the connection to the registry server is not -# validated via a certifying authority and the -# ``registry_client_ca_file`` option is ignored. This is the -# registry's equivalent of specifying --insecure on the command line -# using glanceclient for the API. -# -# Possible values: -# * True -# * False -# -# Related options: -# * registry_client_protocol -# * registry_client_ca_file -# -# (boolean value) -#registry_client_insecure = false - -# -# Timeout value for registry requests. 
-# -# Provide an integer value representing the period of time in seconds -# that the API server will wait for a registry request to complete. -# The default value is 600 seconds. -# -# A value of 0 implies that a request will never timeout. -# -# Possible values: -# * Zero -# * Positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#registry_client_timeout = 600 - -# -# Send headers received from identity when making requests to -# registry. -# -# Typically, Glance registry can be deployed in multiple flavors, -# which may or may not include authentication. For example, -# ``trusted-auth`` is a flavor that does not require the registry -# service to authenticate the requests it receives. However, the -# registry service may still need a user context to be populated to -# serve the requests. This can be achieved by the caller -# (the Glance API usually) passing through the headers it received -# from authenticating with identity for the same request. The typical -# headers sent are ``X-User-Id``, ``X-Tenant-Id``, ``X-Roles``, -# ``X-Identity-Status`` and ``X-Service-Catalog``. -# -# Provide a boolean value to determine whether to send the identity -# headers to provide tenant and user information along with the -# requests to registry service. By default, this option is set to -# ``False``, which means that user and tenant information is not -# available readily. It must be obtained by authenticating. Hence, if -# this is set to ``False``, ``flavor`` must be set to value that -# either includes authentication or authenticated user context. -# -# Possible values: -# * True -# * False -# -# Related options: -# * flavor -# -# (boolean value) -#send_identity_headers = false - -# -# The amount of time, in seconds, to delay image scrubbing. -# -# When delayed delete is turned on, an image is put into ``pending_delete`` -# state upon deletion until the scrubber deletes its image data. 
Typically, soon -# after the image is put into ``pending_delete`` state, it is available for -# scrubbing. However, scrubbing can be delayed until a later point using this -# configuration option. This option denotes the time period an image spends in -# ``pending_delete`` state before it is available for scrubbing. -# -# It is important to realize that this has storage implications. The larger the -# ``scrub_time``, the longer the time to reclaim backend storage from deleted -# images. -# -# Possible values: -# * Any non-negative integer -# -# Related options: -# * ``delayed_delete`` -# -# (integer value) -# Minimum value: 0 -#scrub_time = 0 - -# -# The size of thread pool to be used for scrubbing images. -# -# When there are a large number of images to scrub, it is beneficial to scrub -# images in parallel so that the scrub queue stays in control and the backend -# storage is reclaimed in a timely fashion. This configuration option denotes -# the maximum number of images to be scrubbed in parallel. The default value is -# one, which signifies serial scrubbing. Any value above one indicates parallel -# scrubbing. -# -# Possible values: -# * Any non-zero positive integer -# -# Related options: -# * ``delayed_delete`` -# -# (integer value) -# Minimum value: 1 -#scrub_pool_size = 1 - -# -# Turn on/off delayed delete. -# -# Typically when an image is deleted, the ``glance-api`` service puts the image -# into ``deleted`` state and deletes its data at the same time. Delayed delete -# is a feature in Glance that delays the actual deletion of image data until a -# later point in time (as determined by the configuration option -# ``scrub_time``). -# When delayed delete is turned on, the ``glance-api`` service puts the image -# into ``pending_delete`` state upon deletion and leaves the image data in the -# storage backend for the image scrubber to delete at a later time. 
The image -# scrubber will move the image into ``deleted`` state upon successful deletion -# of image data. -# -# NOTE: When delayed delete is turned on, image scrubber MUST be running as a -# periodic task to prevent the backend storage from filling up with undesired -# usage. -# -# Possible values: -# * True -# * False -# -# Related options: -# * ``scrub_time`` -# * ``wakeup_time`` -# * ``scrub_pool_size`` -# -# (boolean value) -#delayed_delete = false - -# -# From oslo.log -# - -# If set to true, the logging level will be set to DEBUG instead of the default -# INFO level. (boolean value) -# Note: This option can be changed without restarting. -#debug = false - -# DEPRECATED: If set to false, the logging level will be set to WARNING instead -# of the default INFO level. (boolean value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -#verbose = true - -# The name of a logging configuration file. This file is appended to any -# existing logging configuration files. For details about logging configuration -# files, see the Python logging module documentation. Note that when logging -# configuration files are used then all logging configuration is set in the -# configuration file and other logging configuration options are ignored (for -# example, logging_context_format_string). (string value) -# Note: This option can be changed without restarting. -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append = - -# Defines the format string for %%(asctime)s in log records. Default: -# %(default)s . This option is ignored if log_config_append is set. (string -# value) -#log_date_format = %Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to send logging output to. If no default is set, -# logging will go to stderr as defined by use_stderr. This option is ignored if -# log_config_append is set. 
(string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file = - -# (Optional) The base directory used for relative log_file paths. This option -# is ignored if log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir = - -# Uses logging handler designed to watch file system. When log file is moved or -# removed this handler will open a new log file with specified path -# instantaneously. It makes sense only if log_file option is specified and Linux -# platform is used. This option is ignored if log_config_append is set. (boolean -# value) -#watch_log_file = false - -# Use syslog for logging. Existing syslog format is DEPRECATED and will be -# changed later to honor RFC5424. This option is ignored if log_config_append is -# set. (boolean value) -#use_syslog = false - -# Syslog facility to receive log lines. This option is ignored if -# log_config_append is set. (string value) -#syslog_log_facility = LOG_USER - -# Log output to standard error. This option is ignored if log_config_append is -# set. (boolean value) -#use_stderr = false - -# Format string to use for log messages with context. (string value) -#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages when context is undefined. (string -# value) -#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Additional data to append to log message when logging level for the message is -# DEBUG. (string value) -#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format. (string value) -#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s - -# Defines the format string for %(user_identity)s that is used in -# logging_context_format_string. 
(string value) -#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s - -# List of package logging levels in logger=LEVEL pairs. This option is ignored -# if log_config_append is set. (list value) -#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO - -# Enables or disables publication of error events. (boolean value) -#publish_errors = false - -# The format for an instance that is passed with the log message. (string value) -#instance_format = "[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log message. (string -# value) -#instance_uuid_format = "[instance: %(uuid)s] " - -# Interval, number of seconds, of log rate limiting. (integer value) -#rate_limit_interval = 0 - -# Maximum number of logged messages per rate_limit_interval. (integer value) -#rate_limit_burst = 0 - -# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG or -# empty string. Logs with level greater or equal to rate_limit_except_level are -# not filtered. An empty string means that all levels are filtered. (string -# value) -#rate_limit_except_level = CRITICAL - -# Enables or disables fatal status of deprecations. (boolean value) -#fatal_deprecations = false - -# -# From oslo.messaging -# - -# Size of RPC connection pool. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size -#rpc_conn_pool_size = 30 - -# The pool size limit for connections expiration policy (integer value) -#conn_pool_min_size = 2 - -# The time-to-live in sec of idle connections in the pool (integer value) -#conn_pool_ttl = 1200 - -# ZeroMQ bind address. 
Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. (string value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_bind_address -#rpc_zmq_bind_address = * - -# MatchMaker driver. (string value) -# Allowed values: redis, sentinel, dummy -# Deprecated group/name - [DEFAULT]/rpc_zmq_matchmaker -#rpc_zmq_matchmaker = redis - -# Number of ZeroMQ contexts, defaults to 1. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_contexts -#rpc_zmq_contexts = 1 - -# Maximum number of ingress messages to locally buffer per topic. Default is -# unlimited. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_topic_backlog -#rpc_zmq_topic_backlog = - -# Directory for holding IPC sockets. (string value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_ipc_dir -#rpc_zmq_ipc_dir = /var/run/openstack - -# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match -# "host" option, if running Nova. (string value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_host -#rpc_zmq_host = localhost - -# Number of seconds to wait before all pending messages will be sent after -# closing a socket. The default value of -1 specifies an infinite linger period. -# The value of 0 specifies no linger period. Pending messages shall be discarded -# immediately when the socket is closed. Positive values specify an upper bound -# for the linger period. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_cast_timeout -#zmq_linger = -1 - -# The default number of seconds that poll should wait. Poll raises timeout -# exception when timeout expired. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_poll_timeout -#rpc_poll_timeout = 1 - -# Expiration timeout in seconds of a name service record about existing target ( -# < 0 means no timeout). 
(integer value) -# Deprecated group/name - [DEFAULT]/zmq_target_expire -#zmq_target_expire = 300 - -# Update period in seconds of a name service record about existing target. -# (integer value) -# Deprecated group/name - [DEFAULT]/zmq_target_update -#zmq_target_update = 180 - -# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean -# value) -# Deprecated group/name - [DEFAULT]/use_pub_sub -#use_pub_sub = false - -# Use ROUTER remote proxy. (boolean value) -# Deprecated group/name - [DEFAULT]/use_router_proxy -#use_router_proxy = false - -# This option makes direct connections dynamic or static. It makes sense only -# with use_router_proxy=False which means to use direct connections for direct -# message types (ignored otherwise). (boolean value) -#use_dynamic_connections = false - -# How many additional connections to a host will be made for failover reasons. -# This option is actual only in dynamic connections mode. (integer value) -#zmq_failover_connections = 2 - -# Minimal port number for random ports range. (port value) -# Minimum value: 0 -# Maximum value: 65535 -# Deprecated group/name - [DEFAULT]/rpc_zmq_min_port -#rpc_zmq_min_port = 49153 - -# Maximal port number for random ports range. (integer value) -# Minimum value: 1 -# Maximum value: 65536 -# Deprecated group/name - [DEFAULT]/rpc_zmq_max_port -#rpc_zmq_max_port = 65536 - -# Number of retries to find free port number before fail with ZMQBindError. -# (integer value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_bind_port_retries -#rpc_zmq_bind_port_retries = 100 - -# Default serialization mechanism for serializing/deserializing -# outgoing/incoming messages (string value) -# Allowed values: json, msgpack -# Deprecated group/name - [DEFAULT]/rpc_zmq_serialization -#rpc_zmq_serialization = json - -# This option configures round-robin mode in zmq socket. True means not keeping -# a queue when server side disconnects. 
False means to keep queue and messages -# even if server is disconnected, when the server appears we send all -# accumulated messages to it. (boolean value) -#zmq_immediate = true - -# Enable/disable TCP keepalive (KA) mechanism. The default value of -1 (or any -# other negative value) means to skip any overrides and leave it to OS default; -# 0 and 1 (or any other positive value) mean to disable and enable the option -# respectively. (integer value) -#zmq_tcp_keepalive = -1 - -# The duration between two keepalive transmissions in idle condition. The unit -# is platform dependent, for example, seconds in Linux, milliseconds in Windows -# etc. The default value of -1 (or any other negative value and 0) means to skip -# any overrides and leave it to OS default. (integer value) -#zmq_tcp_keepalive_idle = -1 - -# The number of retransmissions to be carried out before declaring that remote -# end is not available. The default value of -1 (or any other negative value and -# 0) means to skip any overrides and leave it to OS default. (integer value) -#zmq_tcp_keepalive_cnt = -1 - -# The duration between two successive keepalive retransmissions, if -# acknowledgement to the previous keepalive transmission is not received. The -# unit is platform dependent, for example, seconds in Linux, milliseconds in -# Windows etc. The default value of -1 (or any other negative value and 0) means -# to skip any overrides and leave it to OS default. (integer value) -#zmq_tcp_keepalive_intvl = -1 - -# Maximum number of (green) threads to work concurrently. (integer value) -#rpc_thread_pool_size = 100 - -# Expiration timeout in seconds of a sent/received message after which it is not -# tracked anymore by a client/server. (integer value) -#rpc_message_ttl = 300 - -# Wait for message acknowledgements from receivers. This mechanism works only -# via proxy without PUB/SUB. (boolean value) -#rpc_use_acks = false - -# Number of seconds to wait for an ack from a cast/call. 
After each retry -# attempt this timeout is multiplied by some specified multiplier. (integer -# value) -#rpc_ack_timeout_base = 15 - -# Number to multiply base ack timeout by after each retry attempt. (integer -# value) -#rpc_ack_timeout_multiplier = 2 - -# Default number of message sending attempts in case of any problems occurred: -# positive value N means at most N retries, 0 means no retries, None or -1 (or -# any other negative values) mean to retry forever. This option is used only if -# acknowledgments are enabled. (integer value) -#rpc_retry_attempts = 3 - -# List of publisher hosts SubConsumer can subscribe on. This option has higher -# priority then the default publishers list taken from the matchmaker. (list -# value) -#subscribe_on = - -# Size of executor thread pool. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size -#executor_thread_pool_size = 64 - -# Seconds to wait for a response from a call. (integer value) -#rpc_response_timeout = 60 - -# A URL representing the messaging driver to use and its full configuration. -# (string value) -#transport_url = - -# DEPRECATED: The messaging driver to use, defaults to rabbit. Other drivers -# include amqp and zmq. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#rpc_backend = rabbit - -# The default exchange under which topics are scoped. May be overridden by an -# exchange name specified in the transport_url option. (string value) -#control_exchange = openstack - - -[cors] - -# -# From oslo.middleware.cors -# - -# Indicate whether this resource may be shared with the domain received in the -# requests "origin" header. Format: "://[:]", no trailing -# slash. 
Example: https://horizon.example.com (list value) -#allowed_origin = - -# Indicate that the actual request can include user credentials (boolean value) -#allow_credentials = true - -# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple -# Headers. (list value) -#expose_headers = X-Image-Meta-Checksum,X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID - -# Maximum cache age of CORS preflight requests. (integer value) -#max_age = 3600 - -# Indicate which methods can be used during the actual request. (list value) -#allow_methods = GET,PUT,POST,DELETE,PATCH - -# Indicate which header field names may be used during the actual request. (list -# value) -#allow_headers = Content-MD5,X-Image-Meta-Checksum,X-Storage-Token,Accept-Encoding,X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID - - -[cors.subdomain] - -# -# From oslo.middleware.cors -# - -# Indicate whether this resource may be shared with the domain received in the -# requests "origin" header. Format: "://[:]", no trailing -# slash. Example: https://horizon.example.com (list value) -#allowed_origin = - -# Indicate that the actual request can include user credentials (boolean value) -#allow_credentials = true - -# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple -# Headers. (list value) -#expose_headers = X-Image-Meta-Checksum,X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID - -# Maximum cache age of CORS preflight requests. (integer value) -#max_age = 3600 - -# Indicate which methods can be used during the actual request. (list value) -#allow_methods = GET,PUT,POST,DELETE,PATCH - -# Indicate which header field names may be used during the actual request. 
(list -# value) -#allow_headers = Content-MD5,X-Image-Meta-Checksum,X-Storage-Token,Accept-Encoding,X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID - - -[database] - -# -# From oslo.db -# - -# DEPRECATED: The file name to use with SQLite. (string value) -# Deprecated group/name - [DEFAULT]/sqlite_db -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Should use config option connection or slave_connection to connect the -# database. -#sqlite_db = oslo.sqlite - -# If True, SQLite uses synchronous mode. (boolean value) -# Deprecated group/name - [DEFAULT]/sqlite_synchronous -#sqlite_synchronous = true - -# The back end to use for the database. (string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend = sqlalchemy - -# The SQLAlchemy connection string to use to connect to the database. (string -# value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection = - -# The SQLAlchemy connection string to use to connect to the slave database. -# (string value) -#slave_connection = - -# The SQL mode to be used for MySQL sessions. This option, including the -# default, overrides any server-set SQL mode. To use whatever SQL mode is set by -# the server configuration, set this to no value. Example: mysql_sql_mode= -# (string value) -#mysql_sql_mode = TRADITIONAL - -# Timeout before idle SQL connections are reaped. (integer value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout = 3600 - -# Minimum number of SQL connections to keep open in a pool. 
(integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool. Setting a value of 0 -# indicates no limit. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size = 5 - -# Maximum number of database connection retries during startup. Set to -1 to -# specify an infinite retry count. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries = 10 - -# Interval between retries of opening a SQL connection. (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval = 10 - -# If set, use this value for max_overflow with SQLAlchemy. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow = 50 - -# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer -# value) -# Minimum value: 0 -# Maximum value: 100 -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug = 0 - -# Add Python stack traces to SQL as comment strings. (boolean value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace = false - -# If set, use this value for pool_timeout with SQLAlchemy. (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout = - -# Enable the experimental use of database reconnect on connection lost. (boolean -# value) -#use_db_reconnect = false - -# Seconds between retries of a database transaction. (integer value) -#db_retry_interval = 1 - -# If True, increases the interval between retries of a database operation up to -# db_max_retry_interval. 
(boolean value) -#db_inc_retry_interval = true - -# If db_inc_retry_interval is set, the maximum seconds between retries of a -# database operation. (integer value) -#db_max_retry_interval = 10 - -# Maximum retries in case of connection error or deadlock error before error is -# raised. Set to -1 to specify an infinite retry count. (integer value) -#db_max_retries = 20 - -# -# From oslo.db.concurrency -# - -# Enable the experimental use of thread pooling for all DB API calls (boolean -# value) -# Deprecated group/name - [DEFAULT]/dbapi_use_tpool -#use_tpool = false - - -[glance_store] - -# -# From glance.store -# - -# -# List of enabled Glance stores. -# -# Register the storage backends to use for storing disk images -# as a comma separated list. The default stores enabled for -# storing disk images with Glance are ``file`` and ``http``. -# -# Possible values: -# * A comma separated list that could include: -# * file -# * http -# * swift -# * rbd -# * sheepdog -# * cinder -# * vmware -# -# Related Options: -# * default_store -# -# (list value) -#stores = file,http - -# -# The default scheme to use for storing images. -# -# Provide a string value representing the default scheme to use for -# storing images. If not set, Glance uses ``file`` as the default -# scheme to store images with the ``file`` store. -# -# NOTE: The value given for this configuration option must be a valid -# scheme for a store registered with the ``stores`` configuration -# option. -# -# Possible values: -# * file -# * filesystem -# * http -# * https -# * swift -# * swift+http -# * swift+https -# * swift+config -# * rbd -# * sheepdog -# * cinder -# * vsphere -# -# Related Options: -# * stores -# -# (string value) -# Allowed values: file, filesystem, http, https, swift, swift+http, swift+https, swift+config, rbd, sheepdog, cinder, vsphere -#default_store = file - -# -# Minimum interval in seconds to execute updating dynamic storage -# capabilities based on current backend status. 
-# -# Provide an integer value representing time in seconds to set the -# minimum interval before an update of dynamic storage capabilities -# for a storage backend can be attempted. Setting -# ``store_capabilities_update_min_interval`` does not mean updates -# occur periodically based on the set interval. Rather, the update -# is performed at the elapse of this interval set, if an operation -# of the store is triggered. -# -# By default, this option is set to zero and is disabled. Provide an -# integer value greater than zero to enable this option. -# -# NOTE: For more information on store capabilities and their updates, -# please visit: https://specs.openstack.org/openstack/glance-specs/specs/kilo -# /store-capabilities.html -# -# For more information on setting up a particular store in your -# deployment and help with the usage of this feature, please contact -# the storage driver maintainers listed here: -# http://docs.openstack.org/developer/glance_store/drivers/index.html -# -# Possible values: -# * Zero -# * Positive integer -# -# Related Options: -# * None -# -# (integer value) -# Minimum value: 0 -#store_capabilities_update_min_interval = 0 - -# -# Information to match when looking for cinder in the service catalog. -# -# When the ``cinder_endpoint_template`` is not set and any of -# ``cinder_store_auth_address``, ``cinder_store_user_name``, -# ``cinder_store_project_name``, ``cinder_store_password`` is not set, -# cinder store uses this information to lookup cinder endpoint from the service -# catalog in the current context. ``cinder_os_region_name``, if set, is taken -# into consideration to fetch the appropriate endpoint. -# -# The service catalog can be listed by the ``openstack catalog list`` command. -# -# Possible values: -# * A string of of the following form: -# ``::`` -# At least ``service_type`` and ``interface`` should be specified. -# ``service_name`` can be omitted. 
-# -# Related options: -# * cinder_os_region_name -# * cinder_endpoint_template -# * cinder_store_auth_address -# * cinder_store_user_name -# * cinder_store_project_name -# * cinder_store_password -# -# (string value) -#cinder_catalog_info = volumev2::publicURL - -# -# Override service catalog lookup with template for cinder endpoint. -# -# When this option is set, this value is used to generate cinder endpoint, -# instead of looking up from the service catalog. -# This value is ignored if ``cinder_store_auth_address``, -# ``cinder_store_user_name``, ``cinder_store_project_name``, and -# ``cinder_store_password`` are specified. -# -# If this configuration option is set, ``cinder_catalog_info`` will be ignored. -# -# Possible values: -# * URL template string for cinder endpoint, where ``%%(tenant)s`` is -# replaced with the current tenant (project) name. -# For example: ``http://cinder.openstack.example.org/v2/%%(tenant)s`` -# -# Related options: -# * cinder_store_auth_address -# * cinder_store_user_name -# * cinder_store_project_name -# * cinder_store_password -# * cinder_catalog_info -# -# (string value) -#cinder_endpoint_template = - -# -# Region name to lookup cinder service from the service catalog. -# -# This is used only when ``cinder_catalog_info`` is used for determining the -# endpoint. If set, the lookup for cinder endpoint by this node is filtered to -# the specified region. It is useful when multiple regions are listed in the -# catalog. If this is not set, the endpoint is looked up from every region. -# -# Possible values: -# * A string that is a valid region name. -# -# Related options: -# * cinder_catalog_info -# -# (string value) -# Deprecated group/name - [glance_store]/os_region_name -#cinder_os_region_name = - -# -# Location of a CA certificates file used for cinder client requests. -# -# The specified CA certificates file, if set, is used to verify cinder -# connections via HTTPS endpoint. If the endpoint is HTTP, this value is -# ignored. 
-# ``cinder_api_insecure`` must be set to ``True`` to enable the verification. -# -# Possible values: -# * Path to a ca certificates file -# -# Related options: -# * cinder_api_insecure -# -# (string value) -#cinder_ca_certificates_file = - -# -# Number of cinderclient retries on failed http calls. -# -# When a call failed by any errors, cinderclient will retry the call up to the -# specified times after sleeping a few seconds. -# -# Possible values: -# * A positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#cinder_http_retries = 3 - -# -# Time period, in seconds, to wait for a cinder volume transition to -# complete. -# -# When the cinder volume is created, deleted, or attached to the glance node to -# read/write the volume data, the volume's state is changed. For example, the -# newly created volume status changes from ``creating`` to ``available`` after -# the creation process is completed. This specifies the maximum time to wait for -# the status change. If a timeout occurs while waiting, or the status is changed -# to an unexpected value (e.g. `error``), the image creation fails. -# -# Possible values: -# * A positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#cinder_state_transition_timeout = 300 - -# -# Allow to perform insecure SSL requests to cinder. -# -# If this option is set to True, HTTPS endpoint connection is verified using the -# CA certificates file specified by ``cinder_ca_certificates_file`` option. -# -# Possible values: -# * True -# * False -# -# Related options: -# * cinder_ca_certificates_file -# -# (boolean value) -#cinder_api_insecure = false - -# -# The address where the cinder authentication service is listening. -# -# When all of ``cinder_store_auth_address``, ``cinder_store_user_name``, -# ``cinder_store_project_name``, and ``cinder_store_password`` options are -# specified, the specified values are always used for the authentication. 
-# This is useful to hide the image volumes from users by storing them in a -# project/tenant specific to the image service. It also enables users to share -# the image volume among other projects under the control of glance's ACL. -# -# If either of these options are not set, the cinder endpoint is looked up -# from the service catalog, and current context's user and project are used. -# -# Possible values: -# * A valid authentication service address, for example: -# ``http://openstack.example.org/identity/v2.0`` -# -# Related options: -# * cinder_store_user_name -# * cinder_store_password -# * cinder_store_project_name -# -# (string value) -#cinder_store_auth_address = - -# -# User name to authenticate against cinder. -# -# This must be used with all the following related options. If any of these are -# not specified, the user of the current context is used. -# -# Possible values: -# * A valid user name -# -# Related options: -# * cinder_store_auth_address -# * cinder_store_password -# * cinder_store_project_name -# -# (string value) -#cinder_store_user_name = - -# -# Password for the user authenticating against cinder. -# -# This must be used with all the following related options. If any of these are -# not specified, the user of the current context is used. -# -# Possible values: -# * A valid password for the user specified by ``cinder_store_user_name`` -# -# Related options: -# * cinder_store_auth_address -# * cinder_store_user_name -# * cinder_store_project_name -# -# (string value) -#cinder_store_password = - -# -# Project name where the image volume is stored in cinder. -# -# If this configuration option is not set, the project in current context is -# used. -# -# This must be used with all the following related options. If any of these are -# not specified, the project of the current context is used. 
-# -# Possible values: -# * A valid project name -# -# Related options: -# * ``cinder_store_auth_address`` -# * ``cinder_store_user_name`` -# * ``cinder_store_password`` -# -# (string value) -#cinder_store_project_name = - -# -# Path to the rootwrap configuration file to use for running commands as root. -# -# The cinder store requires root privileges to operate the image volumes (for -# connecting to iSCSI/FC volumes and reading/writing the volume data, etc.). -# The configuration file should allow the required commands by cinder store and -# os-brick library. -# -# Possible values: -# * Path to the rootwrap config file -# -# Related options: -# * None -# -# (string value) -#rootwrap_config = /etc/glance/rootwrap.conf - -# -# Volume type that will be used for volume creation in cinder. -# -# Some cinder backends can have several volume types to optimize storage usage. -# Adding this option allows an operator to choose a specific volume type -# in cinder that can be optimized for images. -# -# If this is not set, then the default volume type specified in the cinder -# configuration will be used for volume creation. -# -# Possible values: -# * A valid volume type from cinder -# -# Related options: -# * None -# -# (string value) -#cinder_volume_type = - -# -# Directory to which the filesystem backend store writes images. -# -# Upon start up, Glance creates the directory if it doesn't already -# exist and verifies write access to the user under which -# ``glance-api`` runs. If the write access isn't available, a -# ``BadStoreConfiguration`` exception is raised and the filesystem -# store may not be available for adding new images. -# -# NOTE: This directory is used only when filesystem store is used as a -# storage backend. Either ``filesystem_store_datadir`` or -# ``filesystem_store_datadirs`` option must be specified in -# ``glance-api.conf``. 
If both options are specified, a -# ``BadStoreConfiguration`` will be raised and the filesystem store -# may not be available for adding new images. -# -# Possible values: -# * A valid path to a directory -# -# Related options: -# * ``filesystem_store_datadirs`` -# * ``filesystem_store_file_perm`` -# -# (string value) -#filesystem_store_datadir = /var/lib/glance/images - -# -# List of directories and their priorities to which the filesystem -# backend store writes images. -# -# The filesystem store can be configured to store images in multiple -# directories as opposed to using a single directory specified by the -# ``filesystem_store_datadir`` configuration option. When using -# multiple directories, each directory can be given an optional -# priority to specify the preference order in which they should -# be used. Priority is an integer that is concatenated to the -# directory path with a colon where a higher value indicates higher -# priority. When two directories have the same priority, the directory -# with most free space is used. When no priority is specified, it -# defaults to zero. -# -# More information on configuring filesystem store with multiple store -# directories can be found at -# http://docs.openstack.org/developer/glance/configuring.html -# -# NOTE: This directory is used only when filesystem store is used as a -# storage backend. Either ``filesystem_store_datadir`` or -# ``filesystem_store_datadirs`` option must be specified in -# ``glance-api.conf``. If both options are specified, a -# ``BadStoreConfiguration`` will be raised and the filesystem store -# may not be available for adding new images. -# -# Possible values: -# * List of strings of the following form: -# * ``
:`` -# -# Related options: -# * ``filesystem_store_datadir`` -# * ``filesystem_store_file_perm`` -# -# (multi valued) -#filesystem_store_datadirs = - -# -# Filesystem store metadata file. -# -# The path to a file which contains the metadata to be returned with -# any location associated with the filesystem store. The file must -# contain a valid JSON object. The object should contain the keys -# ``id`` and ``mountpoint``. The value for both keys should be a -# string. -# -# Possible values: -# * A valid path to the store metadata file -# -# Related options: -# * None -# -# (string value) -#filesystem_store_metadata_file = - -# -# File access permissions for the image files. -# -# Set the intended file access permissions for image data. This provides -# a way to enable other services, e.g. Nova, to consume images directly -# from the filesystem store. The users running the services that are -# intended to be given access to could be made a member of the group -# that owns the files created. Assigning a value less then or equal to -# zero for this configuration option signifies that no changes be made -# to the default permissions. This value will be decoded as an octal -# digit. -# -# For more information, please refer the documentation at -# http://docs.openstack.org/developer/glance/configuring.html -# -# Possible values: -# * A valid file access permission -# * Zero -# * Any negative integer -# -# Related options: -# * None -# -# (integer value) -#filesystem_store_file_perm = 0 - -# -# Path to the CA bundle file. -# -# This configuration option enables the operator to use a custom -# Certificate Authority file to verify the remote server certificate. If -# this option is set, the ``https_insecure`` option will be ignored and -# the CA file specified will be used to authenticate the server -# certificate and establish a secure connection to the server. 
-# -# Possible values: -# * A valid path to a CA file -# -# Related options: -# * https_insecure -# -# (string value) -#https_ca_certificates_file = - -# -# Set verification of the remote server certificate. -# -# This configuration option takes in a boolean value to determine -# whether or not to verify the remote server certificate. If set to -# True, the remote server certificate is not verified. If the option is -# set to False, then the default CA truststore is used for verification. -# -# This option is ignored if ``https_ca_certificates_file`` is set. -# The remote server certificate will then be verified using the file -# specified using the ``https_ca_certificates_file`` option. -# -# Possible values: -# * True -# * False -# -# Related options: -# * https_ca_certificates_file -# -# (boolean value) -#https_insecure = true - -# -# The http/https proxy information to be used to connect to the remote -# server. -# -# This configuration option specifies the http/https proxy information -# that should be used to connect to the remote server. The proxy -# information should be a key value pair of the scheme and proxy, for -# example, http:10.0.0.1:3128. You can also specify proxies for multiple -# schemes by separating the key value pairs with a comma, for example, -# http:10.0.0.1:3128, https:10.0.0.1:1080. -# -# Possible values: -# * A comma separated list of scheme:proxy pairs as described above -# -# Related options: -# * None -# -# (dict value) -#http_proxy_information = - -# -# Size, in megabytes, to chunk RADOS images into. -# -# Provide an integer value representing the size in megabytes to chunk -# Glance images into. The default chunk size is 8 megabytes. For optimal -# performance, the value should be a power of two. -# -# When Ceph's RBD object storage system is used as the storage backend -# for storing Glance images, the images are chunked into objects of the -# size set using this option. 
These chunked objects are then stored -# across the distributed block data store to use for Glance. -# -# Possible Values: -# * Any positive integer value -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#rbd_store_chunk_size = 8 - -# -# RADOS pool in which images are stored. -# -# When RBD is used as the storage backend for storing Glance images, the -# images are stored by means of logical grouping of the objects (chunks -# of images) into a ``pool``. Each pool is defined with the number of -# placement groups it can contain. The default pool that is used is -# 'images'. -# -# More information on the RBD storage backend can be found here: -# http://ceph.com/planet/how-data-is-stored-in-ceph-cluster/ -# -# Possible Values: -# * A valid pool name -# -# Related options: -# * None -# -# (string value) -#rbd_store_pool = images - -# -# RADOS user to authenticate as. -# -# This configuration option takes in the RADOS user to authenticate as. -# This is only needed when RADOS authentication is enabled and is -# applicable only if the user is using Cephx authentication. If the -# value for this option is not set by the user or is set to None, a -# default value will be chosen, which will be based on the client. -# section in rbd_store_ceph_conf. -# -# Possible Values: -# * A valid RADOS user -# -# Related options: -# * rbd_store_ceph_conf -# -# (string value) -#rbd_store_user = - -# -# Ceph configuration file path. -# -# This configuration option takes in the path to the Ceph configuration -# file to be used. If the value for this option is not set by the user -# or is set to None, librados will locate the default configuration file -# which is located at /etc/ceph/ceph.conf. If using Cephx -# authentication, this file should include a reference to the right -# keyring in a client. 
section -# -# Possible Values: -# * A valid path to a configuration file -# -# Related options: -# * rbd_store_user -# -# (string value) -#rbd_store_ceph_conf = /etc/ceph/ceph.conf - -# -# Timeout value for connecting to Ceph cluster. -# -# This configuration option takes in the timeout value in seconds used -# when connecting to the Ceph cluster i.e. it sets the time to wait for -# glance-api before closing the connection. This prevents glance-api -# hangups during the connection to RBD. If the value for this option -# is set to less than or equal to 0, no timeout is set and the default -# librados value is used. -# -# Possible Values: -# * Any integer value -# -# Related options: -# * None -# -# (integer value) -#rados_connect_timeout = 0 - -# -# Chunk size for images to be stored in Sheepdog data store. -# -# Provide an integer value representing the size in mebibyte -# (1048576 bytes) to chunk Glance images into. The default -# chunk size is 64 mebibytes. -# -# When using Sheepdog distributed storage system, the images are -# chunked into objects of this size and then stored across the -# distributed data store to use for Glance. -# -# Chunk sizes, if a power of two, help avoid fragmentation and -# enable improved performance. -# -# Possible values: -# * Positive integer value representing size in mebibytes. -# -# Related Options: -# * None -# -# (integer value) -# Minimum value: 1 -#sheepdog_store_chunk_size = 64 - -# -# Port number on which the sheep daemon will listen. -# -# Provide an integer value representing a valid port number on -# which you want the Sheepdog daemon to listen on. The default -# port is 7000. -# -# The Sheepdog daemon, also called 'sheep', manages the storage -# in the distributed cluster by writing objects across the storage -# network. It identifies and acts on the messages it receives on -# the port number set using ``sheepdog_store_port`` option to store -# chunks of Glance images. 
-# -# Possible values: -# * A valid port number (0 to 65535) -# -# Related Options: -# * sheepdog_store_address -# -# (port value) -# Minimum value: 0 -# Maximum value: 65535 -#sheepdog_store_port = 7000 - -# -# Address to bind the Sheepdog daemon to. -# -# Provide a string value representing the address to bind the -# Sheepdog daemon to. The default address set for the 'sheep' -# is 127.0.0.1. -# -# The Sheepdog daemon, also called 'sheep', manages the storage -# in the distributed cluster by writing objects across the storage -# network. It identifies and acts on the messages directed to the -# address set using ``sheepdog_store_address`` option to store -# chunks of Glance images. -# -# Possible values: -# * A valid IPv4 address -# * A valid IPv6 address -# * A valid hostname -# -# Related Options: -# * sheepdog_store_port -# -# (string value) -#sheepdog_store_address = 127.0.0.1 - -# -# Set verification of the server certificate. -# -# This boolean determines whether or not to verify the server -# certificate. If this option is set to True, swiftclient won't check -# for a valid SSL certificate when authenticating. If the option is set -# to False, then the default CA truststore is used for verification. -# -# Possible values: -# * True -# * False -# -# Related options: -# * swift_store_cacert -# -# (boolean value) -#swift_store_auth_insecure = false - -# -# Path to the CA bundle file. -# -# This configuration option enables the operator to specify the path to -# a custom Certificate Authority file for SSL verification when -# connecting to Swift. -# -# Possible values: -# * A valid path to a CA file -# -# Related options: -# * swift_store_auth_insecure -# -# (string value) -#swift_store_cacert = /etc/ssl/certs/ca-certificates.crt - -# -# The region of Swift endpoint to use by Glance. -# -# Provide a string value representing a Swift region where Glance -# can connect to for image storage. By default, there is no region -# set. 
-# -# When Glance uses Swift as the storage backend to store images -# for a specific tenant that has multiple endpoints, setting of a -# Swift region with ``swift_store_region`` allows Glance to connect -# to Swift in the specified region as opposed to a single region -# connectivity. -# -# This option can be configured for both single-tenant and -# multi-tenant storage. -# -# NOTE: Setting the region with ``swift_store_region`` is -# tenant-specific and is necessary ``only if`` the tenant has -# multiple endpoints across different regions. -# -# Possible values: -# * A string value representing a valid Swift region. -# -# Related Options: -# * None -# -# (string value) -#swift_store_region = RegionTwo - -# -# The URL endpoint to use for Swift backend storage. -# -# Provide a string value representing the URL endpoint to use for -# storing Glance images in Swift store. By default, an endpoint -# is not set and the storage URL returned by ``auth`` is used. -# Setting an endpoint with ``swift_store_endpoint`` overrides the -# storage URL and is used for Glance image storage. -# -# NOTE: The URL should include the path up to, but excluding the -# container. The location of an object is obtained by appending -# the container and object to the configured URL. -# -# Possible values: -# * String value representing a valid URL path up to a Swift container -# -# Related Options: -# * None -# -# (string value) -#swift_store_endpoint = https://swift.openstack.example.org/v1/path_not_including_container_name - -# -# Endpoint Type of Swift service. -# -# This string value indicates the endpoint type to use to fetch the -# Swift endpoint. The endpoint type determines the actions the user will -# be allowed to perform, for instance, reading and writing to the Store. -# This setting is only used if swift_store_auth_version is greater than -# 1. 
-# -# Possible values: -# * publicURL -# * adminURL -# * internalURL -# -# Related options: -# * swift_store_endpoint -# -# (string value) -# Allowed values: publicURL, adminURL, internalURL -#swift_store_endpoint_type = publicURL - -# -# Type of Swift service to use. -# -# Provide a string value representing the service type to use for -# storing images while using Swift backend storage. The default -# service type is set to ``object-store``. -# -# NOTE: If ``swift_store_auth_version`` is set to 2, the value for -# this configuration option needs to be ``object-store``. If using -# a higher version of Keystone or a different auth scheme, this -# option may be modified. -# -# Possible values: -# * A string representing a valid service type for Swift storage. -# -# Related Options: -# * None -# -# (string value) -#swift_store_service_type = object-store - -# -# Name of single container to store images/name prefix for multiple containers -# -# When a single container is being used to store images, this configuration -# option indicates the container within the Glance account to be used for -# storing all images. When multiple containers are used to store images, this -# will be the name prefix for all containers. Usage of single/multiple -# containers can be controlled using the configuration option -# ``swift_store_multiple_containers_seed``. -# -# When using multiple containers, the containers will be named after the value -# set for this configuration option with the first N chars of the image UUID -# as the suffix delimited by an underscore (where N is specified by -# ``swift_store_multiple_containers_seed``). -# -# Example: if the seed is set to 3 and swift_store_container = ``glance``, then -# an image with UUID ``fdae39a1-bac5-4238-aba4-69bcc726e848`` would be placed in -# the container ``glance_fda``. 
All dashes in the UUID are included when -# creating the container name but do not count toward the character limit, so -# when N=10 the container name would be ``glance_fdae39a1-ba.`` -# -# Possible values: -# * If using single container, this configuration option can be any string -# that is a valid swift container name in Glance's Swift account -# * If using multiple containers, this configuration option can be any -# string as long as it satisfies the container naming rules enforced by -# Swift. The value of ``swift_store_multiple_containers_seed`` should be -# taken into account as well. -# -# Related options: -# * ``swift_store_multiple_containers_seed`` -# * ``swift_store_multi_tenant`` -# * ``swift_store_create_container_on_put`` -# -# (string value) -#swift_store_container = glance - -# -# The size threshold, in MB, after which Glance will start segmenting image -# data. -# -# Swift has an upper limit on the size of a single uploaded object. By default, -# this is 5GB. To upload objects bigger than this limit, objects are segmented -# into multiple smaller objects that are tied together with a manifest file. -# For more detail, refer to -# http://docs.openstack.org/developer/swift/overview_large_objects.html -# -# This configuration option specifies the size threshold over which the Swift -# driver will start segmenting image data into multiple smaller files. -# Currently, the Swift driver only supports creating Dynamic Large Objects. -# -# NOTE: This should be set by taking into account the large object limit -# enforced by the Swift cluster in consideration. -# -# Possible values: -# * A positive integer that is less than or equal to the large object limit -# enforced by the Swift cluster in consideration. -# -# Related options: -# * ``swift_store_large_object_chunk_size`` -# -# (integer value) -# Minimum value: 1 -#swift_store_large_object_size = 5120 - -# -# The maximum size, in MB, of the segments when image data is segmented. 
-# -# When image data is segmented to upload images that are larger than the limit -# enforced by the Swift cluster, image data is broken into segments that are no -# bigger than the size specified by this configuration option. -# Refer to ``swift_store_large_object_size`` for more detail. -# -# For example: if ``swift_store_large_object_size`` is 5GB and -# ``swift_store_large_object_chunk_size`` is 1GB, an image of size 6.2GB will be -# segmented into 7 segments where the first six segments will be 1GB in size and -# the seventh segment will be 0.2GB. -# -# Possible values: -# * A positive integer that is less than or equal to the large object limit -# enforced by Swift cluster in consideration. -# -# Related options: -# * ``swift_store_large_object_size`` -# -# (integer value) -# Minimum value: 1 -#swift_store_large_object_chunk_size = 200 - -# -# Create container, if it doesn't already exist, when uploading image. -# -# At the time of uploading an image, if the corresponding container doesn't -# exist, it will be created provided this configuration option is set to True. -# By default, it won't be created. This behavior is applicable for both single -# and multiple containers mode. -# -# Possible values: -# * True -# * False -# -# Related options: -# * None -# -# (boolean value) -#swift_store_create_container_on_put = false - -# -# Store images in tenant's Swift account. -# -# This enables multi-tenant storage mode which causes Glance images to be stored -# in tenant specific Swift accounts. If this is disabled, Glance stores all -# images in its own account. More details multi-tenant store can be found at -# https://wiki.openstack.org/wiki/GlanceSwiftTenantSpecificStorage -# -# NOTE: If using multi-tenant swift store, please make sure -# that you do not set a swift configuration file with the -# 'swift_store_config_file' option. 
-# -# Possible values: -# * True -# * False -# -# Related options: -# * swift_store_config_file -# -# (boolean value) -#swift_store_multi_tenant = false - -# -# Seed indicating the number of containers to use for storing images. -# -# When using a single-tenant store, images can be stored in one or more than one -# containers. When set to 0, all images will be stored in one single container. -# When set to an integer value between 1 and 32, multiple containers will be -# used to store images. This configuration option will determine how many -# containers are created. The total number of containers that will be used is -# equal to 16^N, so if this config option is set to 2, then 16^2=256 containers -# will be used to store images. -# -# Please refer to ``swift_store_container`` for more detail on the naming -# convention. More detail about using multiple containers can be found at -# https://specs.openstack.org/openstack/glance-specs/specs/kilo/swift-store- -# multiple-containers.html -# -# NOTE: This is used only when swift_store_multi_tenant is disabled. -# -# Possible values: -# * A non-negative integer less than or equal to 32 -# -# Related options: -# * ``swift_store_container`` -# * ``swift_store_multi_tenant`` -# * ``swift_store_create_container_on_put`` -# -# (integer value) -# Minimum value: 0 -# Maximum value: 32 -#swift_store_multiple_containers_seed = 0 - -# -# List of tenants that will be granted admin access. -# -# This is a list of tenants that will be granted read/write access on -# all Swift containers created by Glance in multi-tenant mode. The -# default value is an empty list. -# -# Possible values: -# * A comma separated list of strings representing UUIDs of Keystone -# projects/tenants -# -# Related options: -# * None -# -# (list value) -#swift_store_admin_tenants = - -# -# SSL layer compression for HTTPS Swift requests. -# -# Provide a boolean value to determine whether or not to compress -# HTTPS Swift requests for images at the SSL layer. 
By default, -# compression is enabled. -# -# When using Swift as the backend store for Glance image storage, -# SSL layer compression of HTTPS Swift requests can be set using -# this option. If set to False, SSL layer compression of HTTPS -# Swift requests is disabled. Disabling this option may improve -# performance for images which are already in a compressed format, -# for example, qcow2. -# -# Possible values: -# * True -# * False -# -# Related Options: -# * None -# -# (boolean value) -#swift_store_ssl_compression = true - -# -# The number of times a Swift download will be retried before the -# request fails. -# -# Provide an integer value representing the number of times an image -# download must be retried before erroring out. The default value is -# zero (no retry on a failed image download). When set to a positive -# integer value, ``swift_store_retry_get_count`` ensures that the -# download is attempted this many more times upon a download failure -# before sending an error message. -# -# Possible values: -# * Zero -# * Positive integer value -# -# Related Options: -# * None -# -# (integer value) -# Minimum value: 0 -#swift_store_retry_get_count = 0 - -# -# Time in seconds defining the size of the window in which a new -# token may be requested before the current token is due to expire. -# -# Typically, the Swift storage driver fetches a new token upon the -# expiration of the current token to ensure continued access to -# Swift. However, some Swift transactions (like uploading image -# segments) may not recover well if the token expires on the fly. -# -# Hence, by fetching a new token before the current token expiration, -# we make sure that the token does not expire or is close to expiry -# before a transaction is attempted. By default, the Swift storage -# driver requests for a new token 60 seconds or less before the -# current token expiration. 
-# -# Possible values: -# * Zero -# * Positive integer value -# -# Related Options: -# * None -# -# (integer value) -# Minimum value: 0 -#swift_store_expire_soon_interval = 60 - -# -# Use trusts for multi-tenant Swift store. -# -# This option instructs the Swift store to create a trust for each -# add/get request when the multi-tenant store is in use. Using trusts -# allows the Swift store to avoid problems that can be caused by an -# authentication token expiring during the upload or download of data. -# -# By default, ``swift_store_use_trusts`` is set to ``True``(use of -# trusts is enabled). If set to ``False``, a user token is used for -# the Swift connection instead, eliminating the overhead of trust -# creation. -# -# NOTE: This option is considered only when -# ``swift_store_multi_tenant`` is set to ``True`` -# -# Possible values: -# * True -# * False -# -# Related options: -# * swift_store_multi_tenant -# -# (boolean value) -#swift_store_use_trusts = true - -# -# Reference to default Swift account/backing store parameters. -# -# Provide a string value representing a reference to the default set -# of parameters required for using swift account/backing store for -# image storage. The default reference value for this configuration -# option is 'ref1'. This configuration option dereferences the -# parameters and facilitates image storage in Swift storage backend -# every time a new image is added. -# -# Possible values: -# * A valid string value -# -# Related options: -# * None -# -# (string value) -#default_swift_reference = ref1 - -# DEPRECATED: Version of the authentication service to use. Valid versions are 2 -# and 3 for keystone and 1 (deprecated) for swauth and rackspace. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: -# The option 'auth_version' in the Swift back-end configuration file is -# used instead. 
-#swift_store_auth_version = 2 - -# DEPRECATED: The address where the Swift authentication service is listening. -# (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: -# The option 'auth_address' in the Swift back-end configuration file is -# used instead. -#swift_store_auth_address = - -# DEPRECATED: The user to authenticate against the Swift authentication service. -# (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: -# The option 'user' in the Swift back-end configuration file is set instead. -#swift_store_user = - -# DEPRECATED: Auth key for the user authenticating against the Swift -# authentication service. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: -# The option 'key' in the Swift back-end configuration file is used -# to set the authentication key instead. -#swift_store_key = - -# -# Absolute path to the file containing the swift account(s) -# configurations. -# -# Include a string value representing the path to a configuration -# file that has references for each of the configured Swift -# account(s)/backing stores. By default, no file path is specified -# and customized Swift referencing is disabled. Configuring this -# option is highly recommended while using Swift storage backend for -# image storage as it avoids storage of credentials in the database. -# -# NOTE: Please do not configure this option if you have set -# ``swift_store_multi_tenant`` to ``True``. -# -# Possible values: -# * String value representing an absolute path on the glance-api -# node -# -# Related options: -# * swift_store_multi_tenant -# -# (string value) -#swift_store_config_file = - -# -# Address of the ESX/ESXi or vCenter Server target system. -# -# This configuration option sets the address of the ESX/ESXi or vCenter -# Server target system. 
This option is required when using the VMware -# storage backend. The address can contain an IP address (127.0.0.1) or -# a DNS name (www.my-domain.com). -# -# Possible Values: -# * A valid IPv4 or IPv6 address -# * A valid DNS name -# -# Related options: -# * vmware_server_username -# * vmware_server_password -# -# (string value) -#vmware_server_host = 127.0.0.1 - -# -# Server username. -# -# This configuration option takes the username for authenticating with -# the VMware ESX/ESXi or vCenter Server. This option is required when -# using the VMware storage backend. -# -# Possible Values: -# * Any string that is the username for a user with appropriate -# privileges -# -# Related options: -# * vmware_server_host -# * vmware_server_password -# -# (string value) -#vmware_server_username = root - -# -# Server password. -# -# This configuration option takes the password for authenticating with -# the VMware ESX/ESXi or vCenter Server. This option is required when -# using the VMware storage backend. -# -# Possible Values: -# * Any string that is a password corresponding to the username -# specified using the "vmware_server_username" option -# -# Related options: -# * vmware_server_host -# * vmware_server_username -# -# (string value) -#vmware_server_password = vmware - -# -# The number of VMware API retries. -# -# This configuration option specifies the number of times the VMware -# ESX/VC server API must be retried upon connection related issues or -# server API call overload. It is not possible to specify 'retry -# forever'. -# -# Possible Values: -# * Any positive integer value -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#vmware_api_retry_count = 10 - -# -# Interval in seconds used for polling remote tasks invoked on VMware -# ESX/VC server. -# -# This configuration option takes in the sleep time in seconds for polling an -# on-going async task as part of the VMWare ESX/VC server API call. 
-# -# Possible Values: -# * Any positive integer value -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#vmware_task_poll_interval = 5 - -# -# The directory where the glance images will be stored in the datastore. -# -# This configuration option specifies the path to the directory where the -# glance images will be stored in the VMware datastore. If this option -# is not set, the default directory where the glance images are stored -# is openstack_glance. -# -# Possible Values: -# * Any string that is a valid path to a directory -# -# Related options: -# * None -# -# (string value) -#vmware_store_image_dir = /openstack_glance - -# -# Set verification of the ESX/vCenter server certificate. -# -# This configuration option takes a boolean value to determine -# whether or not to verify the ESX/vCenter server certificate. If this -# option is set to True, the ESX/vCenter server certificate is not -# verified. If this option is set to False, then the default CA -# truststore is used for verification. -# -# This option is ignored if the "vmware_ca_file" option is set. In that -# case, the ESX/vCenter server certificate will then be verified using -# the file specified using the "vmware_ca_file" option. -# -# Possible Values: -# * True -# * False -# -# Related options: -# * vmware_ca_file -# -# (boolean value) -# Deprecated group/name - [glance_store]/vmware_api_insecure -#vmware_insecure = false - -# -# Absolute path to the CA bundle file. -# -# This configuration option enables the operator to use a custom -# Certificate Authority File to verify the ESX/vCenter certificate. -# -# If this option is set, the "vmware_insecure" option will be ignored -# and the CA file specified will be used to authenticate the ESX/vCenter -# server certificate and establish a secure connection to the server. 
-# -# Possible Values: -# * Any string that is a valid absolute path to a CA file -# -# Related options: -# * vmware_insecure -# -# (string value) -#vmware_ca_file = /etc/ssl/certs/ca-certificates.crt - -# -# The datastores where the image can be stored. -# -# This configuration option specifies the datastores where the image can -# be stored in the VMWare store backend. This option may be specified -# multiple times for specifying multiple datastores. The datastore name -# should be specified after its datacenter path, separated by ":". An -# optional weight may be given after the datastore name, separated again -# by ":" to specify the priority. Thus, the required format becomes -# <datacenter_path>:<datastore_name>:<optional_weight>. -# -# When adding an image, the datastore with highest weight will be -# selected, unless there is not enough free space available in cases -# where the image size is already known. If no weight is given, it is -# assumed to be zero and the directory will be considered for selection -# last. If multiple datastores have the same weight, then the one with -# the most free space available is selected. -# -# Possible Values: -# * Any string of the format: -# <datacenter_path>:<datastore_name>:<optional_weight> -# -# Related options: -# * None -# -# (multi valued) -#vmware_datastores = - - -[image_format] - -# -# From glance.api -# - -# Supported values for the 'container_format' image attribute (list value) -# Deprecated group/name - [DEFAULT]/container_formats -#container_formats = ami,ari,aki,bare,ovf,ova,docker - -# Supported values for the 'disk_format' image attribute (list value) -# Deprecated group/name - [DEFAULT]/disk_formats -#disk_formats = ami,ari,aki,vhd,vhdx,vmdk,raw,qcow2,vdi,iso,ploop - - -[keystone_authtoken] - -# -# From keystonemiddleware.auth_token -# - -# Complete "public" Identity API endpoint. This endpoint should not be an -# "admin" endpoint, as it should be accessible by all end users. Unauthenticated -# clients are redirected to this endpoint to authenticate. 
Although this -# endpoint should ideally be unversioned, client support in the wild varies. -# If you're using a versioned v2 endpoint here, then this should *not* be the -# same endpoint the service user utilizes for validating tokens, because normal -# end users may not be able to reach that endpoint. (string value) -#auth_uri = - -# API version of the admin Identity API endpoint. (string value) -#auth_version = - -# Do not handle authorization requests within the middleware, but delegate the -# authorization decision to downstream WSGI components. (boolean value) -#delay_auth_decision = false - -# Request timeout value for communicating with Identity API server. (integer -# value) -#http_connect_timeout = - -# How many times are we trying to reconnect when communicating with Identity API -# Server. (integer value) -#http_request_max_retries = 3 - -# Request environment key where the Swift cache object is stored. When -# auth_token middleware is deployed with a Swift cache, use this option to have -# the middleware share a caching backend with swift. Otherwise, use the -# ``memcached_servers`` option instead. (string value) -#cache = - -# Required if identity server requires client certificate (string value) -#certfile = - -# Required if identity server requires client certificate (string value) -#keyfile = - -# A PEM encoded Certificate Authority to use when verifying HTTPs connections. -# Defaults to system CAs. (string value) -#cafile = - -# Verify HTTPS connections. (boolean value) -#insecure = false - -# The region in which the identity server can be found. (string value) -#region_name = - -# DEPRECATED: Directory used to cache files related to PKI tokens. This option -# has been deprecated in the Ocata release and will be removed in the P release. -# (string value) -# This option is deprecated for removal since Ocata. -# Its value may be silently ignored in the future. -# Reason: PKI token format is no longer supported. 
-#signing_dir = - -# Optionally specify a list of memcached server(s) to use for caching. If left -# undefined, tokens will instead be cached in-process. (list value) -# Deprecated group/name - [keystone_authtoken]/memcache_servers -#memcached_servers = - -# In order to prevent excessive effort spent validating tokens, the middleware -# caches previously-seen tokens for a configurable duration (in seconds). Set to -# -1 to disable caching completely. (integer value) -#token_cache_time = 300 - -# DEPRECATED: Determines the frequency at which the list of revoked tokens is -# retrieved from the Identity service (in seconds). A high number of revocation -# events combined with a low cache duration may significantly reduce -# performance. Only valid for PKI tokens. This option has been deprecated in the -# Ocata release and will be removed in the P release. (integer value) -# This option is deprecated for removal since Ocata. -# Its value may be silently ignored in the future. -# Reason: PKI token format is no longer supported. -#revocation_cache_time = 10 - -# (Optional) If defined, indicate whether token data should be authenticated or -# authenticated and encrypted. If MAC, token data is authenticated (with HMAC) -# in the cache. If ENCRYPT, token data is encrypted and authenticated in the -# cache. If the value is not one of these options or empty, auth_token will -# raise an exception on initialization. (string value) -# Allowed values: None, MAC, ENCRYPT -#memcache_security_strategy = None - -# (Optional, mandatory if memcache_security_strategy is defined) This string is -# used for key derivation. (string value) -#memcache_secret_key = - -# (Optional) Number of seconds memcached server is considered dead before it is -# tried again. (integer value) -#memcache_pool_dead_retry = 300 - -# (Optional) Maximum total number of open connections to every memcached server. 
-# (integer value) -#memcache_pool_maxsize = 10 - -# (Optional) Socket timeout in seconds for communicating with a memcached -# server. (integer value) -#memcache_pool_socket_timeout = 3 - -# (Optional) Number of seconds a connection to memcached is held unused in the -# pool before it is closed. (integer value) -#memcache_pool_unused_timeout = 60 - -# (Optional) Number of seconds that an operation will wait to get a memcached -# client connection from the pool. (integer value) -#memcache_pool_conn_get_timeout = 10 - -# (Optional) Use the advanced (eventlet safe) memcached client pool. The -# advanced pool will only work under python 2.x. (boolean value) -#memcache_use_advanced_pool = false - -# (Optional) Indicate whether to set the X-Service-Catalog header. If False, -# middleware will not ask for service catalog on token validation and will not -# set the X-Service-Catalog header. (boolean value) -#include_service_catalog = true - -# Used to control the use and type of token binding. Can be set to: "disabled" -# to not check token binding. "permissive" (default) to validate binding -# information if the bind type is of a form known to the server and ignore it if -# not. "strict" like "permissive" but if the bind type is unknown the token will -# be rejected. "required" any form of token binding is needed to be allowed. -# Finally the name of a binding method that must be present in tokens. (string -# value) -#enforce_token_bind = permissive - -# DEPRECATED: If true, the revocation list will be checked for cached tokens. -# This requires that PKI tokens are configured on the identity server. (boolean -# value) -# This option is deprecated for removal since Ocata. -# Its value may be silently ignored in the future. -# Reason: PKI token format is no longer supported. -#check_revocations_for_cached = false - -# DEPRECATED: Hash algorithms to use for hashing PKI tokens. This may be a -# single algorithm or multiple. 
The algorithms are those supported by Python -# standard hashlib.new(). The hashes will be tried in the order given, so put -# the preferred one first for performance. The result of the first hash will be -# stored in the cache. This will typically be set to multiple values only while -# migrating from a less secure algorithm to a more secure one. Once all the old -# tokens are expired this option should be set to a single value for better -# performance. (list value) -# This option is deprecated for removal since Ocata. -# Its value may be silently ignored in the future. -# Reason: PKI token format is no longer supported. -#hash_algorithms = md5 - -# A choice of roles that must be present in a service token. Service tokens are -# allowed to request that an expired token can be used and so this check should -# tightly control that only actual services should be sending this token. Roles -# here are applied as an ANY check so any role in this list must be present. For -# backwards compatibility reasons this currently only affects the allow_expired -# check. (list value) -#service_token_roles = service - -# For backwards compatibility reasons we must let valid service tokens pass that -# don't pass the service_token_roles check as valid. Setting this true will -# become the default in a future release and should be enabled if possible. -# (boolean value) -#service_token_roles_required = false - -# Authentication type to load (string value) -# Deprecated group/name - [keystone_authtoken]/auth_plugin -#auth_type = - -# Config Section from which to load plugin specific options (string value) -#auth_section = - - -[matchmaker_redis] - -# -# From oslo.messaging -# - -# DEPRECATED: Host to locate redis. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#host = 127.0.0.1 - -# DEPRECATED: Use this port to connect to redis host. 
(port value) -# Minimum value: 0 -# Maximum value: 65535 -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#port = 6379 - -# DEPRECATED: Password for Redis server (optional). (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#password = - -# DEPRECATED: List of Redis Sentinel hosts (fault tolerance mode), e.g., -# [host:port, host1:port ... ] (list value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#sentinel_hosts = - -# Redis replica set name. (string value) -#sentinel_group_name = oslo-messaging-zeromq - -# Time in ms to wait between connection attempts. (integer value) -#wait_timeout = 2000 - -# Time in ms to wait before the transaction is killed. (integer value) -#check_timeout = 20000 - -# Timeout in ms on blocking socket operations. (integer value) -#socket_timeout = 10000 - - -[oslo_concurrency] - -# -# From oslo.concurrency -# - -# Enables or disables inter-process locks. (boolean value) -# Deprecated group/name - [DEFAULT]/disable_process_locking -#disable_process_locking = false - -# Directory to use for lock files. For security, the specified directory should -# only be writable by the user running the processes that need locking. Defaults -# to environment variable OSLO_LOCK_PATH. If external locks are used, a lock -# path must be set. (string value) -# Deprecated group/name - [DEFAULT]/lock_path -#lock_path = - - -[oslo_messaging_amqp] - -# -# From oslo.messaging -# - -# Name for the AMQP container. must be globally unique. 
Defaults to a generated -# UUID (string value) -# Deprecated group/name - [amqp1]/container_name -#container_name = - -# Timeout for inactive connections (in seconds) (integer value) -# Deprecated group/name - [amqp1]/idle_timeout -#idle_timeout = 0 - -# Debug: dump AMQP frames to stdout (boolean value) -# Deprecated group/name - [amqp1]/trace -#trace = false - -# CA certificate PEM file used to verify the server's certificate (string value) -# Deprecated group/name - [amqp1]/ssl_ca_file -#ssl_ca_file = - -# Self-identifying certificate PEM file for client authentication (string value) -# Deprecated group/name - [amqp1]/ssl_cert_file -#ssl_cert_file = - -# Private key PEM file used to sign ssl_cert_file certificate (optional) (string -# value) -# Deprecated group/name - [amqp1]/ssl_key_file -#ssl_key_file = - -# Password for decrypting ssl_key_file (if encrypted) (string value) -# Deprecated group/name - [amqp1]/ssl_key_password -#ssl_key_password = - -# DEPRECATED: Accept clients using either SSL or plain TCP (boolean value) -# Deprecated group/name - [amqp1]/allow_insecure_clients -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Not applicable - not a SSL server -#allow_insecure_clients = false - -# Space separated list of acceptable SASL mechanisms (string value) -# Deprecated group/name - [amqp1]/sasl_mechanisms -#sasl_mechanisms = - -# Path to directory that contains the SASL configuration (string value) -# Deprecated group/name - [amqp1]/sasl_config_dir -#sasl_config_dir = - -# Name of configuration file (without .conf suffix) (string value) -# Deprecated group/name - [amqp1]/sasl_config_name -#sasl_config_name = - -# User name for message broker authentication (string value) -# Deprecated group/name - [amqp1]/username -#username = - -# Password for message broker authentication (string value) -# Deprecated group/name - [amqp1]/password -#password = - -# Seconds to pause before attempting to re-connect. 
(integer value) -# Minimum value: 1 -#connection_retry_interval = 1 - -# Increase the connection_retry_interval by this many seconds after each -# unsuccessful failover attempt. (integer value) -# Minimum value: 0 -#connection_retry_backoff = 2 - -# Maximum limit for connection_retry_interval + connection_retry_backoff -# (integer value) -# Minimum value: 1 -#connection_retry_interval_max = 30 - -# Time to pause between re-connecting an AMQP 1.0 link that failed due to a -# recoverable error. (integer value) -# Minimum value: 1 -#link_retry_delay = 10 - -# The maximum number of attempts to re-send a reply message which failed due to -# a recoverable error. (integer value) -# Minimum value: -1 -#default_reply_retry = 0 - -# The deadline for an rpc reply message delivery. (integer value) -# Minimum value: 5 -#default_reply_timeout = 30 - -# The deadline for an rpc cast or call message delivery. Only used when caller -# does not provide a timeout expiry. (integer value) -# Minimum value: 5 -#default_send_timeout = 30 - -# The deadline for a sent notification message delivery. Only used when caller -# does not provide a timeout expiry. (integer value) -# Minimum value: 5 -#default_notify_timeout = 30 - -# The duration to schedule a purge of idle sender links. Detach link after -# expiry. (integer value) -# Minimum value: 1 -#default_sender_link_timeout = 600 - -# Indicates the addressing mode used by the driver. 
-# Permitted values: -# 'legacy' - use legacy non-routable addressing -# 'routable' - use routable addresses -# 'dynamic' - use legacy addresses if the message bus does not support routing -# otherwise use routable addressing (string value) -#addressing_mode = dynamic - -# address prefix used when sending to a specific server (string value) -# Deprecated group/name - [amqp1]/server_request_prefix -#server_request_prefix = exclusive - -# address prefix used when broadcasting to all servers (string value) -# Deprecated group/name - [amqp1]/broadcast_prefix -#broadcast_prefix = broadcast - -# address prefix when sending to any server in group (string value) -# Deprecated group/name - [amqp1]/group_request_prefix -#group_request_prefix = unicast - -# Address prefix for all generated RPC addresses (string value) -#rpc_address_prefix = openstack.org/om/rpc - -# Address prefix for all generated Notification addresses (string value) -#notify_address_prefix = openstack.org/om/notify - -# Appended to the address prefix when sending a fanout message. Used by the -# message bus to identify fanout messages. (string value) -#multicast_address = multicast - -# Appended to the address prefix when sending to a particular RPC/Notification -# server. Used by the message bus to identify messages sent to a single -# destination. (string value) -#unicast_address = unicast - -# Appended to the address prefix when sending to a group of consumers. Used by -# the message bus to identify messages that should be delivered in a round-robin -# fashion across consumers. (string value) -#anycast_address = anycast - -# Exchange name used in notification addresses. -# Exchange name resolution precedence: -# Target.exchange if set -# else default_notification_exchange if set -# else control_exchange if set -# else 'notify' (string value) -#default_notification_exchange = - -# Exchange name used in RPC addresses. 
-# Exchange name resolution precedence: -# Target.exchange if set -# else default_rpc_exchange if set -# else control_exchange if set -# else 'rpc' (string value) -#default_rpc_exchange = - -# Window size for incoming RPC Reply messages. (integer value) -# Minimum value: 1 -#reply_link_credit = 200 - -# Window size for incoming RPC Request messages (integer value) -# Minimum value: 1 -#rpc_server_credit = 100 - -# Window size for incoming Notification messages (integer value) -# Minimum value: 1 -#notify_server_credit = 100 - -# Send messages of this type pre-settled. -# Pre-settled messages will not receive acknowledgement -# from the peer. Note well: pre-settled messages may be -# silently discarded if the delivery fails. -# Permitted values: -# 'rpc-call' - send RPC Calls pre-settled -# 'rpc-reply'- send RPC Replies pre-settled -# 'rpc-cast' - Send RPC Casts pre-settled -# 'notify' - Send Notifications pre-settled -# (multi valued) -#pre_settled = rpc-cast -#pre_settled = rpc-reply - - -[oslo_messaging_kafka] - -# -# From oslo.messaging -# - -# DEPRECATED: Default Kafka broker Host (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#kafka_default_host = localhost - -# DEPRECATED: Default Kafka broker Port (port value) -# Minimum value: 0 -# Maximum value: 65535 -# This option is deprecated for removal. -# Its value may be silently ignored in the future. 
-# Reason: Replaced by [DEFAULT]/transport_url -#kafka_default_port = 9092 - -# Max fetch bytes of Kafka consumer (integer value) -#kafka_max_fetch_bytes = 1048576 - -# Default timeout(s) for Kafka consumers (integer value) -#kafka_consumer_timeout = 1.0 - -# Pool Size for Kafka Consumers (integer value) -#pool_size = 10 - -# The pool size limit for connections expiration policy (integer value) -#conn_pool_min_size = 2 - -# The time-to-live in sec of idle connections in the pool (integer value) -#conn_pool_ttl = 1200 - -# Group id for Kafka consumer. Consumers in one group will coordinate message -# consumption (string value) -#consumer_group = oslo_messaging_consumer - -# Upper bound on the delay for KafkaProducer batching in seconds (floating point -# value) -#producer_batch_timeout = 0.0 - -# Size of batch for the producer async send (integer value) -#producer_batch_size = 16384 - - -[oslo_messaging_notifications] - -# -# From oslo.messaging -# - -# The Drivers(s) to handle sending notifications. Possible values are messaging, -# messagingv2, routing, log, test, noop (multi valued) -# Deprecated group/name - [DEFAULT]/notification_driver -#driver = - -# A URL representing the messaging driver to use for notifications. If not set, -# we fall back to the same configuration used for RPC. (string value) -# Deprecated group/name - [DEFAULT]/notification_transport_url -#transport_url = - -# AMQP topic used for OpenStack notifications. (list value) -# Deprecated group/name - [rpc_notifier2]/topics -# Deprecated group/name - [DEFAULT]/notification_topics -#topics = notifications - - -[oslo_messaging_rabbit] - -# -# From oslo.messaging -# - -# Use durable queues in AMQP. (boolean value) -# Deprecated group/name - [DEFAULT]/amqp_durable_queues -# Deprecated group/name - [DEFAULT]/rabbit_durable_queues -#amqp_durable_queues = false - -# Auto-delete queues in AMQP. 
(boolean value) -# Deprecated group/name - [DEFAULT]/amqp_auto_delete -#amqp_auto_delete = false - -# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and -# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some -# distributions. (string value) -# Deprecated group/name - [DEFAULT]/kombu_ssl_version -#kombu_ssl_version = - -# SSL key file (valid only if SSL enabled). (string value) -# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile -#kombu_ssl_keyfile = - -# SSL cert file (valid only if SSL enabled). (string value) -# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile -#kombu_ssl_certfile = - -# SSL certification authority file (valid only if SSL enabled). (string value) -# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs -#kombu_ssl_ca_certs = - -# How long to wait before reconnecting in response to an AMQP consumer cancel -# notification. (floating point value) -# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay -#kombu_reconnect_delay = 1.0 - -# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not -# be used. This option may not be available in future versions. (string value) -#kombu_compression = - -# How long to wait a missing client before abandoning to send it its replies. -# This value should not be longer than rpc_response_timeout. (integer value) -# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout -#kombu_missing_consumer_retry_timeout = 60 - -# Determines how the next RabbitMQ node is chosen in case the one we are -# currently connected to becomes unavailable. Takes effect only if more than one -# RabbitMQ node is provided in config. (string value) -# Allowed values: round-robin, shuffle -#kombu_failover_strategy = round-robin - -# DEPRECATED: The RabbitMQ broker address where a single node is used. (string -# value) -# Deprecated group/name - [DEFAULT]/rabbit_host -# This option is deprecated for removal. 
-# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#rabbit_host = localhost - -# DEPRECATED: The RabbitMQ broker port where a single node is used. (port value) -# Minimum value: 0 -# Maximum value: 65535 -# Deprecated group/name - [DEFAULT]/rabbit_port -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#rabbit_port = 5672 - -# DEPRECATED: RabbitMQ HA cluster host:port pairs. (list value) -# Deprecated group/name - [DEFAULT]/rabbit_hosts -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#rabbit_hosts = $rabbit_host:$rabbit_port - -# Connect over SSL for RabbitMQ. (boolean value) -# Deprecated group/name - [DEFAULT]/rabbit_use_ssl -#rabbit_use_ssl = false - -# DEPRECATED: The RabbitMQ userid. (string value) -# Deprecated group/name - [DEFAULT]/rabbit_userid -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#rabbit_userid = guest - -# DEPRECATED: The RabbitMQ password. (string value) -# Deprecated group/name - [DEFAULT]/rabbit_password -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#rabbit_password = guest - -# The RabbitMQ login method. (string value) -# Allowed values: PLAIN, AMQPLAIN, RABBIT-CR-DEMO -# Deprecated group/name - [DEFAULT]/rabbit_login_method -#rabbit_login_method = AMQPLAIN - -# DEPRECATED: The RabbitMQ virtual host. (string value) -# Deprecated group/name - [DEFAULT]/rabbit_virtual_host -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#rabbit_virtual_host = / - -# How frequently to retry connecting with RabbitMQ. 
(integer value) -#rabbit_retry_interval = 1 - -# How long to backoff for between retries when connecting to RabbitMQ. (integer -# value) -# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff -#rabbit_retry_backoff = 2 - -# Maximum interval of RabbitMQ connection retries. Default is 30 seconds. -# (integer value) -#rabbit_interval_max = 30 - -# DEPRECATED: Maximum number of RabbitMQ connection retries. Default is 0 -# (infinite retry count). (integer value) -# Deprecated group/name - [DEFAULT]/rabbit_max_retries -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -#rabbit_max_retries = 0 - -# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this -# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring -# is no longer controlled by the x-ha-policy argument when declaring a queue. If -# you just want to make sure that all queues (except those with auto-generated -# names) are mirrored across all nodes, run: "rabbitmqctl set_policy HA -# '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value) -# Deprecated group/name - [DEFAULT]/rabbit_ha_queues -#rabbit_ha_queues = false - -# Positive integer representing duration in seconds for queue TTL (x-expires). -# Queues which are unused for the duration of the TTL are automatically deleted. -# The parameter affects only reply and fanout queues. (integer value) -# Minimum value: 1 -#rabbit_transient_queues_ttl = 1800 - -# Specifies the number of messages to prefetch. Setting to zero allows unlimited -# messages. (integer value) -#rabbit_qos_prefetch_count = 0 - -# Number of seconds after which the Rabbit broker is considered down if -# heartbeat's keep-alive fails (0 disable the heartbeat). EXPERIMENTAL (integer -# value) -#heartbeat_timeout_threshold = 60 - -# How often times during the heartbeat_timeout_threshold we check the heartbeat. 
-# (integer value) -#heartbeat_rate = 2 - -# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value) -# Deprecated group/name - [DEFAULT]/fake_rabbit -#fake_rabbit = false - -# Maximum number of channels to allow (integer value) -#channel_max = - -# The maximum byte size for an AMQP frame (integer value) -#frame_max = - -# How often to send heartbeats for consumer's connections (integer value) -#heartbeat_interval = 3 - -# Enable SSL (boolean value) -#ssl = - -# Arguments passed to ssl.wrap_socket (dict value) -#ssl_options = - -# Set socket timeout in seconds for connection's socket (floating point value) -#socket_timeout = 0.25 - -# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating point value) -#tcp_user_timeout = 0.25 - -# Set delay for reconnection to some host which has connection error (floating -# point value) -#host_connection_reconnect_delay = 0.25 - -# Connection factory implementation (string value) -# Allowed values: new, single, read_write -#connection_factory = single - -# Maximum number of connections to keep queued. (integer value) -#pool_max_size = 30 - -# Maximum number of connections to create above `pool_max_size`. (integer value) -#pool_max_overflow = 0 - -# Default number of seconds to wait for a connections to available (integer -# value) -#pool_timeout = 30 - -# Lifetime of a connection (since creation) in seconds or None for no recycling. -# Expired connections are closed on acquire. (integer value) -#pool_recycle = 600 - -# Threshold at which inactive (since release) connections are considered stale -# in seconds or None for no staleness. Stale connections are closed on acquire. -# (integer value) -#pool_stale = 60 - -# Default serialization mechanism for serializing/deserializing -# outgoing/incoming messages (string value) -# Allowed values: json, msgpack -#default_serializer_type = json - -# Persist notification messages. 
(boolean value) -#notification_persistence = false - -# Exchange name for sending notifications (string value) -#default_notification_exchange = ${control_exchange}_notification - -# Max number of not acknowledged message which RabbitMQ can send to notification -# listener. (integer value) -#notification_listener_prefetch_count = 100 - -# Reconnecting retry count in case of connectivity problem during sending -# notification, -1 means infinite retry. (integer value) -#default_notification_retry_attempts = -1 - -# Reconnecting retry delay in case of connectivity problem during sending -# notification message (floating point value) -#notification_retry_delay = 0.25 - -# Time to live for rpc queues without consumers in seconds. (integer value) -#rpc_queue_expiration = 60 - -# Exchange name for sending RPC messages (string value) -#default_rpc_exchange = ${control_exchange}_rpc - -# Exchange name for receiving RPC replies (string value) -#rpc_reply_exchange = ${control_exchange}_rpc_reply - -# Max number of not acknowledged message which RabbitMQ can send to rpc -# listener. (integer value) -#rpc_listener_prefetch_count = 100 - -# Max number of not acknowledged message which RabbitMQ can send to rpc reply -# listener. (integer value) -#rpc_reply_listener_prefetch_count = 100 - -# Reconnecting retry count in case of connectivity problem during sending reply. -# -1 means infinite retry during rpc_timeout (integer value) -#rpc_reply_retry_attempts = -1 - -# Reconnecting retry delay in case of connectivity problem during sending reply. -# (floating point value) -#rpc_reply_retry_delay = 0.25 - -# Reconnecting retry count in case of connectivity problem during sending RPC -# message, -1 means infinite retry. 
If actual retry attempts in not 0 the rpc -# request could be processed more than one time (integer value) -#default_rpc_retry_attempts = -1 - -# Reconnecting retry delay in case of connectivity problem during sending RPC -# message (floating point value) -#rpc_retry_delay = 0.25 - - -[oslo_messaging_zmq] - -# -# From oslo.messaging -# - -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. (string value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_bind_address -#rpc_zmq_bind_address = * - -# MatchMaker driver. (string value) -# Allowed values: redis, sentinel, dummy -# Deprecated group/name - [DEFAULT]/rpc_zmq_matchmaker -#rpc_zmq_matchmaker = redis - -# Number of ZeroMQ contexts, defaults to 1. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_contexts -#rpc_zmq_contexts = 1 - -# Maximum number of ingress messages to locally buffer per topic. Default is -# unlimited. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_topic_backlog -#rpc_zmq_topic_backlog = - -# Directory for holding IPC sockets. (string value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_ipc_dir -#rpc_zmq_ipc_dir = /var/run/openstack - -# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match -# "host" option, if running Nova. (string value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_host -#rpc_zmq_host = localhost - -# Number of seconds to wait before all pending messages will be sent after -# closing a socket. The default value of -1 specifies an infinite linger period. -# The value of 0 specifies no linger period. Pending messages shall be discarded -# immediately when the socket is closed. Positive values specify an upper bound -# for the linger period. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_cast_timeout -#zmq_linger = -1 - -# The default number of seconds that poll should wait. Poll raises timeout -# exception when timeout expired. 
(integer value) -# Deprecated group/name - [DEFAULT]/rpc_poll_timeout -#rpc_poll_timeout = 1 - -# Expiration timeout in seconds of a name service record about existing target ( -# < 0 means no timeout). (integer value) -# Deprecated group/name - [DEFAULT]/zmq_target_expire -#zmq_target_expire = 300 - -# Update period in seconds of a name service record about existing target. -# (integer value) -# Deprecated group/name - [DEFAULT]/zmq_target_update -#zmq_target_update = 180 - -# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean -# value) -# Deprecated group/name - [DEFAULT]/use_pub_sub -#use_pub_sub = false - -# Use ROUTER remote proxy. (boolean value) -# Deprecated group/name - [DEFAULT]/use_router_proxy -#use_router_proxy = false - -# This option makes direct connections dynamic or static. It makes sense only -# with use_router_proxy=False which means to use direct connections for direct -# message types (ignored otherwise). (boolean value) -#use_dynamic_connections = false - -# How many additional connections to a host will be made for failover reasons. -# This option is actual only in dynamic connections mode. (integer value) -#zmq_failover_connections = 2 - -# Minimal port number for random ports range. (port value) -# Minimum value: 0 -# Maximum value: 65535 -# Deprecated group/name - [DEFAULT]/rpc_zmq_min_port -#rpc_zmq_min_port = 49153 - -# Maximal port number for random ports range. (integer value) -# Minimum value: 1 -# Maximum value: 65536 -# Deprecated group/name - [DEFAULT]/rpc_zmq_max_port -#rpc_zmq_max_port = 65536 - -# Number of retries to find free port number before fail with ZMQBindError. 
-# (integer value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_bind_port_retries -#rpc_zmq_bind_port_retries = 100 - -# Default serialization mechanism for serializing/deserializing -# outgoing/incoming messages (string value) -# Allowed values: json, msgpack -# Deprecated group/name - [DEFAULT]/rpc_zmq_serialization -#rpc_zmq_serialization = json - -# This option configures round-robin mode in zmq socket. True means not keeping -# a queue when server side disconnects. False means to keep queue and messages -# even if server is disconnected, when the server appears we send all -# accumulated messages to it. (boolean value) -#zmq_immediate = true - -# Enable/disable TCP keepalive (KA) mechanism. The default value of -1 (or any -# other negative value) means to skip any overrides and leave it to OS default; -# 0 and 1 (or any other positive value) mean to disable and enable the option -# respectively. (integer value) -#zmq_tcp_keepalive = -1 - -# The duration between two keepalive transmissions in idle condition. The unit -# is platform dependent, for example, seconds in Linux, milliseconds in Windows -# etc. The default value of -1 (or any other negative value and 0) means to skip -# any overrides and leave it to OS default. (integer value) -#zmq_tcp_keepalive_idle = -1 - -# The number of retransmissions to be carried out before declaring that remote -# end is not available. The default value of -1 (or any other negative value and -# 0) means to skip any overrides and leave it to OS default. (integer value) -#zmq_tcp_keepalive_cnt = -1 - -# The duration between two successive keepalive retransmissions, if -# acknowledgement to the previous keepalive transmission is not received. The -# unit is platform dependent, for example, seconds in Linux, milliseconds in -# Windows etc. The default value of -1 (or any other negative value and 0) means -# to skip any overrides and leave it to OS default. 
(integer value) -#zmq_tcp_keepalive_intvl = -1 - -# Maximum number of (green) threads to work concurrently. (integer value) -#rpc_thread_pool_size = 100 - -# Expiration timeout in seconds of a sent/received message after which it is not -# tracked anymore by a client/server. (integer value) -#rpc_message_ttl = 300 - -# Wait for message acknowledgements from receivers. This mechanism works only -# via proxy without PUB/SUB. (boolean value) -#rpc_use_acks = false - -# Number of seconds to wait for an ack from a cast/call. After each retry -# attempt this timeout is multiplied by some specified multiplier. (integer -# value) -#rpc_ack_timeout_base = 15 - -# Number to multiply base ack timeout by after each retry attempt. (integer -# value) -#rpc_ack_timeout_multiplier = 2 - -# Default number of message sending attempts in case of any problems occurred: -# positive value N means at most N retries, 0 means no retries, None or -1 (or -# any other negative values) mean to retry forever. This option is used only if -# acknowledgments are enabled. (integer value) -#rpc_retry_attempts = 3 - -# List of publisher hosts SubConsumer can subscribe on. This option has higher -# priority then the default publishers list taken from the matchmaker. (list -# value) -#subscribe_on = - - -[oslo_middleware] - -# -# From oslo.middleware.http_proxy_to_wsgi -# - -# Whether the application is behind a proxy or not. This determines if the -# middleware should parse the headers or not. (boolean value) -#enable_proxy_headers_parsing = false - - -[oslo_policy] - -# -# From oslo.policy -# - -# The file that defines policies. (string value) -# Deprecated group/name - [DEFAULT]/policy_file -#policy_file = policy.json - -# Default rule. Enforced when a requested rule is not found. (string value) -# Deprecated group/name - [DEFAULT]/policy_default_rule -#policy_default_rule = default - -# Directories where policy configuration files are stored. 
They can be relative -# to any directory in the search path defined by the config_dir option, or -# absolute paths. The file defined by policy_file must exist for these -# directories to be searched. Missing or empty directories are ignored. (multi -# valued) -# Deprecated group/name - [DEFAULT]/policy_dirs -#policy_dirs = policy.d - - -[paste_deploy] - -# -# From glance.api -# - -# -# Deployment flavor to use in the server application pipeline. -# -# Provide a string value representing the appropriate deployment -# flavor used in the server application pipleline. This is typically -# the partial name of a pipeline in the paste configuration file with -# the service name removed. -# -# For example, if your paste section name in the paste configuration -# file is [pipeline:glance-api-keystone], set ``flavor`` to -# ``keystone``. -# -# Possible values: -# * String value representing a partial pipeline name. -# -# Related Options: -# * config_file -# -# (string value) -#flavor = keystone - -# -# Name of the paste configuration file. -# -# Provide a string value representing the name of the paste -# configuration file to use for configuring piplelines for -# server application deployments. -# -# NOTES: -# * Provide the name or the path relative to the glance directory -# for the paste configuration file and not the absolute path. -# * The sample paste configuration file shipped with Glance need -# not be edited in most cases as it comes with ready-made -# pipelines for all common deployment flavors. -# -# If no value is specified for this option, the ``paste.ini`` file -# with the prefix of the corresponding Glance service's configuration -# file name will be searched for in the known configuration -# directories. (For example, if this option is missing from or has no -# value set in ``glance-api.conf``, the service will look for a file -# named ``glance-api-paste.ini``.) If the paste configuration file is -# not found, the service will not start. 
-# -# Possible values: -# * A string value representing the name of the paste configuration -# file. -# -# Related Options: -# * flavor -# -# (string value) -#config_file = glance-api-paste.ini - - -[profiler] - -# -# From glance.api -# - -# -# Enables the profiling for all services on this node. Default value is False -# (fully disable the profiling feature). -# -# Possible values: -# -# * True: Enables the feature -# * False: Disables the feature. The profiling cannot be started via this -# project -# operations. If the profiling is triggered by another project, this project -# part -# will be empty. -# (boolean value) -# Deprecated group/name - [profiler]/profiler_enabled -#enabled = false - -# -# Enables SQL requests profiling in services. Default value is False (SQL -# requests won't be traced). -# -# Possible values: -# -# * True: Enables SQL requests profiling. Each SQL query will be part of the -# trace and can the be analyzed by how much time was spent for that. -# * False: Disables SQL requests profiling. The spent time is only shown on a -# higher level of operations. Single SQL queries cannot be analyzed this -# way. -# (boolean value) -#trace_sqlalchemy = false - -# -# Secret key(s) to use for encrypting context data for performance profiling. -# This string value should have the following format: [,,...], -# where each key is some random string. A user who triggers the profiling via -# the REST API has to set one of these keys in the headers of the REST API call -# to include profiling results of this node for this particular project. -# -# Both "enabled" flag and "hmac_keys" config options should be set to enable -# profiling. Also, to generate correct profiling information across all services -# at least one key needs to be consistent between OpenStack projects. This -# ensures it can be used from client side to generate the trace, containing -# information from all possible resources. 
(string value) -#hmac_keys = SECRET_KEY - -# -# Connection string for a notifier backend. Default value is messaging:// which -# sets the notifier to oslo_messaging. -# -# Examples of possible values: -# -# * messaging://: use oslo_messaging driver for sending notifications. -# * mongodb://127.0.0.1:27017 : use mongodb driver for sending notifications. -# * elasticsearch://127.0.0.1:9200 : use elasticsearch driver for sending -# notifications. -# (string value) -#connection_string = messaging:// - -# -# Document type for notification indexing in elasticsearch. -# (string value) -#es_doc_type = notification - -# -# This parameter is a time value parameter (for example: es_scroll_time=2m), -# indicating for how long the nodes that participate in the search will maintain -# relevant resources in order to continue and support it. -# (string value) -#es_scroll_time = 2m - -# -# Elasticsearch splits large requests in batches. This parameter defines -# maximum size of each batch (for example: es_scroll_size=10000). -# (integer value) -#es_scroll_size = 10000 - -# -# Redissentinel provides a timeout option on the connections. -# This parameter defines that timeout (for example: socket_timeout=0.1). -# (floating point value) -#socket_timeout = 0.1 - -# -# Redissentinel uses a service name to identify a master redis service. -# This parameter defines the name (for example: -# sentinal_service_name=mymaster). -# (string value) -#sentinel_service_name = mymaster - - -[store_type_location_strategy] - -# -# From glance.api -# - -# -# Preference order of storage backends. -# -# Provide a comma separated list of store names in the order in -# which images should be retrieved from storage backends. -# These store names must be registered with the ``stores`` -# configuration option. -# -# NOTE: The ``store_type_preference`` configuration option is applied -# only if ``store_type`` is chosen as a value for the -# ``location_strategy`` configuration option. 
An empty list will not -# change the location order. -# -# Possible values: -# * Empty list -# * Comma separated list of registered store names. Legal values are: -# * file -# * http -# * rbd -# * swift -# * sheepdog -# * cinder -# * vmware -# -# Related options: -# * location_strategy -# * stores -# -# (list value) -#store_type_preference = - - -[task] - -# -# From glance.api -# - -# Time in hours for which a task lives after, either succeeding or failing -# (integer value) -# Deprecated group/name - [DEFAULT]/task_time_to_live -#task_time_to_live = 48 - -# -# Task executor to be used to run task scripts. -# -# Provide a string value representing the executor to use for task -# executions. By default, ``TaskFlow`` executor is used. -# -# ``TaskFlow`` helps make task executions easy, consistent, scalable -# and reliable. It also enables creation of lightweight task objects -# and/or functions that are combined together into flows in a -# declarative manner. -# -# Possible values: -# * taskflow -# -# Related Options: -# * None -# -# (string value) -#task_executor = taskflow - -# -# Absolute path to the work directory to use for asynchronous -# task operations. -# -# The directory set here will be used to operate over images - -# normally before they are imported in the destination store. -# -# NOTE: When providing a value for ``work_dir``, please make sure -# that enough space is provided for concurrent tasks to run -# efficiently without running out of space. -# -# A rough estimation can be done by multiplying the number of -# ``max_workers`` with an average image size (e.g 500MB). The image -# size estimation should be done based on the average size in your -# deployment. Note that depending on the tasks running you may need -# to multiply this number by some factor depending on what the task -# does. For example, you may want to double the available size if -# image conversion is enabled. 
All this being said, remember these -# are just estimations and you should do them based on the worst -# case scenario and be prepared to act in case they were wrong. -# -# Possible values: -# * String value representing the absolute path to the working -# directory -# -# Related Options: -# * None -# -# (string value) -#work_dir = /work_dir - - -[taskflow_executor] - -# -# From glance.api -# - -# -# Set the taskflow engine mode. -# -# Provide a string type value to set the mode in which the taskflow -# engine would schedule tasks to the workers on the hosts. Based on -# this mode, the engine executes tasks either in single or multiple -# threads. The possible values for this configuration option are: -# ``serial`` and ``parallel``. When set to ``serial``, the engine runs -# all the tasks in a single thread which results in serial execution -# of tasks. Setting this to ``parallel`` makes the engine run tasks in -# multiple threads. This results in parallel execution of tasks. -# -# Possible values: -# * serial -# * parallel -# -# Related options: -# * max_workers -# -# (string value) -# Allowed values: serial, parallel -#engine_mode = parallel - -# -# Set the number of engine executable tasks. -# -# Provide an integer value to limit the number of workers that can be -# instantiated on the hosts. In other words, this number defines the -# number of parallel tasks that can be executed at the same time by -# the taskflow engine. This value can be greater than one when the -# engine mode is set to parallel. -# -# Possible values: -# * Integer value greater than or equal to 1 -# -# Related options: -# * engine_mode -# -# (integer value) -# Minimum value: 1 -# Deprecated group/name - [task]/eventlet_executor_pool_size -#max_workers = 10 - -# -# Set the desired image conversion format. -# -# Provide a valid image format to which you want images to be -# converted before they are stored for consumption by Glance. 
-# Appropriate image format conversions are desirable for specific -# storage backends in order to facilitate efficient handling of -# bandwidth and usage of the storage infrastructure. -# -# By default, ``conversion_format`` is not set and must be set -# explicitly in the configuration file. -# -# The allowed values for this option are ``raw``, ``qcow2`` and -# ``vmdk``. The ``raw`` format is the unstructured disk format and -# should be chosen when RBD or Ceph storage backends are used for -# image storage. ``qcow2`` is supported by the QEMU emulator that -# expands dynamically and supports Copy on Write. The ``vmdk`` is -# another common disk format supported by many common virtual machine -# monitors like VMWare Workstation. -# -# Possible values: -# * qcow2 -# * raw -# * vmdk -# -# Related options: -# * disk_formats -# -# (string value) -# Allowed values: qcow2, raw, vmdk -#conversion_format = raw diff --git a/etc/glance-cache.conf b/etc/glance-cache.conf deleted file mode 100644 index 56a20211..00000000 --- a/etc/glance-cache.conf +++ /dev/null @@ -1,2334 +0,0 @@ -[DEFAULT] - -# -# From glance.cache -# - -# -# Allow users to add additional/custom properties to images. -# -# Glance defines a standard set of properties (in its schema) that -# appear on every image. These properties are also known as -# ``base properties``. In addition to these properties, Glance -# allows users to add custom properties to images. These are known -# as ``additional properties``. -# -# By default, this configuration option is set to ``True`` and users -# are allowed to add additional properties. The number of additional -# properties that can be added to an image can be controlled via -# ``image_property_quota`` configuration option. -# -# Possible values: -# * True -# * False -# -# Related options: -# * image_property_quota -# -# (boolean value) -#allow_additional_image_properties = true - -# -# Maximum number of image members per image. 
-# -# This limits the maximum of users an image can be shared with. Any negative -# value is interpreted as unlimited. -# -# Related options: -# * None -# -# (integer value) -#image_member_quota = 128 - -# -# Maximum number of properties allowed on an image. -# -# This enforces an upper limit on the number of additional properties an image -# can have. Any negative value is interpreted as unlimited. -# -# NOTE: This won't have any impact if additional properties are disabled. Please -# refer to ``allow_additional_image_properties``. -# -# Related options: -# * ``allow_additional_image_properties`` -# -# (integer value) -#image_property_quota = 128 - -# -# Maximum number of tags allowed on an image. -# -# Any negative value is interpreted as unlimited. -# -# Related options: -# * None -# -# (integer value) -#image_tag_quota = 128 - -# -# Maximum number of locations allowed on an image. -# -# Any negative value is interpreted as unlimited. -# -# Related options: -# * None -# -# (integer value) -#image_location_quota = 10 - -# -# Python module path of data access API. -# -# Specifies the path to the API to use for accessing the data model. -# This option determines how the image catalog data will be accessed. -# -# Possible values: -# * glance.db.sqlalchemy.api -# * glance.db.registry.api -# * glance.db.simple.api -# -# If this option is set to ``glance.db.sqlalchemy.api`` then the image -# catalog data is stored in and read from the database via the -# SQLAlchemy Core and ORM APIs. -# -# Setting this option to ``glance.db.registry.api`` will force all -# database access requests to be routed through the Registry service. -# This avoids data access from the Glance API nodes for an added layer -# of security, scalability and manageability. -# -# NOTE: In v2 OpenStack Images API, the registry service is optional. -# In order to use the Registry API in v2, the option -# ``enable_v2_registry`` must be set to ``True``. 
-# -# Finally, when this configuration option is set to -# ``glance.db.simple.api``, image catalog data is stored in and read -# from an in-memory data structure. This is primarily used for testing. -# -# Related options: -# * enable_v2_api -# * enable_v2_registry -# -# (string value) -#data_api = glance.db.sqlalchemy.api - -# -# The default number of results to return for a request. -# -# Responses to certain API requests, like list images, may return -# multiple items. The number of results returned can be explicitly -# controlled by specifying the ``limit`` parameter in the API request. -# However, if a ``limit`` parameter is not specified, this -# configuration value will be used as the default number of results to -# be returned for any API request. -# -# NOTES: -# * The value of this configuration option may not be greater than -# the value specified by ``api_limit_max``. -# * Setting this to a very large value may slow down database -# queries and increase response times. Setting this to a -# very low value may result in poor user experience. -# -# Possible values: -# * Any positive integer -# -# Related options: -# * api_limit_max -# -# (integer value) -# Minimum value: 1 -#limit_param_default = 25 - -# -# Maximum number of results that could be returned by a request. -# -# As described in the help text of ``limit_param_default``, some -# requests may return multiple results. The number of results to be -# returned are governed either by the ``limit`` parameter in the -# request or the ``limit_param_default`` configuration option. -# The value in either case, can't be greater than the absolute maximum -# defined by this configuration option. Anything greater than this -# value is trimmed down to the maximum value defined here. -# -# NOTE: Setting this to a very large value may slow down database -# queries and increase response times. Setting this to a -# very low value may result in poor user experience. 
-# -# Possible values: -# * Any positive integer -# -# Related options: -# * limit_param_default -# -# (integer value) -# Minimum value: 1 -#api_limit_max = 1000 - -# -# Show direct image location when returning an image. -# -# This configuration option indicates whether to show the direct image -# location when returning image details to the user. The direct image -# location is where the image data is stored in backend storage. This -# image location is shown under the image property ``direct_url``. -# -# When multiple image locations exist for an image, the best location -# is displayed based on the location strategy indicated by the -# configuration option ``location_strategy``. -# -# NOTES: -# * Revealing image locations can present a GRAVE SECURITY RISK as -# image locations can sometimes include credentials. Hence, this -# is set to ``False`` by default. Set this to ``True`` with -# EXTREME CAUTION and ONLY IF you know what you are doing! -# * If an operator wishes to avoid showing any image location(s) -# to the user, then both this option and -# ``show_multiple_locations`` MUST be set to ``False``. -# -# Possible values: -# * True -# * False -# -# Related options: -# * show_multiple_locations -# * location_strategy -# -# (boolean value) -#show_image_direct_url = false - -# DEPRECATED: -# Show all image locations when returning an image. -# -# This configuration option indicates whether to show all the image -# locations when returning image details to the user. When multiple -# image locations exist for an image, the locations are ordered based -# on the location strategy indicated by the configuration opt -# ``location_strategy``. The image locations are shown under the -# image property ``locations``. -# -# NOTES: -# * Revealing image locations can present a GRAVE SECURITY RISK as -# image locations can sometimes include credentials. Hence, this -# is set to ``False`` by default. 
Set this to ``True`` with -# EXTREME CAUTION and ONLY IF you know what you are doing! -# * If an operator wishes to avoid showing any image location(s) -# to the user, then both this option and -# ``show_image_direct_url`` MUST be set to ``False``. -# -# Possible values: -# * True -# * False -# -# Related options: -# * show_image_direct_url -# * location_strategy -# -# (boolean value) -# This option is deprecated for removal since Newton. -# Its value may be silently ignored in the future. -# Reason: This option will be removed in the Pike release or later because the -# same functionality can be achieved with greater granularity by using policies. -# Please see the Newton release notes for more information. -#show_multiple_locations = false - -# -# Maximum size of image a user can upload in bytes. -# -# An image upload greater than the size mentioned here would result -# in an image creation failure. This configuration option defaults to -# 1099511627776 bytes (1 TiB). -# -# NOTES: -# * This value should only be increased after careful -# consideration and must be set less than or equal to -# 8 EiB (9223372036854775808). -# * This value must be set with careful consideration of the -# backend storage capacity. Setting this to a very low value -# may result in a large number of image failures. And, setting -# this to a very large value may result in faster consumption -# of storage. Hence, this must be set according to the nature of -# images created and storage capacity available. -# -# Possible values: -# * Any positive number less than or equal to 9223372036854775808 -# -# (integer value) -# Minimum value: 1 -# Maximum value: 9223372036854775808 -#image_size_cap = 1099511627776 - -# -# Maximum amount of image storage per tenant. -# -# This enforces an upper limit on the cumulative storage consumed by all images -# of a tenant across all stores. This is a per-tenant limit. -# -# The default unit for this configuration option is Bytes. 
However, storage -# units can be specified using case-sensitive literals ``B``, ``KB``, ``MB``, -# ``GB`` and ``TB`` representing Bytes, KiloBytes, MegaBytes, GigaBytes and -# TeraBytes respectively. Note that there should not be any space between the -# value and unit. Value ``0`` signifies no quota enforcement. Negative values -# are invalid and result in errors. -# -# Possible values: -# * A string that is a valid concatenation of a non-negative integer -# representing the storage value and an optional string literal -# representing storage units as mentioned above. -# -# Related options: -# * None -# -# (string value) -#user_storage_quota = 0 - -# -# Deploy the v1 OpenStack Images API. -# -# When this option is set to ``True``, Glance service will respond to -# requests on registered endpoints conforming to the v1 OpenStack -# Images API. -# -# NOTES: -# * If this option is enabled, then ``enable_v1_registry`` must -# also be set to ``True`` to enable mandatory usage of Registry -# service with v1 API. -# -# * If this option is disabled, then the ``enable_v1_registry`` -# option, which is enabled by default, is also recommended -# to be disabled. -# -# * This option is separate from ``enable_v2_api``, both v1 and v2 -# OpenStack Images API can be deployed independent of each -# other. -# -# * If deploying only the v2 Images API, this option, which is -# enabled by default, should be disabled. -# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v1_registry -# * enable_v2_api -# -# (boolean value) -#enable_v1_api = true - -# -# Deploy the v2 OpenStack Images API. -# -# When this option is set to ``True``, Glance service will respond -# to requests on registered endpoints conforming to the v2 OpenStack -# Images API. -# -# NOTES: -# * If this option is disabled, then the ``enable_v2_registry`` -# option, which is enabled by default, is also recommended -# to be disabled. 
-# -# * This option is separate from ``enable_v1_api``, both v1 and v2 -# OpenStack Images API can be deployed independent of each -# other. -# -# * If deploying only the v1 Images API, this option, which is -# enabled by default, should be disabled. -# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v2_registry -# * enable_v1_api -# -# (boolean value) -#enable_v2_api = true - -# -# Deploy the v1 API Registry service. -# -# When this option is set to ``True``, the Registry service -# will be enabled in Glance for v1 API requests. -# -# NOTES: -# * Use of Registry is mandatory in v1 API, so this option must -# be set to ``True`` if the ``enable_v1_api`` option is enabled. -# -# * If deploying only the v2 OpenStack Images API, this option, -# which is enabled by default, should be disabled. -# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v1_api -# -# (boolean value) -#enable_v1_registry = true - -# -# Deploy the v2 API Registry service. -# -# When this option is set to ``True``, the Registry service -# will be enabled in Glance for v2 API requests. -# -# NOTES: -# * Use of Registry is optional in v2 API, so this option -# must only be enabled if both ``enable_v2_api`` is set to -# ``True`` and the ``data_api`` option is set to -# ``glance.db.registry.api``. -# -# * If deploying only the v1 OpenStack Images API, this option, -# which is enabled by default, should be disabled. -# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v2_api -# * data_api -# -# (boolean value) -#enable_v2_registry = true - -# -# Host address of the pydev server. -# -# Provide a string value representing the hostname or IP of the -# pydev server to use for debugging. The pydev server listens for -# debug connections on this address, facilitating remote debugging -# in Glance. 
-# -# Possible values: -# * Valid hostname -# * Valid IP address -# -# Related options: -# * None -# -# (string value) -#pydev_worker_debug_host = localhost - -# -# Port number that the pydev server will listen on. -# -# Provide a port number to bind the pydev server to. The pydev -# process accepts debug connections on this port and facilitates -# remote debugging in Glance. -# -# Possible values: -# * A valid port number -# -# Related options: -# * None -# -# (port value) -# Minimum value: 0 -# Maximum value: 65535 -#pydev_worker_debug_port = 5678 - -# -# AES key for encrypting store location metadata. -# -# Provide a string value representing the AES cipher to use for -# encrypting Glance store metadata. -# -# NOTE: The AES key to use must be set to a random string of length -# 16, 24 or 32 bytes. -# -# Possible values: -# * String value representing a valid AES key -# -# Related options: -# * None -# -# (string value) -#metadata_encryption_key = - -# -# Digest algorithm to use for digital signature. -# -# Provide a string value representing the digest algorithm to -# use for generating digital signatures. By default, ``sha256`` -# is used. -# -# To get a list of the available algorithms supported by the version -# of OpenSSL on your platform, run the command: -# ``openssl list-message-digest-algorithms``. -# Examples are 'sha1', 'sha256', and 'sha512'. -# -# NOTE: ``digest_algorithm`` is not related to Glance's image signing -# and verification. It is only used to sign the universally unique -# identifier (UUID) as a part of the certificate file and key file -# validation. -# -# Possible values: -# * An OpenSSL message digest algorithm identifier -# -# Relation options: -# * None -# -# (string value) -#digest_algorithm = sha256 - -# -# The relative path to sqlite file database that will be used for image cache -# management. -# -# This is a relative path to the sqlite file database that tracks the age and -# usage statistics of image cache. 
The path is relative to image cache base -# directory, specified by the configuration option ``image_cache_dir``. -# -# This is a lightweight database with just one table. -# -# Possible values: -# * A valid relative path to sqlite file database -# -# Related options: -# * ``image_cache_dir`` -# -# (string value) -#image_cache_sqlite_db = cache.db - -# -# The driver to use for image cache management. -# -# This configuration option provides the flexibility to choose between the -# different image-cache drivers available. An image-cache driver is responsible -# for providing the essential functions of image-cache like write images to/read -# images from cache, track age and usage of cached images, provide a list of -# cached images, fetch size of the cache, queue images for caching and clean up -# the cache, etc. -# -# The essential functions of a driver are defined in the base class -# ``glance.image_cache.drivers.base.Driver``. All image-cache drivers (existing -# and prospective) must implement this interface. Currently available drivers -# are ``sqlite`` and ``xattr``. These drivers primarily differ in the way they -# store the information about cached images: -# * The ``sqlite`` driver uses a sqlite database (which sits on every glance -# node locally) to track the usage of cached images. -# * The ``xattr`` driver uses the extended attributes of files to store this -# information. It also requires a filesystem that sets ``atime`` on the -# files -# when accessed. -# -# Possible values: -# * sqlite -# * xattr -# -# Related options: -# * None -# -# (string value) -# Allowed values: sqlite, xattr -#image_cache_driver = sqlite - -# -# The upper limit on cache size, in bytes, after which the cache-pruner cleans -# up the image cache. -# -# NOTE: This is just a threshold for cache-pruner to act upon. It is NOT a -# hard limit beyond which the image cache would never grow. 
In fact, depending -# on how often the cache-pruner runs and how quickly the cache fills, the image -# cache can far exceed the size specified here very easily. Hence, care must be -# taken to appropriately schedule the cache-pruner and in setting this limit. -# -# Glance caches an image when it is downloaded. Consequently, the size of the -# image cache grows over time as the number of downloads increases. To keep the -# cache size from becoming unmanageable, it is recommended to run the -# cache-pruner as a periodic task. When the cache pruner is kicked off, it -# compares the current size of image cache and triggers a cleanup if the image -# cache grew beyond the size specified here. After the cleanup, the size of -# cache is less than or equal to size specified here. -# -# Possible values: -# * Any non-negative integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#image_cache_max_size = 10737418240 - -# -# The amount of time, in seconds, an incomplete image remains in the cache. -# -# Incomplete images are images for which download is in progress. Please see the -# description of configuration option ``image_cache_dir`` for more detail. -# Sometimes, due to various reasons, it is possible the download may hang and -# the incompletely downloaded image remains in the ``incomplete`` directory. -# This configuration option sets a time limit on how long the incomplete images -# should remain in the ``incomplete`` directory before they are cleaned up. -# Once an incomplete image spends more time than is specified here, it'll be -# removed by cache-cleaner on its next run. -# -# It is recommended to run cache-cleaner as a periodic task on the Glance API -# nodes to keep the incomplete images from occupying disk space. -# -# Possible values: -# * Any non-negative integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#image_cache_stall_time = 86400 - -# -# Base directory for image cache. 
-# -# This is the location where image data is cached and served out of. All cached -# images are stored directly under this directory. This directory also contains -# three subdirectories, namely, ``incomplete``, ``invalid`` and ``queue``. -# -# The ``incomplete`` subdirectory is the staging area for downloading images. An -# image is first downloaded to this directory. When the image download is -# successful it is moved to the base directory. However, if the download fails, -# the partially downloaded image file is moved to the ``invalid`` subdirectory. -# -# The ``queue``subdirectory is used for queuing images for download. This is -# used primarily by the cache-prefetcher, which can be scheduled as a periodic -# task like cache-pruner and cache-cleaner, to cache images ahead of their -# usage. -# Upon receiving the request to cache an image, Glance touches a file in the -# ``queue`` directory with the image id as the file name. The cache-prefetcher, -# when running, polls for the files in ``queue`` directory and starts -# downloading them in the order they were created. When the download is -# successful, the zero-sized file is deleted from the ``queue`` directory. -# If the download fails, the zero-sized file remains and it'll be retried the -# next time cache-prefetcher runs. -# -# Possible values: -# * A valid path -# -# Related options: -# * ``image_cache_sqlite_db`` -# -# (string value) -#image_cache_dir = - -# -# Address the registry server is hosted on. -# -# Possible values: -# * A valid IP or hostname -# -# Related options: -# * None -# -# (string value) -#registry_host = 0.0.0.0 - -# -# Port the registry server is listening on. -# -# Possible values: -# * A valid port number -# -# Related options: -# * None -# -# (port value) -# Minimum value: 0 -# Maximum value: 65535 -#registry_port = 9191 - -# -# Protocol to use for communication with the registry server. 
-# -# Provide a string value representing the protocol to use for -# communication with the registry server. By default, this option is -# set to ``http`` and the connection is not secure. -# -# This option can be set to ``https`` to establish a secure connection -# to the registry server. In this case, provide a key to use for the -# SSL connection using the ``registry_client_key_file`` option. Also -# include the CA file and cert file using the options -# ``registry_client_ca_file`` and ``registry_client_cert_file`` -# respectively. -# -# Possible values: -# * http -# * https -# -# Related options: -# * registry_client_key_file -# * registry_client_cert_file -# * registry_client_ca_file -# -# (string value) -# Allowed values: http, https -#registry_client_protocol = http - -# -# Absolute path to the private key file. -# -# Provide a string value representing a valid absolute path to the -# private key file to use for establishing a secure connection to -# the registry server. -# -# NOTE: This option must be set if ``registry_client_protocol`` is -# set to ``https``. Alternatively, the GLANCE_CLIENT_KEY_FILE -# environment variable may be set to a filepath of the key file. -# -# Possible values: -# * String value representing a valid absolute path to the key -# file. -# -# Related options: -# * registry_client_protocol -# -# (string value) -#registry_client_key_file = /etc/ssl/key/key-file.pem - -# -# Absolute path to the certificate file. -# -# Provide a string value representing a valid absolute path to the -# certificate file to use for establishing a secure connection to -# the registry server. -# -# NOTE: This option must be set if ``registry_client_protocol`` is -# set to ``https``. Alternatively, the GLANCE_CLIENT_CERT_FILE -# environment variable may be set to a filepath of the certificate -# file. -# -# Possible values: -# * String value representing a valid absolute path to the -# certificate file. 
-# -# Related options: -# * registry_client_protocol -# -# (string value) -#registry_client_cert_file = /etc/ssl/certs/file.crt - -# -# Absolute path to the Certificate Authority file. -# -# Provide a string value representing a valid absolute path to the -# certificate authority file to use for establishing a secure -# connection to the registry server. -# -# NOTE: This option must be set if ``registry_client_protocol`` is -# set to ``https``. Alternatively, the GLANCE_CLIENT_CA_FILE -# environment variable may be set to a filepath of the CA file. -# This option is ignored if the ``registry_client_insecure`` option -# is set to ``True``. -# -# Possible values: -# * String value representing a valid absolute path to the CA -# file. -# -# Related options: -# * registry_client_protocol -# * registry_client_insecure -# -# (string value) -#registry_client_ca_file = /etc/ssl/cafile/file.ca - -# -# Set verification of the registry server certificate. -# -# Provide a boolean value to determine whether or not to validate -# SSL connections to the registry server. By default, this option -# is set to ``False`` and the SSL connections are validated. -# -# If set to ``True``, the connection to the registry server is not -# validated via a certifying authority and the -# ``registry_client_ca_file`` option is ignored. This is the -# registry's equivalent of specifying --insecure on the command line -# using glanceclient for the API. -# -# Possible values: -# * True -# * False -# -# Related options: -# * registry_client_protocol -# * registry_client_ca_file -# -# (boolean value) -#registry_client_insecure = false - -# -# Timeout value for registry requests. -# -# Provide an integer value representing the period of time in seconds -# that the API server will wait for a registry request to complete. -# The default value is 600 seconds. -# -# A value of 0 implies that a request will never timeout. 
-# -# Possible values: -# * Zero -# * Positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#registry_client_timeout = 600 - -# DEPRECATED: Whether to pass through the user token when making requests to the -# registry. To prevent failures with token expiration during big files upload, -# it is recommended to set this parameter to False.If "use_user_token" is not in -# effect, then admin credentials can be specified. (boolean value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#use_user_token = true - -# DEPRECATED: The administrators user name. If "use_user_token" is not in -# effect, then admin credentials can be specified. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#admin_user = - -# DEPRECATED: The administrators password. If "use_user_token" is not in effect, -# then admin credentials can be specified. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#admin_password = - -# DEPRECATED: The tenant name of the administrative user. 
If "use_user_token" is -# not in effect, then admin tenant name can be specified. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#admin_tenant_name = - -# DEPRECATED: The URL to the keystone service. If "use_user_token" is not in -# effect and using keystone auth, then URL of keystone can be specified. (string -# value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#auth_url = - -# DEPRECATED: The strategy to use for authentication. If "use_user_token" is not -# in effect, then auth strategy can be specified. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#auth_strategy = noauth - -# DEPRECATED: The region for the authentication service. If "use_user_token" is -# not in effect and using keystone auth, then region name can be specified. -# (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. 
-# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#auth_region = - -# -# From oslo.log -# - -# If set to true, the logging level will be set to DEBUG instead of the default -# INFO level. (boolean value) -# Note: This option can be changed without restarting. -#debug = false - -# DEPRECATED: If set to false, the logging level will be set to WARNING instead -# of the default INFO level. (boolean value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -#verbose = true - -# The name of a logging configuration file. This file is appended to any -# existing logging configuration files. For details about logging configuration -# files, see the Python logging module documentation. Note that when logging -# configuration files are used then all logging configuration is set in the -# configuration file and other logging configuration options are ignored (for -# example, logging_context_format_string). (string value) -# Note: This option can be changed without restarting. -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append = - -# Defines the format string for %%(asctime)s in log records. Default: -# %(default)s . This option is ignored if log_config_append is set. (string -# value) -#log_date_format = %Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to send logging output to. If no default is set, -# logging will go to stderr as defined by use_stderr. This option is ignored if -# log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file = - -# (Optional) The base directory used for relative log_file paths. This option -# is ignored if log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir = - -# Uses logging handler designed to watch file system. When log file is moved or -# removed this handler will open a new log file with specified path -# instantaneously. 
It makes sense only if log_file option is specified and Linux -# platform is used. This option is ignored if log_config_append is set. (boolean -# value) -#watch_log_file = false - -# Use syslog for logging. Existing syslog format is DEPRECATED and will be -# changed later to honor RFC5424. This option is ignored if log_config_append is -# set. (boolean value) -#use_syslog = false - -# Syslog facility to receive log lines. This option is ignored if -# log_config_append is set. (string value) -#syslog_log_facility = LOG_USER - -# Log output to standard error. This option is ignored if log_config_append is -# set. (boolean value) -#use_stderr = false - -# Format string to use for log messages with context. (string value) -#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages when context is undefined. (string -# value) -#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Additional data to append to log message when logging level for the message is -# DEBUG. (string value) -#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format. (string value) -#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s - -# Defines the format string for %(user_identity)s that is used in -# logging_context_format_string. (string value) -#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s - -# List of package logging levels in logger=LEVEL pairs. This option is ignored -# if log_config_append is set. 
(list value) -#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO - -# Enables or disables publication of error events. (boolean value) -#publish_errors = false - -# The format for an instance that is passed with the log message. (string value) -#instance_format = "[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log message. (string -# value) -#instance_uuid_format = "[instance: %(uuid)s] " - -# Interval, number of seconds, of log rate limiting. (integer value) -#rate_limit_interval = 0 - -# Maximum number of logged messages per rate_limit_interval. (integer value) -#rate_limit_burst = 0 - -# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG or -# empty string. Logs with level greater or equal to rate_limit_except_level are -# not filtered. An empty string means that all levels are filtered. (string -# value) -#rate_limit_except_level = CRITICAL - -# Enables or disables fatal status of deprecations. (boolean value) -#fatal_deprecations = false - - -[glance_store] - -# -# From glance.store -# - -# -# List of enabled Glance stores. -# -# Register the storage backends to use for storing disk images -# as a comma separated list. The default stores enabled for -# storing disk images with Glance are ``file`` and ``http``. -# -# Possible values: -# * A comma separated list that could include: -# * file -# * http -# * swift -# * rbd -# * sheepdog -# * cinder -# * vmware -# -# Related Options: -# * default_store -# -# (list value) -#stores = file,http - -# -# The default scheme to use for storing images. 
-# -# Provide a string value representing the default scheme to use for -# storing images. If not set, Glance uses ``file`` as the default -# scheme to store images with the ``file`` store. -# -# NOTE: The value given for this configuration option must be a valid -# scheme for a store registered with the ``stores`` configuration -# option. -# -# Possible values: -# * file -# * filesystem -# * http -# * https -# * swift -# * swift+http -# * swift+https -# * swift+config -# * rbd -# * sheepdog -# * cinder -# * vsphere -# -# Related Options: -# * stores -# -# (string value) -# Allowed values: file, filesystem, http, https, swift, swift+http, swift+https, swift+config, rbd, sheepdog, cinder, vsphere -#default_store = file - -# -# Minimum interval in seconds to execute updating dynamic storage -# capabilities based on current backend status. -# -# Provide an integer value representing time in seconds to set the -# minimum interval before an update of dynamic storage capabilities -# for a storage backend can be attempted. Setting -# ``store_capabilities_update_min_interval`` does not mean updates -# occur periodically based on the set interval. Rather, the update -# is performed at the elapse of this interval set, if an operation -# of the store is triggered. -# -# By default, this option is set to zero and is disabled. Provide an -# integer value greater than zero to enable this option. 
-# -# NOTE: For more information on store capabilities and their updates, -# please visit: https://specs.openstack.org/openstack/glance-specs/specs/kilo -# /store-capabilities.html -# -# For more information on setting up a particular store in your -# deployment and help with the usage of this feature, please contact -# the storage driver maintainers listed here: -# http://docs.openstack.org/developer/glance_store/drivers/index.html -# -# Possible values: -# * Zero -# * Positive integer -# -# Related Options: -# * None -# -# (integer value) -# Minimum value: 0 -#store_capabilities_update_min_interval = 0 - -# -# Information to match when looking for cinder in the service catalog. -# -# When the ``cinder_endpoint_template`` is not set and any of -# ``cinder_store_auth_address``, ``cinder_store_user_name``, -# ``cinder_store_project_name``, ``cinder_store_password`` is not set, -# cinder store uses this information to lookup cinder endpoint from the service -# catalog in the current context. ``cinder_os_region_name``, if set, is taken -# into consideration to fetch the appropriate endpoint. -# -# The service catalog can be listed by the ``openstack catalog list`` command. -# -# Possible values: -# * A string of of the following form: -# ``::`` -# At least ``service_type`` and ``interface`` should be specified. -# ``service_name`` can be omitted. -# -# Related options: -# * cinder_os_region_name -# * cinder_endpoint_template -# * cinder_store_auth_address -# * cinder_store_user_name -# * cinder_store_project_name -# * cinder_store_password -# -# (string value) -#cinder_catalog_info = volumev2::publicURL - -# -# Override service catalog lookup with template for cinder endpoint. -# -# When this option is set, this value is used to generate cinder endpoint, -# instead of looking up from the service catalog. 
-# This value is ignored if ``cinder_store_auth_address``, -# ``cinder_store_user_name``, ``cinder_store_project_name``, and -# ``cinder_store_password`` are specified. -# -# If this configuration option is set, ``cinder_catalog_info`` will be ignored. -# -# Possible values: -# * URL template string for cinder endpoint, where ``%%(tenant)s`` is -# replaced with the current tenant (project) name. -# For example: ``http://cinder.openstack.example.org/v2/%%(tenant)s`` -# -# Related options: -# * cinder_store_auth_address -# * cinder_store_user_name -# * cinder_store_project_name -# * cinder_store_password -# * cinder_catalog_info -# -# (string value) -#cinder_endpoint_template = - -# -# Region name to lookup cinder service from the service catalog. -# -# This is used only when ``cinder_catalog_info`` is used for determining the -# endpoint. If set, the lookup for cinder endpoint by this node is filtered to -# the specified region. It is useful when multiple regions are listed in the -# catalog. If this is not set, the endpoint is looked up from every region. -# -# Possible values: -# * A string that is a valid region name. -# -# Related options: -# * cinder_catalog_info -# -# (string value) -# Deprecated group/name - [glance_store]/os_region_name -#cinder_os_region_name = - -# -# Location of a CA certificates file used for cinder client requests. -# -# The specified CA certificates file, if set, is used to verify cinder -# connections via HTTPS endpoint. If the endpoint is HTTP, this value is -# ignored. -# ``cinder_api_insecure`` must be set to ``True`` to enable the verification. -# -# Possible values: -# * Path to a ca certificates file -# -# Related options: -# * cinder_api_insecure -# -# (string value) -#cinder_ca_certificates_file = - -# -# Number of cinderclient retries on failed http calls. -# -# When a call failed by any errors, cinderclient will retry the call up to the -# specified times after sleeping a few seconds. 
-# -# Possible values: -# * A positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#cinder_http_retries = 3 - -# -# Time period, in seconds, to wait for a cinder volume transition to -# complete. -# -# When the cinder volume is created, deleted, or attached to the glance node to -# read/write the volume data, the volume's state is changed. For example, the -# newly created volume status changes from ``creating`` to ``available`` after -# the creation process is completed. This specifies the maximum time to wait for -# the status change. If a timeout occurs while waiting, or the status is changed -# to an unexpected value (e.g. `error``), the image creation fails. -# -# Possible values: -# * A positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#cinder_state_transition_timeout = 300 - -# -# Allow to perform insecure SSL requests to cinder. -# -# If this option is set to True, HTTPS endpoint connection is verified using the -# CA certificates file specified by ``cinder_ca_certificates_file`` option. -# -# Possible values: -# * True -# * False -# -# Related options: -# * cinder_ca_certificates_file -# -# (boolean value) -#cinder_api_insecure = false - -# -# The address where the cinder authentication service is listening. -# -# When all of ``cinder_store_auth_address``, ``cinder_store_user_name``, -# ``cinder_store_project_name``, and ``cinder_store_password`` options are -# specified, the specified values are always used for the authentication. -# This is useful to hide the image volumes from users by storing them in a -# project/tenant specific to the image service. It also enables users to share -# the image volume among other projects under the control of glance's ACL. -# -# If either of these options are not set, the cinder endpoint is looked up -# from the service catalog, and current context's user and project are used. 
-# -# Possible values: -# * A valid authentication service address, for example: -# ``http://openstack.example.org/identity/v2.0`` -# -# Related options: -# * cinder_store_user_name -# * cinder_store_password -# * cinder_store_project_name -# -# (string value) -#cinder_store_auth_address = - -# -# User name to authenticate against cinder. -# -# This must be used with all the following related options. If any of these are -# not specified, the user of the current context is used. -# -# Possible values: -# * A valid user name -# -# Related options: -# * cinder_store_auth_address -# * cinder_store_password -# * cinder_store_project_name -# -# (string value) -#cinder_store_user_name = - -# -# Password for the user authenticating against cinder. -# -# This must be used with all the following related options. If any of these are -# not specified, the user of the current context is used. -# -# Possible values: -# * A valid password for the user specified by ``cinder_store_user_name`` -# -# Related options: -# * cinder_store_auth_address -# * cinder_store_user_name -# * cinder_store_project_name -# -# (string value) -#cinder_store_password = - -# -# Project name where the image volume is stored in cinder. -# -# If this configuration option is not set, the project in current context is -# used. -# -# This must be used with all the following related options. If any of these are -# not specified, the project of the current context is used. -# -# Possible values: -# * A valid project name -# -# Related options: -# * ``cinder_store_auth_address`` -# * ``cinder_store_user_name`` -# * ``cinder_store_password`` -# -# (string value) -#cinder_store_project_name = - -# -# Path to the rootwrap configuration file to use for running commands as root. -# -# The cinder store requires root privileges to operate the image volumes (for -# connecting to iSCSI/FC volumes and reading/writing the volume data, etc.). 
-# The configuration file should allow the required commands by cinder store and -# os-brick library. -# -# Possible values: -# * Path to the rootwrap config file -# -# Related options: -# * None -# -# (string value) -#rootwrap_config = /etc/glance/rootwrap.conf - -# -# Volume type that will be used for volume creation in cinder. -# -# Some cinder backends can have several volume types to optimize storage usage. -# Adding this option allows an operator to choose a specific volume type -# in cinder that can be optimized for images. -# -# If this is not set, then the default volume type specified in the cinder -# configuration will be used for volume creation. -# -# Possible values: -# * A valid volume type from cinder -# -# Related options: -# * None -# -# (string value) -#cinder_volume_type = - -# -# Directory to which the filesystem backend store writes images. -# -# Upon start up, Glance creates the directory if it doesn't already -# exist and verifies write access to the user under which -# ``glance-api`` runs. If the write access isn't available, a -# ``BadStoreConfiguration`` exception is raised and the filesystem -# store may not be available for adding new images. -# -# NOTE: This directory is used only when filesystem store is used as a -# storage backend. Either ``filesystem_store_datadir`` or -# ``filesystem_store_datadirs`` option must be specified in -# ``glance-api.conf``. If both options are specified, a -# ``BadStoreConfiguration`` will be raised and the filesystem store -# may not be available for adding new images. -# -# Possible values: -# * A valid path to a directory -# -# Related options: -# * ``filesystem_store_datadirs`` -# * ``filesystem_store_file_perm`` -# -# (string value) -#filesystem_store_datadir = /var/lib/glance/images - -# -# List of directories and their priorities to which the filesystem -# backend store writes images. 
-# -# The filesystem store can be configured to store images in multiple -# directories as opposed to using a single directory specified by the -# ``filesystem_store_datadir`` configuration option. When using -# multiple directories, each directory can be given an optional -# priority to specify the preference order in which they should -# be used. Priority is an integer that is concatenated to the -# directory path with a colon where a higher value indicates higher -# priority. When two directories have the same priority, the directory -# with most free space is used. When no priority is specified, it -# defaults to zero. -# -# More information on configuring filesystem store with multiple store -# directories can be found at -# http://docs.openstack.org/developer/glance/configuring.html -# -# NOTE: This directory is used only when filesystem store is used as a -# storage backend. Either ``filesystem_store_datadir`` or -# ``filesystem_store_datadirs`` option must be specified in -# ``glance-api.conf``. If both options are specified, a -# ``BadStoreConfiguration`` will be raised and the filesystem store -# may not be available for adding new images. -# -# Possible values: -# * List of strings of the following form: -# * ``:`` -# -# Related options: -# * ``filesystem_store_datadir`` -# * ``filesystem_store_file_perm`` -# -# (multi valued) -#filesystem_store_datadirs = - -# -# Filesystem store metadata file. -# -# The path to a file which contains the metadata to be returned with -# any location associated with the filesystem store. The file must -# contain a valid JSON object. The object should contain the keys -# ``id`` and ``mountpoint``. The value for both keys should be a -# string. -# -# Possible values: -# * A valid path to the store metadata file -# -# Related options: -# * None -# -# (string value) -#filesystem_store_metadata_file = - -# -# File access permissions for the image files. -# -# Set the intended file access permissions for image data. 
This provides -# a way to enable other services, e.g. Nova, to consume images directly -# from the filesystem store. The users running the services that are -# intended to be given access to could be made a member of the group -# that owns the files created. Assigning a value less then or equal to -# zero for this configuration option signifies that no changes be made -# to the default permissions. This value will be decoded as an octal -# digit. -# -# For more information, please refer the documentation at -# http://docs.openstack.org/developer/glance/configuring.html -# -# Possible values: -# * A valid file access permission -# * Zero -# * Any negative integer -# -# Related options: -# * None -# -# (integer value) -#filesystem_store_file_perm = 0 - -# -# Path to the CA bundle file. -# -# This configuration option enables the operator to use a custom -# Certificate Authority file to verify the remote server certificate. If -# this option is set, the ``https_insecure`` option will be ignored and -# the CA file specified will be used to authenticate the server -# certificate and establish a secure connection to the server. -# -# Possible values: -# * A valid path to a CA file -# -# Related options: -# * https_insecure -# -# (string value) -#https_ca_certificates_file = - -# -# Set verification of the remote server certificate. -# -# This configuration option takes in a boolean value to determine -# whether or not to verify the remote server certificate. If set to -# True, the remote server certificate is not verified. If the option is -# set to False, then the default CA truststore is used for verification. -# -# This option is ignored if ``https_ca_certificates_file`` is set. -# The remote server certificate will then be verified using the file -# specified using the ``https_ca_certificates_file`` option. 
-# -# Possible values: -# * True -# * False -# -# Related options: -# * https_ca_certificates_file -# -# (boolean value) -#https_insecure = true - -# -# The http/https proxy information to be used to connect to the remote -# server. -# -# This configuration option specifies the http/https proxy information -# that should be used to connect to the remote server. The proxy -# information should be a key value pair of the scheme and proxy, for -# example, http:10.0.0.1:3128. You can also specify proxies for multiple -# schemes by separating the key value pairs with a comma, for example, -# http:10.0.0.1:3128, https:10.0.0.1:1080. -# -# Possible values: -# * A comma separated list of scheme:proxy pairs as described above -# -# Related options: -# * None -# -# (dict value) -#http_proxy_information = - -# -# Size, in megabytes, to chunk RADOS images into. -# -# Provide an integer value representing the size in megabytes to chunk -# Glance images into. The default chunk size is 8 megabytes. For optimal -# performance, the value should be a power of two. -# -# When Ceph's RBD object storage system is used as the storage backend -# for storing Glance images, the images are chunked into objects of the -# size set using this option. These chunked objects are then stored -# across the distributed block data store to use for Glance. -# -# Possible Values: -# * Any positive integer value -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#rbd_store_chunk_size = 8 - -# -# RADOS pool in which images are stored. -# -# When RBD is used as the storage backend for storing Glance images, the -# images are stored by means of logical grouping of the objects (chunks -# of images) into a ``pool``. Each pool is defined with the number of -# placement groups it can contain. The default pool that is used is -# 'images'. 
-# -# More information on the RBD storage backend can be found here: -# http://ceph.com/planet/how-data-is-stored-in-ceph-cluster/ -# -# Possible Values: -# * A valid pool name -# -# Related options: -# * None -# -# (string value) -#rbd_store_pool = images - -# -# RADOS user to authenticate as. -# -# This configuration option takes in the RADOS user to authenticate as. -# This is only needed when RADOS authentication is enabled and is -# applicable only if the user is using Cephx authentication. If the -# value for this option is not set by the user or is set to None, a -# default value will be chosen, which will be based on the client. -# section in rbd_store_ceph_conf. -# -# Possible Values: -# * A valid RADOS user -# -# Related options: -# * rbd_store_ceph_conf -# -# (string value) -#rbd_store_user = - -# -# Ceph configuration file path. -# -# This configuration option takes in the path to the Ceph configuration -# file to be used. If the value for this option is not set by the user -# or is set to None, librados will locate the default configuration file -# which is located at /etc/ceph/ceph.conf. If using Cephx -# authentication, this file should include a reference to the right -# keyring in a client. section -# -# Possible Values: -# * A valid path to a configuration file -# -# Related options: -# * rbd_store_user -# -# (string value) -#rbd_store_ceph_conf = /etc/ceph/ceph.conf - -# -# Timeout value for connecting to Ceph cluster. -# -# This configuration option takes in the timeout value in seconds used -# when connecting to the Ceph cluster i.e. it sets the time to wait for -# glance-api before closing the connection. This prevents glance-api -# hangups during the connection to RBD. If the value for this option -# is set to less than or equal to 0, no timeout is set and the default -# librados value is used. 
-# -# Possible Values: -# * Any integer value -# -# Related options: -# * None -# -# (integer value) -#rados_connect_timeout = 0 - -# -# Chunk size for images to be stored in Sheepdog data store. -# -# Provide an integer value representing the size in mebibyte -# (1048576 bytes) to chunk Glance images into. The default -# chunk size is 64 mebibytes. -# -# When using Sheepdog distributed storage system, the images are -# chunked into objects of this size and then stored across the -# distributed data store to use for Glance. -# -# Chunk sizes, if a power of two, help avoid fragmentation and -# enable improved performance. -# -# Possible values: -# * Positive integer value representing size in mebibytes. -# -# Related Options: -# * None -# -# (integer value) -# Minimum value: 1 -#sheepdog_store_chunk_size = 64 - -# -# Port number on which the sheep daemon will listen. -# -# Provide an integer value representing a valid port number on -# which you want the Sheepdog daemon to listen on. The default -# port is 7000. -# -# The Sheepdog daemon, also called 'sheep', manages the storage -# in the distributed cluster by writing objects across the storage -# network. It identifies and acts on the messages it receives on -# the port number set using ``sheepdog_store_port`` option to store -# chunks of Glance images. -# -# Possible values: -# * A valid port number (0 to 65535) -# -# Related Options: -# * sheepdog_store_address -# -# (port value) -# Minimum value: 0 -# Maximum value: 65535 -#sheepdog_store_port = 7000 - -# -# Address to bind the Sheepdog daemon to. -# -# Provide a string value representing the address to bind the -# Sheepdog daemon to. The default address set for the 'sheep' -# is 127.0.0.1. -# -# The Sheepdog daemon, also called 'sheep', manages the storage -# in the distributed cluster by writing objects across the storage -# network. 
It identifies and acts on the messages directed to the -# address set using ``sheepdog_store_address`` option to store -# chunks of Glance images. -# -# Possible values: -# * A valid IPv4 address -# * A valid IPv6 address -# * A valid hostname -# -# Related Options: -# * sheepdog_store_port -# -# (string value) -#sheepdog_store_address = 127.0.0.1 - -# -# Set verification of the server certificate. -# -# This boolean determines whether or not to verify the server -# certificate. If this option is set to True, swiftclient won't check -# for a valid SSL certificate when authenticating. If the option is set -# to False, then the default CA truststore is used for verification. -# -# Possible values: -# * True -# * False -# -# Related options: -# * swift_store_cacert -# -# (boolean value) -#swift_store_auth_insecure = false - -# -# Path to the CA bundle file. -# -# This configuration option enables the operator to specify the path to -# a custom Certificate Authority file for SSL verification when -# connecting to Swift. -# -# Possible values: -# * A valid path to a CA file -# -# Related options: -# * swift_store_auth_insecure -# -# (string value) -#swift_store_cacert = /etc/ssl/certs/ca-certificates.crt - -# -# The region of Swift endpoint to use by Glance. -# -# Provide a string value representing a Swift region where Glance -# can connect to for image storage. By default, there is no region -# set. -# -# When Glance uses Swift as the storage backend to store images -# for a specific tenant that has multiple endpoints, setting of a -# Swift region with ``swift_store_region`` allows Glance to connect -# to Swift in the specified region as opposed to a single region -# connectivity. -# -# This option can be configured for both single-tenant and -# multi-tenant storage. -# -# NOTE: Setting the region with ``swift_store_region`` is -# tenant-specific and is necessary ``only if`` the tenant has -# multiple endpoints across different regions. 
-# -# Possible values: -# * A string value representing a valid Swift region. -# -# Related Options: -# * None -# -# (string value) -#swift_store_region = RegionTwo - -# -# The URL endpoint to use for Swift backend storage. -# -# Provide a string value representing the URL endpoint to use for -# storing Glance images in Swift store. By default, an endpoint -# is not set and the storage URL returned by ``auth`` is used. -# Setting an endpoint with ``swift_store_endpoint`` overrides the -# storage URL and is used for Glance image storage. -# -# NOTE: The URL should include the path up to, but excluding the -# container. The location of an object is obtained by appending -# the container and object to the configured URL. -# -# Possible values: -# * String value representing a valid URL path up to a Swift container -# -# Related Options: -# * None -# -# (string value) -#swift_store_endpoint = https://swift.openstack.example.org/v1/path_not_including_container_name - -# -# Endpoint Type of Swift service. -# -# This string value indicates the endpoint type to use to fetch the -# Swift endpoint. The endpoint type determines the actions the user will -# be allowed to perform, for instance, reading and writing to the Store. -# This setting is only used if swift_store_auth_version is greater than -# 1. -# -# Possible values: -# * publicURL -# * adminURL -# * internalURL -# -# Related options: -# * swift_store_endpoint -# -# (string value) -# Allowed values: publicURL, adminURL, internalURL -#swift_store_endpoint_type = publicURL - -# -# Type of Swift service to use. -# -# Provide a string value representing the service type to use for -# storing images while using Swift backend storage. The default -# service type is set to ``object-store``. -# -# NOTE: If ``swift_store_auth_version`` is set to 2, the value for -# this configuration option needs to be ``object-store``. If using -# a higher version of Keystone or a different auth scheme, this -# option may be modified. 
-# -# Possible values: -# * A string representing a valid service type for Swift storage. -# -# Related Options: -# * None -# -# (string value) -#swift_store_service_type = object-store - -# -# Name of single container to store images/name prefix for multiple containers -# -# When a single container is being used to store images, this configuration -# option indicates the container within the Glance account to be used for -# storing all images. When multiple containers are used to store images, this -# will be the name prefix for all containers. Usage of single/multiple -# containers can be controlled using the configuration option -# ``swift_store_multiple_containers_seed``. -# -# When using multiple containers, the containers will be named after the value -# set for this configuration option with the first N chars of the image UUID -# as the suffix delimited by an underscore (where N is specified by -# ``swift_store_multiple_containers_seed``). -# -# Example: if the seed is set to 3 and swift_store_container = ``glance``, then -# an image with UUID ``fdae39a1-bac5-4238-aba4-69bcc726e848`` would be placed in -# the container ``glance_fda``. All dashes in the UUID are included when -# creating the container name but do not count toward the character limit, so -# when N=10 the container name would be ``glance_fdae39a1-ba.`` -# -# Possible values: -# * If using single container, this configuration option can be any string -# that is a valid swift container name in Glance's Swift account -# * If using multiple containers, this configuration option can be any -# string as long as it satisfies the container naming rules enforced by -# Swift. The value of ``swift_store_multiple_containers_seed`` should be -# taken into account as well. 
-# -# Related options: -# * ``swift_store_multiple_containers_seed`` -# * ``swift_store_multi_tenant`` -# * ``swift_store_create_container_on_put`` -# -# (string value) -#swift_store_container = glance - -# -# The size threshold, in MB, after which Glance will start segmenting image -# data. -# -# Swift has an upper limit on the size of a single uploaded object. By default, -# this is 5GB. To upload objects bigger than this limit, objects are segmented -# into multiple smaller objects that are tied together with a manifest file. -# For more detail, refer to -# http://docs.openstack.org/developer/swift/overview_large_objects.html -# -# This configuration option specifies the size threshold over which the Swift -# driver will start segmenting image data into multiple smaller files. -# Currently, the Swift driver only supports creating Dynamic Large Objects. -# -# NOTE: This should be set by taking into account the large object limit -# enforced by the Swift cluster in consideration. -# -# Possible values: -# * A positive integer that is less than or equal to the large object limit -# enforced by the Swift cluster in consideration. -# -# Related options: -# * ``swift_store_large_object_chunk_size`` -# -# (integer value) -# Minimum value: 1 -#swift_store_large_object_size = 5120 - -# -# The maximum size, in MB, of the segments when image data is segmented. -# -# When image data is segmented to upload images that are larger than the limit -# enforced by the Swift cluster, image data is broken into segments that are no -# bigger than the size specified by this configuration option. -# Refer to ``swift_store_large_object_size`` for more detail. -# -# For example: if ``swift_store_large_object_size`` is 5GB and -# ``swift_store_large_object_chunk_size`` is 1GB, an image of size 6.2GB will be -# segmented into 7 segments where the first six segments will be 1GB in size and -# the seventh segment will be 0.2GB. 
-# -# Possible values: -# * A positive integer that is less than or equal to the large object limit -# enforced by Swift cluster in consideration. -# -# Related options: -# * ``swift_store_large_object_size`` -# -# (integer value) -# Minimum value: 1 -#swift_store_large_object_chunk_size = 200 - -# -# Create container, if it doesn't already exist, when uploading image. -# -# At the time of uploading an image, if the corresponding container doesn't -# exist, it will be created provided this configuration option is set to True. -# By default, it won't be created. This behavior is applicable for both single -# and multiple containers mode. -# -# Possible values: -# * True -# * False -# -# Related options: -# * None -# -# (boolean value) -#swift_store_create_container_on_put = false - -# -# Store images in tenant's Swift account. -# -# This enables multi-tenant storage mode which causes Glance images to be stored -# in tenant specific Swift accounts. If this is disabled, Glance stores all -# images in its own account. More details multi-tenant store can be found at -# https://wiki.openstack.org/wiki/GlanceSwiftTenantSpecificStorage -# -# NOTE: If using multi-tenant swift store, please make sure -# that you do not set a swift configuration file with the -# 'swift_store_config_file' option. -# -# Possible values: -# * True -# * False -# -# Related options: -# * swift_store_config_file -# -# (boolean value) -#swift_store_multi_tenant = false - -# -# Seed indicating the number of containers to use for storing images. -# -# When using a single-tenant store, images can be stored in one or more than one -# containers. When set to 0, all images will be stored in one single container. -# When set to an integer value between 1 and 32, multiple containers will be -# used to store images. This configuration option will determine how many -# containers are created. 
The total number of containers that will be used is -# equal to 16^N, so if this config option is set to 2, then 16^2=256 containers -# will be used to store images. -# -# Please refer to ``swift_store_container`` for more detail on the naming -# convention. More detail about using multiple containers can be found at -# https://specs.openstack.org/openstack/glance-specs/specs/kilo/swift-store- -# multiple-containers.html -# -# NOTE: This is used only when swift_store_multi_tenant is disabled. -# -# Possible values: -# * A non-negative integer less than or equal to 32 -# -# Related options: -# * ``swift_store_container`` -# * ``swift_store_multi_tenant`` -# * ``swift_store_create_container_on_put`` -# -# (integer value) -# Minimum value: 0 -# Maximum value: 32 -#swift_store_multiple_containers_seed = 0 - -# -# List of tenants that will be granted admin access. -# -# This is a list of tenants that will be granted read/write access on -# all Swift containers created by Glance in multi-tenant mode. The -# default value is an empty list. -# -# Possible values: -# * A comma separated list of strings representing UUIDs of Keystone -# projects/tenants -# -# Related options: -# * None -# -# (list value) -#swift_store_admin_tenants = - -# -# SSL layer compression for HTTPS Swift requests. -# -# Provide a boolean value to determine whether or not to compress -# HTTPS Swift requests for images at the SSL layer. By default, -# compression is enabled. -# -# When using Swift as the backend store for Glance image storage, -# SSL layer compression of HTTPS Swift requests can be set using -# this option. If set to False, SSL layer compression of HTTPS -# Swift requests is disabled. Disabling this option may improve -# performance for images which are already in a compressed format, -# for example, qcow2. 
-# -# Possible values: -# * True -# * False -# -# Related Options: -# * None -# -# (boolean value) -#swift_store_ssl_compression = true - -# -# The number of times a Swift download will be retried before the -# request fails. -# -# Provide an integer value representing the number of times an image -# download must be retried before erroring out. The default value is -# zero (no retry on a failed image download). When set to a positive -# integer value, ``swift_store_retry_get_count`` ensures that the -# download is attempted this many more times upon a download failure -# before sending an error message. -# -# Possible values: -# * Zero -# * Positive integer value -# -# Related Options: -# * None -# -# (integer value) -# Minimum value: 0 -#swift_store_retry_get_count = 0 - -# -# Time in seconds defining the size of the window in which a new -# token may be requested before the current token is due to expire. -# -# Typically, the Swift storage driver fetches a new token upon the -# expiration of the current token to ensure continued access to -# Swift. However, some Swift transactions (like uploading image -# segments) may not recover well if the token expires on the fly. -# -# Hence, by fetching a new token before the current token expiration, -# we make sure that the token does not expire or is close to expiry -# before a transaction is attempted. By default, the Swift storage -# driver requests for a new token 60 seconds or less before the -# current token expiration. -# -# Possible values: -# * Zero -# * Positive integer value -# -# Related Options: -# * None -# -# (integer value) -# Minimum value: 0 -#swift_store_expire_soon_interval = 60 - -# -# Use trusts for multi-tenant Swift store. -# -# This option instructs the Swift store to create a trust for each -# add/get request when the multi-tenant store is in use. 
Using trusts -# allows the Swift store to avoid problems that can be caused by an -# authentication token expiring during the upload or download of data. -# -# By default, ``swift_store_use_trusts`` is set to ``True``(use of -# trusts is enabled). If set to ``False``, a user token is used for -# the Swift connection instead, eliminating the overhead of trust -# creation. -# -# NOTE: This option is considered only when -# ``swift_store_multi_tenant`` is set to ``True`` -# -# Possible values: -# * True -# * False -# -# Related options: -# * swift_store_multi_tenant -# -# (boolean value) -#swift_store_use_trusts = true - -# -# Reference to default Swift account/backing store parameters. -# -# Provide a string value representing a reference to the default set -# of parameters required for using swift account/backing store for -# image storage. The default reference value for this configuration -# option is 'ref1'. This configuration option dereferences the -# parameters and facilitates image storage in Swift storage backend -# every time a new image is added. -# -# Possible values: -# * A valid string value -# -# Related options: -# * None -# -# (string value) -#default_swift_reference = ref1 - -# DEPRECATED: Version of the authentication service to use. Valid versions are 2 -# and 3 for keystone and 1 (deprecated) for swauth and rackspace. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: -# The option 'auth_version' in the Swift back-end configuration file is -# used instead. -#swift_store_auth_version = 2 - -# DEPRECATED: The address where the Swift authentication service is listening. -# (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: -# The option 'auth_address' in the Swift back-end configuration file is -# used instead. 
-#swift_store_auth_address = - -# DEPRECATED: The user to authenticate against the Swift authentication service. -# (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: -# The option 'user' in the Swift back-end configuration file is set instead. -#swift_store_user = - -# DEPRECATED: Auth key for the user authenticating against the Swift -# authentication service. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: -# The option 'key' in the Swift back-end configuration file is used -# to set the authentication key instead. -#swift_store_key = - -# -# Absolute path to the file containing the swift account(s) -# configurations. -# -# Include a string value representing the path to a configuration -# file that has references for each of the configured Swift -# account(s)/backing stores. By default, no file path is specified -# and customized Swift referencing is disabled. Configuring this -# option is highly recommended while using Swift storage backend for -# image storage as it avoids storage of credentials in the database. -# -# NOTE: Please do not configure this option if you have set -# ``swift_store_multi_tenant`` to ``True``. -# -# Possible values: -# * String value representing an absolute path on the glance-api -# node -# -# Related options: -# * swift_store_multi_tenant -# -# (string value) -#swift_store_config_file = - -# -# Address of the ESX/ESXi or vCenter Server target system. -# -# This configuration option sets the address of the ESX/ESXi or vCenter -# Server target system. This option is required when using the VMware -# storage backend. The address can contain an IP address (127.0.0.1) or -# a DNS name (www.my-domain.com). 
-# -# Possible Values: -# * A valid IPv4 or IPv6 address -# * A valid DNS name -# -# Related options: -# * vmware_server_username -# * vmware_server_password -# -# (string value) -#vmware_server_host = 127.0.0.1 - -# -# Server username. -# -# This configuration option takes the username for authenticating with -# the VMware ESX/ESXi or vCenter Server. This option is required when -# using the VMware storage backend. -# -# Possible Values: -# * Any string that is the username for a user with appropriate -# privileges -# -# Related options: -# * vmware_server_host -# * vmware_server_password -# -# (string value) -#vmware_server_username = root - -# -# Server password. -# -# This configuration option takes the password for authenticating with -# the VMware ESX/ESXi or vCenter Server. This option is required when -# using the VMware storage backend. -# -# Possible Values: -# * Any string that is a password corresponding to the username -# specified using the "vmware_server_username" option -# -# Related options: -# * vmware_server_host -# * vmware_server_username -# -# (string value) -#vmware_server_password = vmware - -# -# The number of VMware API retries. -# -# This configuration option specifies the number of times the VMware -# ESX/VC server API must be retried upon connection related issues or -# server API call overload. It is not possible to specify 'retry -# forever'. -# -# Possible Values: -# * Any positive integer value -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#vmware_api_retry_count = 10 - -# -# Interval in seconds used for polling remote tasks invoked on VMware -# ESX/VC server. -# -# This configuration option takes in the sleep time in seconds for polling an -# on-going async task as part of the VMWare ESX/VC server API call. 
-# -# Possible Values: -# * Any positive integer value -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#vmware_task_poll_interval = 5 - -# -# The directory where the glance images will be stored in the datastore. -# -# This configuration option specifies the path to the directory where the -# glance images will be stored in the VMware datastore. If this option -# is not set, the default directory where the glance images are stored -# is openstack_glance. -# -# Possible Values: -# * Any string that is a valid path to a directory -# -# Related options: -# * None -# -# (string value) -#vmware_store_image_dir = /openstack_glance - -# -# Set verification of the ESX/vCenter server certificate. -# -# This configuration option takes a boolean value to determine -# whether or not to verify the ESX/vCenter server certificate. If this -# option is set to True, the ESX/vCenter server certificate is not -# verified. If this option is set to False, then the default CA -# truststore is used for verification. -# -# This option is ignored if the "vmware_ca_file" option is set. In that -# case, the ESX/vCenter server certificate will then be verified using -# the file specified using the "vmware_ca_file" option . -# -# Possible Values: -# * True -# * False -# -# Related options: -# * vmware_ca_file -# -# (boolean value) -# Deprecated group/name - [glance_store]/vmware_api_insecure -#vmware_insecure = false - -# -# Absolute path to the CA bundle file. -# -# This configuration option enables the operator to use a custom -# Cerificate Authority File to verify the ESX/vCenter certificate. -# -# If this option is set, the "vmware_insecure" option will be ignored -# and the CA file specified will be used to authenticate the ESX/vCenter -# server certificate and establish a secure connection to the server. 
-# -# Possible Values: -# * Any string that is a valid absolute path to a CA file -# -# Related options: -# * vmware_insecure -# -# (string value) -#vmware_ca_file = /etc/ssl/certs/ca-certificates.crt - -# -# The datastores where the image can be stored. -# -# This configuration option specifies the datastores where the image can -# be stored in the VMWare store backend. This option may be specified -# multiple times for specifying multiple datastores. The datastore name -# should be specified after its datacenter path, separated by ":". An -# optional weight may be given after the datastore name, separated again -# by ":" to specify the priority. Thus, the required format becomes -# ::. -# -# When adding an image, the datastore with highest weight will be -# selected, unless there is not enough free space available in cases -# where the image size is already known. If no weight is given, it is -# assumed to be zero and the directory will be considered for selection -# last. If multiple datastores have the same weight, then the one with -# the most free space available is selected. -# -# Possible Values: -# * Any string of the format: -# :: -# -# Related options: -# * None -# -# (multi valued) -#vmware_datastores = - - -[oslo_policy] - -# -# From oslo.policy -# - -# The file that defines policies. (string value) -# Deprecated group/name - [DEFAULT]/policy_file -#policy_file = policy.json - -# Default rule. Enforced when a requested rule is not found. (string value) -# Deprecated group/name - [DEFAULT]/policy_default_rule -#policy_default_rule = default - -# Directories where policy configuration files are stored. They can be relative -# to any directory in the search path defined by the config_dir option, or -# absolute paths. The file defined by policy_file must exist for these -# directories to be searched. Missing or empty directories are ignored. 
(multi -# valued) -# Deprecated group/name - [DEFAULT]/policy_dirs -#policy_dirs = policy.d diff --git a/etc/glance-manage.conf b/etc/glance-manage.conf deleted file mode 100644 index 45def89c..00000000 --- a/etc/glance-manage.conf +++ /dev/null @@ -1,225 +0,0 @@ -[DEFAULT] - -# -# From oslo.log -# - -# If set to true, the logging level will be set to DEBUG instead of the default -# INFO level. (boolean value) -# Note: This option can be changed without restarting. -#debug = false - -# DEPRECATED: If set to false, the logging level will be set to WARNING instead -# of the default INFO level. (boolean value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -#verbose = true - -# The name of a logging configuration file. This file is appended to any -# existing logging configuration files. For details about logging configuration -# files, see the Python logging module documentation. Note that when logging -# configuration files are used then all logging configuration is set in the -# configuration file and other logging configuration options are ignored (for -# example, logging_context_format_string). (string value) -# Note: This option can be changed without restarting. -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append = - -# Defines the format string for %%(asctime)s in log records. Default: -# %(default)s . This option is ignored if log_config_append is set. (string -# value) -#log_date_format = %Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to send logging output to. If no default is set, -# logging will go to stderr as defined by use_stderr. This option is ignored if -# log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file = - -# (Optional) The base directory used for relative log_file paths. This option -# is ignored if log_config_append is set. 
(string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir = - -# Uses logging handler designed to watch file system. When log file is moved or -# removed this handler will open a new log file with specified path -# instantaneously. It makes sense only if log_file option is specified and Linux -# platform is used. This option is ignored if log_config_append is set. (boolean -# value) -#watch_log_file = false - -# Use syslog for logging. Existing syslog format is DEPRECATED and will be -# changed later to honor RFC5424. This option is ignored if log_config_append is -# set. (boolean value) -#use_syslog = false - -# Syslog facility to receive log lines. This option is ignored if -# log_config_append is set. (string value) -#syslog_log_facility = LOG_USER - -# Log output to standard error. This option is ignored if log_config_append is -# set. (boolean value) -#use_stderr = false - -# Format string to use for log messages with context. (string value) -#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages when context is undefined. (string -# value) -#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Additional data to append to log message when logging level for the message is -# DEBUG. (string value) -#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format. (string value) -#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s - -# Defines the format string for %(user_identity)s that is used in -# logging_context_format_string. (string value) -#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s - -# List of package logging levels in logger=LEVEL pairs. 
This option is ignored -# if log_config_append is set. (list value) -#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO - -# Enables or disables publication of error events. (boolean value) -#publish_errors = false - -# The format for an instance that is passed with the log message. (string value) -#instance_format = "[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log message. (string -# value) -#instance_uuid_format = "[instance: %(uuid)s] " - -# Interval, number of seconds, of log rate limiting. (integer value) -#rate_limit_interval = 0 - -# Maximum number of logged messages per rate_limit_interval. (integer value) -#rate_limit_burst = 0 - -# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG or -# empty string. Logs with level greater or equal to rate_limit_except_level are -# not filtered. An empty string means that all levels are filtered. (string -# value) -#rate_limit_except_level = CRITICAL - -# Enables or disables fatal status of deprecations. (boolean value) -#fatal_deprecations = false - - -[database] - -# -# From oslo.db -# - -# DEPRECATED: The file name to use with SQLite. (string value) -# Deprecated group/name - [DEFAULT]/sqlite_db -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Should use config option connection or slave_connection to connect the -# database. -#sqlite_db = oslo.sqlite - -# If True, SQLite uses synchronous mode. (boolean value) -# Deprecated group/name - [DEFAULT]/sqlite_synchronous -#sqlite_synchronous = true - -# The back end to use for the database. 
(string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend = sqlalchemy - -# The SQLAlchemy connection string to use to connect to the database. (string -# value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection = - -# The SQLAlchemy connection string to use to connect to the slave database. -# (string value) -#slave_connection = - -# The SQL mode to be used for MySQL sessions. This option, including the -# default, overrides any server-set SQL mode. To use whatever SQL mode is set by -# the server configuration, set this to no value. Example: mysql_sql_mode= -# (string value) -#mysql_sql_mode = TRADITIONAL - -# Timeout before idle SQL connections are reaped. (integer value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout = 3600 - -# Minimum number of SQL connections to keep open in a pool. (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool. Setting a value of 0 -# indicates no limit. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size = 5 - -# Maximum number of database connection retries during startup. Set to -1 to -# specify an infinite retry count. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries = 10 - -# Interval between retries of opening a SQL connection. (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval = 10 - -# If set, use this value for max_overflow with SQLAlchemy. 
(integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow = 50 - -# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer -# value) -# Minimum value: 0 -# Maximum value: 100 -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug = 0 - -# Add Python stack traces to SQL as comment strings. (boolean value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace = false - -# If set, use this value for pool_timeout with SQLAlchemy. (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout = - -# Enable the experimental use of database reconnect on connection lost. (boolean -# value) -#use_db_reconnect = false - -# Seconds between retries of a database transaction. (integer value) -#db_retry_interval = 1 - -# If True, increases the interval between retries of a database operation up to -# db_max_retry_interval. (boolean value) -#db_inc_retry_interval = true - -# If db_inc_retry_interval is set, the maximum seconds between retries of a -# database operation. (integer value) -#db_max_retry_interval = 10 - -# Maximum retries in case of connection error or deadlock error before error is -# raised. Set to -1 to specify an infinite retry count. 
(integer value) -#db_max_retries = 20 - -# -# From oslo.db.concurrency -# - -# Enable the experimental use of thread pooling for all DB API calls (boolean -# value) -# Deprecated group/name - [DEFAULT]/dbapi_use_tpool -#use_tpool = false diff --git a/etc/glance-registry-paste.ini b/etc/glance-registry-paste.ini deleted file mode 100644 index 492dbc6f..00000000 --- a/etc/glance-registry-paste.ini +++ /dev/null @@ -1,35 +0,0 @@ -# Use this pipeline for no auth - DEFAULT -[pipeline:glance-registry] -pipeline = healthcheck osprofiler unauthenticated-context registryapp - -# Use this pipeline for keystone auth -[pipeline:glance-registry-keystone] -pipeline = healthcheck osprofiler authtoken context registryapp - -# Use this pipeline for authZ only. This means that the registry will treat a -# user as authenticated without making requests to keystone to reauthenticate -# the user. -[pipeline:glance-registry-trusted-auth] -pipeline = healthcheck osprofiler context registryapp - -[app:registryapp] -paste.app_factory = glance.registry.api:API.factory - -[filter:healthcheck] -paste.filter_factory = oslo_middleware:Healthcheck.factory -backends = disable_by_file -disable_by_file_path = /etc/glance/healthcheck_disable - -[filter:context] -paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory - -[filter:unauthenticated-context] -paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory - -[filter:osprofiler] -paste.filter_factory = osprofiler.web:WsgiMiddleware.factory -hmac_keys = SECRET_KEY #DEPRECATED -enabled = yes #DEPRECATED diff --git a/etc/glance-registry.conf b/etc/glance-registry.conf deleted file mode 100644 index 1357d02b..00000000 --- a/etc/glance-registry.conf +++ /dev/null @@ -1,2279 +0,0 @@ -[DEFAULT] - -# -# From glance.registry -# - -# -# Set the image owner to tenant or the authenticated user. 
-# -# Assign a boolean value to determine the owner of an image. When set to -# True, the owner of the image is the tenant. When set to False, the -# owner of the image will be the authenticated user issuing the request. -# Setting it to False makes the image private to the associated user and -# sharing with other users within the same tenant (or "project") -# requires explicit image sharing via image membership. -# -# Possible values: -# * True -# * False -# -# Related options: -# * None -# -# (boolean value) -#owner_is_tenant = true - -# -# Role used to identify an authenticated user as administrator. -# -# Provide a string value representing a Keystone role to identify an -# administrative user. Users with this role will be granted -# administrative privileges. The default value for this option is -# 'admin'. -# -# Possible values: -# * A string value which is a valid Keystone role -# -# Related options: -# * None -# -# (string value) -#admin_role = admin - -# -# Allow limited access to unauthenticated users. -# -# Assign a boolean to determine API access for unathenticated -# users. When set to False, the API cannot be accessed by -# unauthenticated users. When set to True, unauthenticated users can -# access the API with read-only privileges. This however only applies -# when using ContextMiddleware. -# -# Possible values: -# * True -# * False -# -# Related options: -# * None -# -# (boolean value) -#allow_anonymous_access = false - -# -# Limit the request ID length. -# -# Provide an integer value to limit the length of the request ID to -# the specified length. The default value is 64. Users can change this -# to any ineteger value between 0 and 16384 however keeping in mind that -# a larger value may flood the logs. -# -# Possible values: -# * Integer value between 0 and 16384 -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#max_request_id_length = 64 - -# -# Allow users to add additional/custom properties to images. 
-# -# Glance defines a standard set of properties (in its schema) that -# appear on every image. These properties are also known as -# ``base properties``. In addition to these properties, Glance -# allows users to add custom properties to images. These are known -# as ``additional properties``. -# -# By default, this configuration option is set to ``True`` and users -# are allowed to add additional properties. The number of additional -# properties that can be added to an image can be controlled via -# ``image_property_quota`` configuration option. -# -# Possible values: -# * True -# * False -# -# Related options: -# * image_property_quota -# -# (boolean value) -#allow_additional_image_properties = true - -# -# Maximum number of image members per image. -# -# This limits the maximum of users an image can be shared with. Any negative -# value is interpreted as unlimited. -# -# Related options: -# * None -# -# (integer value) -#image_member_quota = 128 - -# -# Maximum number of properties allowed on an image. -# -# This enforces an upper limit on the number of additional properties an image -# can have. Any negative value is interpreted as unlimited. -# -# NOTE: This won't have any impact if additional properties are disabled. Please -# refer to ``allow_additional_image_properties``. -# -# Related options: -# * ``allow_additional_image_properties`` -# -# (integer value) -#image_property_quota = 128 - -# -# Maximum number of tags allowed on an image. -# -# Any negative value is interpreted as unlimited. -# -# Related options: -# * None -# -# (integer value) -#image_tag_quota = 128 - -# -# Maximum number of locations allowed on an image. -# -# Any negative value is interpreted as unlimited. -# -# Related options: -# * None -# -# (integer value) -#image_location_quota = 10 - -# -# Python module path of data access API. -# -# Specifies the path to the API to use for accessing the data model. -# This option determines how the image catalog data will be accessed. 
-# -# Possible values: -# * glance.db.sqlalchemy.api -# * glance.db.registry.api -# * glance.db.simple.api -# -# If this option is set to ``glance.db.sqlalchemy.api`` then the image -# catalog data is stored in and read from the database via the -# SQLAlchemy Core and ORM APIs. -# -# Setting this option to ``glance.db.registry.api`` will force all -# database access requests to be routed through the Registry service. -# This avoids data access from the Glance API nodes for an added layer -# of security, scalability and manageability. -# -# NOTE: In v2 OpenStack Images API, the registry service is optional. -# In order to use the Registry API in v2, the option -# ``enable_v2_registry`` must be set to ``True``. -# -# Finally, when this configuration option is set to -# ``glance.db.simple.api``, image catalog data is stored in and read -# from an in-memory data structure. This is primarily used for testing. -# -# Related options: -# * enable_v2_api -# * enable_v2_registry -# -# (string value) -#data_api = glance.db.sqlalchemy.api - -# -# The default number of results to return for a request. -# -# Responses to certain API requests, like list images, may return -# multiple items. The number of results returned can be explicitly -# controlled by specifying the ``limit`` parameter in the API request. -# However, if a ``limit`` parameter is not specified, this -# configuration value will be used as the default number of results to -# be returned for any API request. -# -# NOTES: -# * The value of this configuration option may not be greater than -# the value specified by ``api_limit_max``. -# * Setting this to a very large value may slow down database -# queries and increase response times. Setting this to a -# very low value may result in poor user experience. 
-# -# Possible values: -# * Any positive integer -# -# Related options: -# * api_limit_max -# -# (integer value) -# Minimum value: 1 -#limit_param_default = 25 - -# -# Maximum number of results that could be returned by a request. -# -# As described in the help text of ``limit_param_default``, some -# requests may return multiple results. The number of results to be -# returned are governed either by the ``limit`` parameter in the -# request or the ``limit_param_default`` configuration option. -# The value in either case, can't be greater than the absolute maximum -# defined by this configuration option. Anything greater than this -# value is trimmed down to the maximum value defined here. -# -# NOTE: Setting this to a very large value may slow down database -# queries and increase response times. Setting this to a -# very low value may result in poor user experience. -# -# Possible values: -# * Any positive integer -# -# Related options: -# * limit_param_default -# -# (integer value) -# Minimum value: 1 -#api_limit_max = 1000 - -# -# Show direct image location when returning an image. -# -# This configuration option indicates whether to show the direct image -# location when returning image details to the user. The direct image -# location is where the image data is stored in backend storage. This -# image location is shown under the image property ``direct_url``. -# -# When multiple image locations exist for an image, the best location -# is displayed based on the location strategy indicated by the -# configuration option ``location_strategy``. -# -# NOTES: -# * Revealing image locations can present a GRAVE SECURITY RISK as -# image locations can sometimes include credentials. Hence, this -# is set to ``False`` by default. Set this to ``True`` with -# EXTREME CAUTION and ONLY IF you know what you are doing! 
-# * If an operator wishes to avoid showing any image location(s) -# to the user, then both this option and -# ``show_multiple_locations`` MUST be set to ``False``. -# -# Possible values: -# * True -# * False -# -# Related options: -# * show_multiple_locations -# * location_strategy -# -# (boolean value) -#show_image_direct_url = false - -# DEPRECATED: -# Show all image locations when returning an image. -# -# This configuration option indicates whether to show all the image -# locations when returning image details to the user. When multiple -# image locations exist for an image, the locations are ordered based -# on the location strategy indicated by the configuration opt -# ``location_strategy``. The image locations are shown under the -# image property ``locations``. -# -# NOTES: -# * Revealing image locations can present a GRAVE SECURITY RISK as -# image locations can sometimes include credentials. Hence, this -# is set to ``False`` by default. Set this to ``True`` with -# EXTREME CAUTION and ONLY IF you know what you are doing! -# * If an operator wishes to avoid showing any image location(s) -# to the user, then both this option and -# ``show_image_direct_url`` MUST be set to ``False``. -# -# Possible values: -# * True -# * False -# -# Related options: -# * show_image_direct_url -# * location_strategy -# -# (boolean value) -# This option is deprecated for removal since Newton. -# Its value may be silently ignored in the future. -# Reason: This option will be removed in the Pike release or later because the -# same functionality can be achieved with greater granularity by using policies. -# Please see the Newton release notes for more information. -#show_multiple_locations = false - -# -# Maximum size of image a user can upload in bytes. -# -# An image upload greater than the size mentioned here would result -# in an image creation failure. This configuration option defaults to -# 1099511627776 bytes (1 TiB). 
-# -# NOTES: -# * This value should only be increased after careful -# consideration and must be set less than or equal to -# 8 EiB (9223372036854775808). -# * This value must be set with careful consideration of the -# backend storage capacity. Setting this to a very low value -# may result in a large number of image failures. And, setting -# this to a very large value may result in faster consumption -# of storage. Hence, this must be set according to the nature of -# images created and storage capacity available. -# -# Possible values: -# * Any positive number less than or equal to 9223372036854775808 -# -# (integer value) -# Minimum value: 1 -# Maximum value: 9223372036854775808 -#image_size_cap = 1099511627776 - -# -# Maximum amount of image storage per tenant. -# -# This enforces an upper limit on the cumulative storage consumed by all images -# of a tenant across all stores. This is a per-tenant limit. -# -# The default unit for this configuration option is Bytes. However, storage -# units can be specified using case-sensitive literals ``B``, ``KB``, ``MB``, -# ``GB`` and ``TB`` representing Bytes, KiloBytes, MegaBytes, GigaBytes and -# TeraBytes respectively. Note that there should not be any space between the -# value and unit. Value ``0`` signifies no quota enforcement. Negative values -# are invalid and result in errors. -# -# Possible values: -# * A string that is a valid concatenation of a non-negative integer -# representing the storage value and an optional string literal -# representing storage units as mentioned above. -# -# Related options: -# * None -# -# (string value) -#user_storage_quota = 0 - -# -# Deploy the v1 OpenStack Images API. -# -# When this option is set to ``True``, Glance service will respond to -# requests on registered endpoints conforming to the v1 OpenStack -# Images API. 
-# -# NOTES: -# * If this option is enabled, then ``enable_v1_registry`` must -# also be set to ``True`` to enable mandatory usage of Registry -# service with v1 API. -# -# * If this option is disabled, then the ``enable_v1_registry`` -# option, which is enabled by default, is also recommended -# to be disabled. -# -# * This option is separate from ``enable_v2_api``, both v1 and v2 -# OpenStack Images API can be deployed independent of each -# other. -# -# * If deploying only the v2 Images API, this option, which is -# enabled by default, should be disabled. -# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v1_registry -# * enable_v2_api -# -# (boolean value) -#enable_v1_api = true - -# -# Deploy the v2 OpenStack Images API. -# -# When this option is set to ``True``, Glance service will respond -# to requests on registered endpoints conforming to the v2 OpenStack -# Images API. -# -# NOTES: -# * If this option is disabled, then the ``enable_v2_registry`` -# option, which is enabled by default, is also recommended -# to be disabled. -# -# * This option is separate from ``enable_v1_api``, both v1 and v2 -# OpenStack Images API can be deployed independent of each -# other. -# -# * If deploying only the v1 Images API, this option, which is -# enabled by default, should be disabled. -# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v2_registry -# * enable_v1_api -# -# (boolean value) -#enable_v2_api = true - -# -# Deploy the v1 API Registry service. -# -# When this option is set to ``True``, the Registry service -# will be enabled in Glance for v1 API requests. -# -# NOTES: -# * Use of Registry is mandatory in v1 API, so this option must -# be set to ``True`` if the ``enable_v1_api`` option is enabled. -# -# * If deploying only the v2 OpenStack Images API, this option, -# which is enabled by default, should be disabled. 
-# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v1_api -# -# (boolean value) -#enable_v1_registry = true - -# -# Deploy the v2 API Registry service. -# -# When this option is set to ``True``, the Registry service -# will be enabled in Glance for v2 API requests. -# -# NOTES: -# * Use of Registry is optional in v2 API, so this option -# must only be enabled if both ``enable_v2_api`` is set to -# ``True`` and the ``data_api`` option is set to -# ``glance.db.registry.api``. -# -# * If deploying only the v1 OpenStack Images API, this option, -# which is enabled by default, should be disabled. -# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v2_api -# * data_api -# -# (boolean value) -#enable_v2_registry = true - -# -# Host address of the pydev server. -# -# Provide a string value representing the hostname or IP of the -# pydev server to use for debugging. The pydev server listens for -# debug connections on this address, facilitating remote debugging -# in Glance. -# -# Possible values: -# * Valid hostname -# * Valid IP address -# -# Related options: -# * None -# -# (string value) -#pydev_worker_debug_host = localhost - -# -# Port number that the pydev server will listen on. -# -# Provide a port number to bind the pydev server to. The pydev -# process accepts debug connections on this port and facilitates -# remote debugging in Glance. -# -# Possible values: -# * A valid port number -# -# Related options: -# * None -# -# (port value) -# Minimum value: 0 -# Maximum value: 65535 -#pydev_worker_debug_port = 5678 - -# -# AES key for encrypting store location metadata. -# -# Provide a string value representing the AES cipher to use for -# encrypting Glance store metadata. -# -# NOTE: The AES key to use must be set to a random string of length -# 16, 24 or 32 bytes. 
-# -# Possible values: -# * String value representing a valid AES key -# -# Related options: -# * None -# -# (string value) -#metadata_encryption_key = - -# -# Digest algorithm to use for digital signature. -# -# Provide a string value representing the digest algorithm to -# use for generating digital signatures. By default, ``sha256`` -# is used. -# -# To get a list of the available algorithms supported by the version -# of OpenSSL on your platform, run the command: -# ``openssl list-message-digest-algorithms``. -# Examples are 'sha1', 'sha256', and 'sha512'. -# -# NOTE: ``digest_algorithm`` is not related to Glance's image signing -# and verification. It is only used to sign the universally unique -# identifier (UUID) as a part of the certificate file and key file -# validation. -# -# Possible values: -# * An OpenSSL message digest algorithm identifier -# -# Relation options: -# * None -# -# (string value) -#digest_algorithm = sha256 - -# -# IP address to bind the glance servers to. -# -# Provide an IP address to bind the glance server to. The default -# value is ``0.0.0.0``. -# -# Edit this option to enable the server to listen on one particular -# IP address on the network card. This facilitates selection of a -# particular network interface for the server. -# -# Possible values: -# * A valid IPv4 address -# * A valid IPv6 address -# -# Related options: -# * None -# -# (string value) -#bind_host = 0.0.0.0 - -# -# Port number on which the server will listen. -# -# Provide a valid port number to bind the server's socket to. This -# port is then set to identify processes and forward network messages -# that arrive at the server. The default bind_port value for the API -# server is 9292 and for the registry server is 9191. -# -# Possible values: -# * A valid port number (0 to 65535) -# -# Related options: -# * None -# -# (port value) -# Minimum value: 0 -# Maximum value: 65535 -#bind_port = - -# -# Set the number of incoming connection requests. 
-# -# Provide a positive integer value to limit the number of requests in -# the backlog queue. The default queue size is 4096. -# -# An incoming connection to a TCP listener socket is queued before a -# connection can be established with the server. Setting the backlog -# for a TCP socket ensures a limited queue size for incoming traffic. -# -# Possible values: -# * Positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#backlog = 4096 - -# -# Set the wait time before a connection recheck. -# -# Provide a positive integer value representing time in seconds which -# is set as the idle wait time before a TCP keep alive packet can be -# sent to the host. The default value is 600 seconds. -# -# Setting ``tcp_keepidle`` helps verify at regular intervals that a -# connection is intact and prevents frequent TCP connection -# reestablishment. -# -# Possible values: -# * Positive integer value representing time in seconds -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#tcp_keepidle = 600 - -# -# Absolute path to the CA file. -# -# Provide a string value representing a valid absolute path to -# the Certificate Authority file to use for client authentication. -# -# A CA file typically contains necessary trusted certificates to -# use for the client authentication. This is essential to ensure -# that a secure connection is established to the server via the -# internet. -# -# Possible values: -# * Valid absolute path to the CA file -# -# Related options: -# * None -# -# (string value) -#ca_file = /etc/ssl/cafile - -# -# Absolute path to the certificate file. -# -# Provide a string value representing a valid absolute path to the -# certificate file which is required to start the API service -# securely. 
-# -# A certificate file typically is a public key container and includes -# the server's public key, server name, server information and the -# signature which was a result of the verification process using the -# CA certificate. This is required for a secure connection -# establishment. -# -# Possible values: -# * Valid absolute path to the certificate file -# -# Related options: -# * None -# -# (string value) -#cert_file = /etc/ssl/certs - -# -# Absolute path to a private key file. -# -# Provide a string value representing a valid absolute path to a -# private key file which is required to establish the client-server -# connection. -# -# Possible values: -# * Absolute path to the private key file -# -# Related options: -# * None -# -# (string value) -#key_file = /etc/ssl/key/key-file.pem - -# DEPRECATED: The HTTP header used to determine the scheme for the original -# request, even if it was removed by an SSL terminating proxy. Typical value is -# "HTTP_X_FORWARDED_PROTO". (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Use the http_proxy_to_wsgi middleware instead. -#secure_proxy_ssl_header = - -# -# Number of Glance worker processes to start. -# -# Provide a non-negative integer value to set the number of child -# process workers to service requests. By default, the number of CPUs -# available is set as the value for ``workers``. -# -# Each worker process is made to listen on the port set in the -# configuration file and contains a greenthread pool of size 1000. -# -# NOTE: Setting the number of workers to zero, triggers the creation -# of a single API process with a greenthread pool of size 1000. -# -# Possible values: -# * 0 -# * Positive integer value (typically equal to the number of CPUs) -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#workers = - -# -# Maximum line size of message headers. 
-# -# Provide an integer value representing a length to limit the size of -# message headers. The default value is 16384. -# -# NOTE: ``max_header_line`` may need to be increased when using large -# tokens (typically those generated by the Keystone v3 API with big -# service catalogs). However, it is to be kept in mind that larger -# values for ``max_header_line`` would flood the logs. -# -# Setting ``max_header_line`` to 0 sets no limit for the line size of -# message headers. -# -# Possible values: -# * 0 -# * Positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#max_header_line = 16384 - -# -# Set keep alive option for HTTP over TCP. -# -# Provide a boolean value to determine sending of keep alive packets. -# If set to ``False``, the server returns the header -# "Connection: close". If set to ``True``, the server returns a -# "Connection: Keep-Alive" in its responses. This enables retention of -# the same TCP connection for HTTP conversations instead of opening a -# new one with each new request. -# -# This option must be set to ``False`` if the client socket connection -# needs to be closed explicitly after the response is received and -# read successfully by the client. -# -# Possible values: -# * True -# * False -# -# Related options: -# * None -# -# (boolean value) -#http_keepalive = true - -# -# Timeout for client connections' socket operations. -# -# Provide a valid integer value representing time in seconds to set -# the period of wait before an incoming connection can be closed. The -# default value is 900 seconds. -# -# The value zero implies wait forever. -# -# Possible values: -# * Zero -# * Positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#client_socket_timeout = 900 - -# -# From oslo.log -# - -# If set to true, the logging level will be set to DEBUG instead of the default -# INFO level. (boolean value) -# Note: This option can be changed without restarting. 
-#debug = false - -# DEPRECATED: If set to false, the logging level will be set to WARNING instead -# of the default INFO level. (boolean value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -#verbose = true - -# The name of a logging configuration file. This file is appended to any -# existing logging configuration files. For details about logging configuration -# files, see the Python logging module documentation. Note that when logging -# configuration files are used then all logging configuration is set in the -# configuration file and other logging configuration options are ignored (for -# example, logging_context_format_string). (string value) -# Note: This option can be changed without restarting. -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append = - -# Defines the format string for %%(asctime)s in log records. Default: -# %(default)s . This option is ignored if log_config_append is set. (string -# value) -#log_date_format = %Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to send logging output to. If no default is set, -# logging will go to stderr as defined by use_stderr. This option is ignored if -# log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file = - -# (Optional) The base directory used for relative log_file paths. This option -# is ignored if log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir = - -# Uses logging handler designed to watch file system. When log file is moved or -# removed this handler will open a new log file with specified path -# instantaneously. It makes sense only if log_file option is specified and Linux -# platform is used. This option is ignored if log_config_append is set. (boolean -# value) -#watch_log_file = false - -# Use syslog for logging. Existing syslog format is DEPRECATED and will be -# changed later to honor RFC5424. 
This option is ignored if log_config_append is -# set. (boolean value) -#use_syslog = false - -# Syslog facility to receive log lines. This option is ignored if -# log_config_append is set. (string value) -#syslog_log_facility = LOG_USER - -# Log output to standard error. This option is ignored if log_config_append is -# set. (boolean value) -#use_stderr = false - -# Format string to use for log messages with context. (string value) -#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages when context is undefined. (string -# value) -#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Additional data to append to log message when logging level for the message is -# DEBUG. (string value) -#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format. (string value) -#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s - -# Defines the format string for %(user_identity)s that is used in -# logging_context_format_string. (string value) -#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s - -# List of package logging levels in logger=LEVEL pairs. This option is ignored -# if log_config_append is set. (list value) -#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO - -# Enables or disables publication of error events. 
(boolean value) -#publish_errors = false - -# The format for an instance that is passed with the log message. (string value) -#instance_format = "[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log message. (string -# value) -#instance_uuid_format = "[instance: %(uuid)s] " - -# Interval, number of seconds, of log rate limiting. (integer value) -#rate_limit_interval = 0 - -# Maximum number of logged messages per rate_limit_interval. (integer value) -#rate_limit_burst = 0 - -# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG or -# empty string. Logs with level greater or equal to rate_limit_except_level are -# not filtered. An empty string means that all levels are filtered. (string -# value) -#rate_limit_except_level = CRITICAL - -# Enables or disables fatal status of deprecations. (boolean value) -#fatal_deprecations = false - -# -# From oslo.messaging -# - -# Size of RPC connection pool. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size -#rpc_conn_pool_size = 30 - -# The pool size limit for connections expiration policy (integer value) -#conn_pool_min_size = 2 - -# The time-to-live in sec of idle connections in the pool (integer value) -#conn_pool_ttl = 1200 - -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. (string value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_bind_address -#rpc_zmq_bind_address = * - -# MatchMaker driver. (string value) -# Allowed values: redis, sentinel, dummy -# Deprecated group/name - [DEFAULT]/rpc_zmq_matchmaker -#rpc_zmq_matchmaker = redis - -# Number of ZeroMQ contexts, defaults to 1. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_contexts -#rpc_zmq_contexts = 1 - -# Maximum number of ingress messages to locally buffer per topic. Default is -# unlimited. 
(integer value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_topic_backlog -#rpc_zmq_topic_backlog = - -# Directory for holding IPC sockets. (string value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_ipc_dir -#rpc_zmq_ipc_dir = /var/run/openstack - -# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match -# "host" option, if running Nova. (string value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_host -#rpc_zmq_host = localhost - -# Number of seconds to wait before all pending messages will be sent after -# closing a socket. The default value of -1 specifies an infinite linger period. -# The value of 0 specifies no linger period. Pending messages shall be discarded -# immediately when the socket is closed. Positive values specify an upper bound -# for the linger period. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_cast_timeout -#zmq_linger = -1 - -# The default number of seconds that poll should wait. Poll raises timeout -# exception when timeout expired. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_poll_timeout -#rpc_poll_timeout = 1 - -# Expiration timeout in seconds of a name service record about existing target ( -# < 0 means no timeout). (integer value) -# Deprecated group/name - [DEFAULT]/zmq_target_expire -#zmq_target_expire = 300 - -# Update period in seconds of a name service record about existing target. -# (integer value) -# Deprecated group/name - [DEFAULT]/zmq_target_update -#zmq_target_update = 180 - -# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean -# value) -# Deprecated group/name - [DEFAULT]/use_pub_sub -#use_pub_sub = false - -# Use ROUTER remote proxy. (boolean value) -# Deprecated group/name - [DEFAULT]/use_router_proxy -#use_router_proxy = false - -# This option makes direct connections dynamic or static. It makes sense only -# with use_router_proxy=False which means to use direct connections for direct -# message types (ignored otherwise). 
(boolean value) -#use_dynamic_connections = false - -# How many additional connections to a host will be made for failover reasons. -# This option is actual only in dynamic connections mode. (integer value) -#zmq_failover_connections = 2 - -# Minimal port number for random ports range. (port value) -# Minimum value: 0 -# Maximum value: 65535 -# Deprecated group/name - [DEFAULT]/rpc_zmq_min_port -#rpc_zmq_min_port = 49153 - -# Maximal port number for random ports range. (integer value) -# Minimum value: 1 -# Maximum value: 65536 -# Deprecated group/name - [DEFAULT]/rpc_zmq_max_port -#rpc_zmq_max_port = 65536 - -# Number of retries to find free port number before fail with ZMQBindError. -# (integer value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_bind_port_retries -#rpc_zmq_bind_port_retries = 100 - -# Default serialization mechanism for serializing/deserializing -# outgoing/incoming messages (string value) -# Allowed values: json, msgpack -# Deprecated group/name - [DEFAULT]/rpc_zmq_serialization -#rpc_zmq_serialization = json - -# This option configures round-robin mode in zmq socket. True means not keeping -# a queue when server side disconnects. False means to keep queue and messages -# even if server is disconnected, when the server appears we send all -# accumulated messages to it. (boolean value) -#zmq_immediate = true - -# Enable/disable TCP keepalive (KA) mechanism. The default value of -1 (or any -# other negative value) means to skip any overrides and leave it to OS default; -# 0 and 1 (or any other positive value) mean to disable and enable the option -# respectively. (integer value) -#zmq_tcp_keepalive = -1 - -# The duration between two keepalive transmissions in idle condition. The unit -# is platform dependent, for example, seconds in Linux, milliseconds in Windows -# etc. The default value of -1 (or any other negative value and 0) means to skip -# any overrides and leave it to OS default. 
(integer value) -#zmq_tcp_keepalive_idle = -1 - -# The number of retransmissions to be carried out before declaring that remote -# end is not available. The default value of -1 (or any other negative value and -# 0) means to skip any overrides and leave it to OS default. (integer value) -#zmq_tcp_keepalive_cnt = -1 - -# The duration between two successive keepalive retransmissions, if -# acknowledgement to the previous keepalive transmission is not received. The -# unit is platform dependent, for example, seconds in Linux, milliseconds in -# Windows etc. The default value of -1 (or any other negative value and 0) means -# to skip any overrides and leave it to OS default. (integer value) -#zmq_tcp_keepalive_intvl = -1 - -# Maximum number of (green) threads to work concurrently. (integer value) -#rpc_thread_pool_size = 100 - -# Expiration timeout in seconds of a sent/received message after which it is not -# tracked anymore by a client/server. (integer value) -#rpc_message_ttl = 300 - -# Wait for message acknowledgements from receivers. This mechanism works only -# via proxy without PUB/SUB. (boolean value) -#rpc_use_acks = false - -# Number of seconds to wait for an ack from a cast/call. After each retry -# attempt this timeout is multiplied by some specified multiplier. (integer -# value) -#rpc_ack_timeout_base = 15 - -# Number to multiply base ack timeout by after each retry attempt. (integer -# value) -#rpc_ack_timeout_multiplier = 2 - -# Default number of message sending attempts in case of any problems occurred: -# positive value N means at most N retries, 0 means no retries, None or -1 (or -# any other negative values) mean to retry forever. This option is used only if -# acknowledgments are enabled. (integer value) -#rpc_retry_attempts = 3 - -# List of publisher hosts SubConsumer can subscribe on. This option has higher -# priority then the default publishers list taken from the matchmaker. (list -# value) -#subscribe_on = - -# Size of executor thread pool. 
(integer value) -# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size -#executor_thread_pool_size = 64 - -# Seconds to wait for a response from a call. (integer value) -#rpc_response_timeout = 60 - -# A URL representing the messaging driver to use and its full configuration. -# (string value) -#transport_url = - -# DEPRECATED: The messaging driver to use, defaults to rabbit. Other drivers -# include amqp and zmq. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#rpc_backend = rabbit - -# The default exchange under which topics are scoped. May be overridden by an -# exchange name specified in the transport_url option. (string value) -#control_exchange = openstack - - -[database] - -# -# From oslo.db -# - -# DEPRECATED: The file name to use with SQLite. (string value) -# Deprecated group/name - [DEFAULT]/sqlite_db -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Should use config option connection or slave_connection to connect the -# database. -#sqlite_db = oslo.sqlite - -# If True, SQLite uses synchronous mode. (boolean value) -# Deprecated group/name - [DEFAULT]/sqlite_synchronous -#sqlite_synchronous = true - -# The back end to use for the database. (string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend = sqlalchemy - -# The SQLAlchemy connection string to use to connect to the database. (string -# value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection = - -# The SQLAlchemy connection string to use to connect to the slave database. -# (string value) -#slave_connection = - -# The SQL mode to be used for MySQL sessions. This option, including the -# default, overrides any server-set SQL mode. 
To use whatever SQL mode is set by -# the server configuration, set this to no value. Example: mysql_sql_mode= -# (string value) -#mysql_sql_mode = TRADITIONAL - -# Timeout before idle SQL connections are reaped. (integer value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout = 3600 - -# Minimum number of SQL connections to keep open in a pool. (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool. Setting a value of 0 -# indicates no limit. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size = 5 - -# Maximum number of database connection retries during startup. Set to -1 to -# specify an infinite retry count. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries = 10 - -# Interval between retries of opening a SQL connection. (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval = 10 - -# If set, use this value for max_overflow with SQLAlchemy. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow = 50 - -# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer -# value) -# Minimum value: 0 -# Maximum value: 100 -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug = 0 - -# Add Python stack traces to SQL as comment strings. (boolean value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace = false - -# If set, use this value for pool_timeout with SQLAlchemy. 
(integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout = - -# Enable the experimental use of database reconnect on connection lost. (boolean -# value) -#use_db_reconnect = false - -# Seconds between retries of a database transaction. (integer value) -#db_retry_interval = 1 - -# If True, increases the interval between retries of a database operation up to -# db_max_retry_interval. (boolean value) -#db_inc_retry_interval = true - -# If db_inc_retry_interval is set, the maximum seconds between retries of a -# database operation. (integer value) -#db_max_retry_interval = 10 - -# Maximum retries in case of connection error or deadlock error before error is -# raised. Set to -1 to specify an infinite retry count. (integer value) -#db_max_retries = 20 - -# -# From oslo.db.concurrency -# - -# Enable the experimental use of thread pooling for all DB API calls (boolean -# value) -# Deprecated group/name - [DEFAULT]/dbapi_use_tpool -#use_tpool = false - - -[keystone_authtoken] - -# -# From keystonemiddleware.auth_token -# - -# Complete "public" Identity API endpoint. This endpoint should not be an -# "admin" endpoint, as it should be accessible by all end users. Unauthenticated -# clients are redirected to this endpoint to authenticate. Although this -# endpoint should ideally be unversioned, client support in the wild varies. -# If you're using a versioned v2 endpoint here, then this should *not* be the -# same endpoint the service user utilizes for validating tokens, because normal -# end users may not be able to reach that endpoint. (string value) -#auth_uri = - -# API version of the admin Identity API endpoint. (string value) -#auth_version = - -# Do not handle authorization requests within the middleware, but delegate the -# authorization decision to downstream WSGI components. (boolean value) -#delay_auth_decision = false - -# Request timeout value for communicating with Identity API server. 
(integer -# value) -#http_connect_timeout = - -# How many times are we trying to reconnect when communicating with Identity API -# Server. (integer value) -#http_request_max_retries = 3 - -# Request environment key where the Swift cache object is stored. When -# auth_token middleware is deployed with a Swift cache, use this option to have -# the middleware share a caching backend with swift. Otherwise, use the -# ``memcached_servers`` option instead. (string value) -#cache = - -# Required if identity server requires client certificate (string value) -#certfile = - -# Required if identity server requires client certificate (string value) -#keyfile = - -# A PEM encoded Certificate Authority to use when verifying HTTPs connections. -# Defaults to system CAs. (string value) -#cafile = - -# Verify HTTPS connections. (boolean value) -#insecure = false - -# The region in which the identity server can be found. (string value) -#region_name = - -# DEPRECATED: Directory used to cache files related to PKI tokens. This option -# has been deprecated in the Ocata release and will be removed in the P release. -# (string value) -# This option is deprecated for removal since Ocata. -# Its value may be silently ignored in the future. -# Reason: PKI token format is no longer supported. -#signing_dir = - -# Optionally specify a list of memcached server(s) to use for caching. If left -# undefined, tokens will instead be cached in-process. (list value) -# Deprecated group/name - [keystone_authtoken]/memcache_servers -#memcached_servers = - -# In order to prevent excessive effort spent validating tokens, the middleware -# caches previously-seen tokens for a configurable duration (in seconds). Set to -# -1 to disable caching completely. (integer value) -#token_cache_time = 300 - -# DEPRECATED: Determines the frequency at which the list of revoked tokens is -# retrieved from the Identity service (in seconds). 
A high number of revocation -# events combined with a low cache duration may significantly reduce -# performance. Only valid for PKI tokens. This option has been deprecated in the -# Ocata release and will be removed in the P release. (integer value) -# This option is deprecated for removal since Ocata. -# Its value may be silently ignored in the future. -# Reason: PKI token format is no longer supported. -#revocation_cache_time = 10 - -# (Optional) If defined, indicate whether token data should be authenticated or -# authenticated and encrypted. If MAC, token data is authenticated (with HMAC) -# in the cache. If ENCRYPT, token data is encrypted and authenticated in the -# cache. If the value is not one of these options or empty, auth_token will -# raise an exception on initialization. (string value) -# Allowed values: None, MAC, ENCRYPT -#memcache_security_strategy = None - -# (Optional, mandatory if memcache_security_strategy is defined) This string is -# used for key derivation. (string value) -#memcache_secret_key = - -# (Optional) Number of seconds memcached server is considered dead before it is -# tried again. (integer value) -#memcache_pool_dead_retry = 300 - -# (Optional) Maximum total number of open connections to every memcached server. -# (integer value) -#memcache_pool_maxsize = 10 - -# (Optional) Socket timeout in seconds for communicating with a memcached -# server. (integer value) -#memcache_pool_socket_timeout = 3 - -# (Optional) Number of seconds a connection to memcached is held unused in the -# pool before it is closed. (integer value) -#memcache_pool_unused_timeout = 60 - -# (Optional) Number of seconds that an operation will wait to get a memcached -# client connection from the pool. (integer value) -#memcache_pool_conn_get_timeout = 10 - -# (Optional) Use the advanced (eventlet safe) memcached client pool. The -# advanced pool will only work under python 2.x. 
(boolean value) -#memcache_use_advanced_pool = false - -# (Optional) Indicate whether to set the X-Service-Catalog header. If False, -# middleware will not ask for service catalog on token validation and will not -# set the X-Service-Catalog header. (boolean value) -#include_service_catalog = true - -# Used to control the use and type of token binding. Can be set to: "disabled" -# to not check token binding. "permissive" (default) to validate binding -# information if the bind type is of a form known to the server and ignore it if -# not. "strict" like "permissive" but if the bind type is unknown the token will -# be rejected. "required" any form of token binding is needed to be allowed. -# Finally the name of a binding method that must be present in tokens. (string -# value) -#enforce_token_bind = permissive - -# DEPRECATED: If true, the revocation list will be checked for cached tokens. -# This requires that PKI tokens are configured on the identity server. (boolean -# value) -# This option is deprecated for removal since Ocata. -# Its value may be silently ignored in the future. -# Reason: PKI token format is no longer supported. -#check_revocations_for_cached = false - -# DEPRECATED: Hash algorithms to use for hashing PKI tokens. This may be a -# single algorithm or multiple. The algorithms are those supported by Python -# standard hashlib.new(). The hashes will be tried in the order given, so put -# the preferred one first for performance. The result of the first hash will be -# stored in the cache. This will typically be set to multiple values only while -# migrating from a less secure algorithm to a more secure one. Once all the old -# tokens are expired this option should be set to a single value for better -# performance. (list value) -# This option is deprecated for removal since Ocata. -# Its value may be silently ignored in the future. -# Reason: PKI token format is no longer supported. 
-#hash_algorithms = md5 - -# A choice of roles that must be present in a service token. Service tokens are -# allowed to request that an expired token can be used and so this check should -# tightly control that only actual services should be sending this token. Roles -# here are applied as an ANY check so any role in this list must be present. For -# backwards compatibility reasons this currently only affects the allow_expired -# check. (list value) -#service_token_roles = service - -# For backwards compatibility reasons we must let valid service tokens pass that -# don't pass the service_token_roles check as valid. Setting this true will -# become the default in a future release and should be enabled if possible. -# (boolean value) -#service_token_roles_required = false - -# Authentication type to load (string value) -# Deprecated group/name - [keystone_authtoken]/auth_plugin -#auth_type = - -# Config Section from which to load plugin specific options (string value) -#auth_section = - - -[matchmaker_redis] - -# -# From oslo.messaging -# - -# DEPRECATED: Host to locate redis. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#host = 127.0.0.1 - -# DEPRECATED: Use this port to connect to redis host. (port value) -# Minimum value: 0 -# Maximum value: 65535 -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#port = 6379 - -# DEPRECATED: Password for Redis server (optional). (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#password = - -# DEPRECATED: List of Redis Sentinel hosts (fault tolerance mode), e.g., -# [host:port, host1:port ... ] (list value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. 
-# Reason: Replaced by [DEFAULT]/transport_url -#sentinel_hosts = - -# Redis replica set name. (string value) -#sentinel_group_name = oslo-messaging-zeromq - -# Time in ms to wait between connection attempts. (integer value) -#wait_timeout = 2000 - -# Time in ms to wait before the transaction is killed. (integer value) -#check_timeout = 20000 - -# Timeout in ms on blocking socket operations. (integer value) -#socket_timeout = 10000 - - -[oslo_messaging_amqp] - -# -# From oslo.messaging -# - -# Name for the AMQP container. must be globally unique. Defaults to a generated -# UUID (string value) -# Deprecated group/name - [amqp1]/container_name -#container_name = - -# Timeout for inactive connections (in seconds) (integer value) -# Deprecated group/name - [amqp1]/idle_timeout -#idle_timeout = 0 - -# Debug: dump AMQP frames to stdout (boolean value) -# Deprecated group/name - [amqp1]/trace -#trace = false - -# CA certificate PEM file used to verify the server's certificate (string value) -# Deprecated group/name - [amqp1]/ssl_ca_file -#ssl_ca_file = - -# Self-identifying certificate PEM file for client authentication (string value) -# Deprecated group/name - [amqp1]/ssl_cert_file -#ssl_cert_file = - -# Private key PEM file used to sign ssl_cert_file certificate (optional) (string -# value) -# Deprecated group/name - [amqp1]/ssl_key_file -#ssl_key_file = - -# Password for decrypting ssl_key_file (if encrypted) (string value) -# Deprecated group/name - [amqp1]/ssl_key_password -#ssl_key_password = - -# DEPRECATED: Accept clients using either SSL or plain TCP (boolean value) -# Deprecated group/name - [amqp1]/allow_insecure_clients -# This option is deprecated for removal. -# Its value may be silently ignored in the future. 
-# Reason: Not applicable - not a SSL server -#allow_insecure_clients = false - -# Space separated list of acceptable SASL mechanisms (string value) -# Deprecated group/name - [amqp1]/sasl_mechanisms -#sasl_mechanisms = - -# Path to directory that contains the SASL configuration (string value) -# Deprecated group/name - [amqp1]/sasl_config_dir -#sasl_config_dir = - -# Name of configuration file (without .conf suffix) (string value) -# Deprecated group/name - [amqp1]/sasl_config_name -#sasl_config_name = - -# User name for message broker authentication (string value) -# Deprecated group/name - [amqp1]/username -#username = - -# Password for message broker authentication (string value) -# Deprecated group/name - [amqp1]/password -#password = - -# Seconds to pause before attempting to re-connect. (integer value) -# Minimum value: 1 -#connection_retry_interval = 1 - -# Increase the connection_retry_interval by this many seconds after each -# unsuccessful failover attempt. (integer value) -# Minimum value: 0 -#connection_retry_backoff = 2 - -# Maximum limit for connection_retry_interval + connection_retry_backoff -# (integer value) -# Minimum value: 1 -#connection_retry_interval_max = 30 - -# Time to pause between re-connecting an AMQP 1.0 link that failed due to a -# recoverable error. (integer value) -# Minimum value: 1 -#link_retry_delay = 10 - -# The maximum number of attempts to re-send a reply message which failed due to -# a recoverable error. (integer value) -# Minimum value: -1 -#default_reply_retry = 0 - -# The deadline for an rpc reply message delivery. (integer value) -# Minimum value: 5 -#default_reply_timeout = 30 - -# The deadline for an rpc cast or call message delivery. Only used when caller -# does not provide a timeout expiry. (integer value) -# Minimum value: 5 -#default_send_timeout = 30 - -# The deadline for a sent notification message delivery. Only used when caller -# does not provide a timeout expiry. 
(integer value) -# Minimum value: 5 -#default_notify_timeout = 30 - -# The duration to schedule a purge of idle sender links. Detach link after -# expiry. (integer value) -# Minimum value: 1 -#default_sender_link_timeout = 600 - -# Indicates the addressing mode used by the driver. -# Permitted values: -# 'legacy' - use legacy non-routable addressing -# 'routable' - use routable addresses -# 'dynamic' - use legacy addresses if the message bus does not support routing -# otherwise use routable addressing (string value) -#addressing_mode = dynamic - -# address prefix used when sending to a specific server (string value) -# Deprecated group/name - [amqp1]/server_request_prefix -#server_request_prefix = exclusive - -# address prefix used when broadcasting to all servers (string value) -# Deprecated group/name - [amqp1]/broadcast_prefix -#broadcast_prefix = broadcast - -# address prefix when sending to any server in group (string value) -# Deprecated group/name - [amqp1]/group_request_prefix -#group_request_prefix = unicast - -# Address prefix for all generated RPC addresses (string value) -#rpc_address_prefix = openstack.org/om/rpc - -# Address prefix for all generated Notification addresses (string value) -#notify_address_prefix = openstack.org/om/notify - -# Appended to the address prefix when sending a fanout message. Used by the -# message bus to identify fanout messages. (string value) -#multicast_address = multicast - -# Appended to the address prefix when sending to a particular RPC/Notification -# server. Used by the message bus to identify messages sent to a single -# destination. (string value) -#unicast_address = unicast - -# Appended to the address prefix when sending to a group of consumers. Used by -# the message bus to identify messages that should be delivered in a round-robin -# fashion across consumers. (string value) -#anycast_address = anycast - -# Exchange name used in notification addresses. 
-# Exchange name resolution precedence: -# Target.exchange if set -# else default_notification_exchange if set -# else control_exchange if set -# else 'notify' (string value) -#default_notification_exchange = - -# Exchange name used in RPC addresses. -# Exchange name resolution precedence: -# Target.exchange if set -# else default_rpc_exchange if set -# else control_exchange if set -# else 'rpc' (string value) -#default_rpc_exchange = - -# Window size for incoming RPC Reply messages. (integer value) -# Minimum value: 1 -#reply_link_credit = 200 - -# Window size for incoming RPC Request messages (integer value) -# Minimum value: 1 -#rpc_server_credit = 100 - -# Window size for incoming Notification messages (integer value) -# Minimum value: 1 -#notify_server_credit = 100 - -# Send messages of this type pre-settled. -# Pre-settled messages will not receive acknowledgement -# from the peer. Note well: pre-settled messages may be -# silently discarded if the delivery fails. -# Permitted values: -# 'rpc-call' - send RPC Calls pre-settled -# 'rpc-reply'- send RPC Replies pre-settled -# 'rpc-cast' - Send RPC Casts pre-settled -# 'notify' - Send Notifications pre-settled -# (multi valued) -#pre_settled = rpc-cast -#pre_settled = rpc-reply - - -[oslo_messaging_kafka] - -# -# From oslo.messaging -# - -# DEPRECATED: Default Kafka broker Host (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#kafka_default_host = localhost - -# DEPRECATED: Default Kafka broker Port (port value) -# Minimum value: 0 -# Maximum value: 65535 -# This option is deprecated for removal. -# Its value may be silently ignored in the future. 
-# Reason: Replaced by [DEFAULT]/transport_url -#kafka_default_port = 9092 - -# Max fetch bytes of Kafka consumer (integer value) -#kafka_max_fetch_bytes = 1048576 - -# Default timeout(s) for Kafka consumers (integer value) -#kafka_consumer_timeout = 1.0 - -# Pool Size for Kafka Consumers (integer value) -#pool_size = 10 - -# The pool size limit for connections expiration policy (integer value) -#conn_pool_min_size = 2 - -# The time-to-live in sec of idle connections in the pool (integer value) -#conn_pool_ttl = 1200 - -# Group id for Kafka consumer. Consumers in one group will coordinate message -# consumption (string value) -#consumer_group = oslo_messaging_consumer - -# Upper bound on the delay for KafkaProducer batching in seconds (floating point -# value) -#producer_batch_timeout = 0.0 - -# Size of batch for the producer async send (integer value) -#producer_batch_size = 16384 - - -[oslo_messaging_notifications] - -# -# From oslo.messaging -# - -# The Drivers(s) to handle sending notifications. Possible values are messaging, -# messagingv2, routing, log, test, noop (multi valued) -# Deprecated group/name - [DEFAULT]/notification_driver -#driver = - -# A URL representing the messaging driver to use for notifications. If not set, -# we fall back to the same configuration used for RPC. (string value) -# Deprecated group/name - [DEFAULT]/notification_transport_url -#transport_url = - -# AMQP topic used for OpenStack notifications. (list value) -# Deprecated group/name - [rpc_notifier2]/topics -# Deprecated group/name - [DEFAULT]/notification_topics -#topics = notifications - - -[oslo_messaging_rabbit] - -# -# From oslo.messaging -# - -# Use durable queues in AMQP. (boolean value) -# Deprecated group/name - [DEFAULT]/amqp_durable_queues -# Deprecated group/name - [DEFAULT]/rabbit_durable_queues -#amqp_durable_queues = false - -# Auto-delete queues in AMQP. 
(boolean value) -# Deprecated group/name - [DEFAULT]/amqp_auto_delete -#amqp_auto_delete = false - -# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and -# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some -# distributions. (string value) -# Deprecated group/name - [DEFAULT]/kombu_ssl_version -#kombu_ssl_version = - -# SSL key file (valid only if SSL enabled). (string value) -# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile -#kombu_ssl_keyfile = - -# SSL cert file (valid only if SSL enabled). (string value) -# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile -#kombu_ssl_certfile = - -# SSL certification authority file (valid only if SSL enabled). (string value) -# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs -#kombu_ssl_ca_certs = - -# How long to wait before reconnecting in response to an AMQP consumer cancel -# notification. (floating point value) -# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay -#kombu_reconnect_delay = 1.0 - -# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not -# be used. This option may not be available in future versions. (string value) -#kombu_compression = - -# How long to wait a missing client before abandoning to send it its replies. -# This value should not be longer than rpc_response_timeout. (integer value) -# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout -#kombu_missing_consumer_retry_timeout = 60 - -# Determines how the next RabbitMQ node is chosen in case the one we are -# currently connected to becomes unavailable. Takes effect only if more than one -# RabbitMQ node is provided in config. (string value) -# Allowed values: round-robin, shuffle -#kombu_failover_strategy = round-robin - -# DEPRECATED: The RabbitMQ broker address where a single node is used. (string -# value) -# Deprecated group/name - [DEFAULT]/rabbit_host -# This option is deprecated for removal. 
-# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#rabbit_host = localhost - -# DEPRECATED: The RabbitMQ broker port where a single node is used. (port value) -# Minimum value: 0 -# Maximum value: 65535 -# Deprecated group/name - [DEFAULT]/rabbit_port -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#rabbit_port = 5672 - -# DEPRECATED: RabbitMQ HA cluster host:port pairs. (list value) -# Deprecated group/name - [DEFAULT]/rabbit_hosts -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#rabbit_hosts = $rabbit_host:$rabbit_port - -# Connect over SSL for RabbitMQ. (boolean value) -# Deprecated group/name - [DEFAULT]/rabbit_use_ssl -#rabbit_use_ssl = false - -# DEPRECATED: The RabbitMQ userid. (string value) -# Deprecated group/name - [DEFAULT]/rabbit_userid -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#rabbit_userid = guest - -# DEPRECATED: The RabbitMQ password. (string value) -# Deprecated group/name - [DEFAULT]/rabbit_password -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#rabbit_password = guest - -# The RabbitMQ login method. (string value) -# Allowed values: PLAIN, AMQPLAIN, RABBIT-CR-DEMO -# Deprecated group/name - [DEFAULT]/rabbit_login_method -#rabbit_login_method = AMQPLAIN - -# DEPRECATED: The RabbitMQ virtual host. (string value) -# Deprecated group/name - [DEFAULT]/rabbit_virtual_host -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Replaced by [DEFAULT]/transport_url -#rabbit_virtual_host = / - -# How frequently to retry connecting with RabbitMQ. 
(integer value) -#rabbit_retry_interval = 1 - -# How long to backoff for between retries when connecting to RabbitMQ. (integer -# value) -# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff -#rabbit_retry_backoff = 2 - -# Maximum interval of RabbitMQ connection retries. Default is 30 seconds. -# (integer value) -#rabbit_interval_max = 30 - -# DEPRECATED: Maximum number of RabbitMQ connection retries. Default is 0 -# (infinite retry count). (integer value) -# Deprecated group/name - [DEFAULT]/rabbit_max_retries -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -#rabbit_max_retries = 0 - -# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this -# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring -# is no longer controlled by the x-ha-policy argument when declaring a queue. If -# you just want to make sure that all queues (except those with auto-generated -# names) are mirrored across all nodes, run: "rabbitmqctl set_policy HA -# '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value) -# Deprecated group/name - [DEFAULT]/rabbit_ha_queues -#rabbit_ha_queues = false - -# Positive integer representing duration in seconds for queue TTL (x-expires). -# Queues which are unused for the duration of the TTL are automatically deleted. -# The parameter affects only reply and fanout queues. (integer value) -# Minimum value: 1 -#rabbit_transient_queues_ttl = 1800 - -# Specifies the number of messages to prefetch. Setting to zero allows unlimited -# messages. (integer value) -#rabbit_qos_prefetch_count = 0 - -# Number of seconds after which the Rabbit broker is considered down if -# heartbeat's keep-alive fails (0 disable the heartbeat). EXPERIMENTAL (integer -# value) -#heartbeat_timeout_threshold = 60 - -# How often times during the heartbeat_timeout_threshold we check the heartbeat. 
-# (integer value) -#heartbeat_rate = 2 - -# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value) -# Deprecated group/name - [DEFAULT]/fake_rabbit -#fake_rabbit = false - -# Maximum number of channels to allow (integer value) -#channel_max = - -# The maximum byte size for an AMQP frame (integer value) -#frame_max = - -# How often to send heartbeats for consumer's connections (integer value) -#heartbeat_interval = 3 - -# Enable SSL (boolean value) -#ssl = - -# Arguments passed to ssl.wrap_socket (dict value) -#ssl_options = - -# Set socket timeout in seconds for connection's socket (floating point value) -#socket_timeout = 0.25 - -# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating point value) -#tcp_user_timeout = 0.25 - -# Set delay for reconnection to some host which has connection error (floating -# point value) -#host_connection_reconnect_delay = 0.25 - -# Connection factory implementation (string value) -# Allowed values: new, single, read_write -#connection_factory = single - -# Maximum number of connections to keep queued. (integer value) -#pool_max_size = 30 - -# Maximum number of connections to create above `pool_max_size`. (integer value) -#pool_max_overflow = 0 - -# Default number of seconds to wait for a connections to available (integer -# value) -#pool_timeout = 30 - -# Lifetime of a connection (since creation) in seconds or None for no recycling. -# Expired connections are closed on acquire. (integer value) -#pool_recycle = 600 - -# Threshold at which inactive (since release) connections are considered stale -# in seconds or None for no staleness. Stale connections are closed on acquire. -# (integer value) -#pool_stale = 60 - -# Default serialization mechanism for serializing/deserializing -# outgoing/incoming messages (string value) -# Allowed values: json, msgpack -#default_serializer_type = json - -# Persist notification messages. 
(boolean value) -#notification_persistence = false - -# Exchange name for sending notifications (string value) -#default_notification_exchange = ${control_exchange}_notification - -# Max number of not acknowledged message which RabbitMQ can send to notification -# listener. (integer value) -#notification_listener_prefetch_count = 100 - -# Reconnecting retry count in case of connectivity problem during sending -# notification, -1 means infinite retry. (integer value) -#default_notification_retry_attempts = -1 - -# Reconnecting retry delay in case of connectivity problem during sending -# notification message (floating point value) -#notification_retry_delay = 0.25 - -# Time to live for rpc queues without consumers in seconds. (integer value) -#rpc_queue_expiration = 60 - -# Exchange name for sending RPC messages (string value) -#default_rpc_exchange = ${control_exchange}_rpc - -# Exchange name for receiving RPC replies (string value) -#rpc_reply_exchange = ${control_exchange}_rpc_reply - -# Max number of not acknowledged message which RabbitMQ can send to rpc -# listener. (integer value) -#rpc_listener_prefetch_count = 100 - -# Max number of not acknowledged message which RabbitMQ can send to rpc reply -# listener. (integer value) -#rpc_reply_listener_prefetch_count = 100 - -# Reconnecting retry count in case of connectivity problem during sending reply. -# -1 means infinite retry during rpc_timeout (integer value) -#rpc_reply_retry_attempts = -1 - -# Reconnecting retry delay in case of connectivity problem during sending reply. -# (floating point value) -#rpc_reply_retry_delay = 0.25 - -# Reconnecting retry count in case of connectivity problem during sending RPC -# message, -1 means infinite retry. 
If actual retry attempts in not 0 the rpc -# request could be processed more than one time (integer value) -#default_rpc_retry_attempts = -1 - -# Reconnecting retry delay in case of connectivity problem during sending RPC -# message (floating point value) -#rpc_retry_delay = 0.25 - - -[oslo_messaging_zmq] - -# -# From oslo.messaging -# - -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. (string value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_bind_address -#rpc_zmq_bind_address = * - -# MatchMaker driver. (string value) -# Allowed values: redis, sentinel, dummy -# Deprecated group/name - [DEFAULT]/rpc_zmq_matchmaker -#rpc_zmq_matchmaker = redis - -# Number of ZeroMQ contexts, defaults to 1. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_contexts -#rpc_zmq_contexts = 1 - -# Maximum number of ingress messages to locally buffer per topic. Default is -# unlimited. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_topic_backlog -#rpc_zmq_topic_backlog = - -# Directory for holding IPC sockets. (string value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_ipc_dir -#rpc_zmq_ipc_dir = /var/run/openstack - -# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match -# "host" option, if running Nova. (string value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_host -#rpc_zmq_host = localhost - -# Number of seconds to wait before all pending messages will be sent after -# closing a socket. The default value of -1 specifies an infinite linger period. -# The value of 0 specifies no linger period. Pending messages shall be discarded -# immediately when the socket is closed. Positive values specify an upper bound -# for the linger period. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_cast_timeout -#zmq_linger = -1 - -# The default number of seconds that poll should wait. Poll raises timeout -# exception when timeout expired. 
(integer value) -# Deprecated group/name - [DEFAULT]/rpc_poll_timeout -#rpc_poll_timeout = 1 - -# Expiration timeout in seconds of a name service record about existing target ( -# < 0 means no timeout). (integer value) -# Deprecated group/name - [DEFAULT]/zmq_target_expire -#zmq_target_expire = 300 - -# Update period in seconds of a name service record about existing target. -# (integer value) -# Deprecated group/name - [DEFAULT]/zmq_target_update -#zmq_target_update = 180 - -# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean -# value) -# Deprecated group/name - [DEFAULT]/use_pub_sub -#use_pub_sub = false - -# Use ROUTER remote proxy. (boolean value) -# Deprecated group/name - [DEFAULT]/use_router_proxy -#use_router_proxy = false - -# This option makes direct connections dynamic or static. It makes sense only -# with use_router_proxy=False which means to use direct connections for direct -# message types (ignored otherwise). (boolean value) -#use_dynamic_connections = false - -# How many additional connections to a host will be made for failover reasons. -# This option is actual only in dynamic connections mode. (integer value) -#zmq_failover_connections = 2 - -# Minimal port number for random ports range. (port value) -# Minimum value: 0 -# Maximum value: 65535 -# Deprecated group/name - [DEFAULT]/rpc_zmq_min_port -#rpc_zmq_min_port = 49153 - -# Maximal port number for random ports range. (integer value) -# Minimum value: 1 -# Maximum value: 65536 -# Deprecated group/name - [DEFAULT]/rpc_zmq_max_port -#rpc_zmq_max_port = 65536 - -# Number of retries to find free port number before fail with ZMQBindError. 
-# (integer value) -# Deprecated group/name - [DEFAULT]/rpc_zmq_bind_port_retries -#rpc_zmq_bind_port_retries = 100 - -# Default serialization mechanism for serializing/deserializing -# outgoing/incoming messages (string value) -# Allowed values: json, msgpack -# Deprecated group/name - [DEFAULT]/rpc_zmq_serialization -#rpc_zmq_serialization = json - -# This option configures round-robin mode in zmq socket. True means not keeping -# a queue when server side disconnects. False means to keep queue and messages -# even if server is disconnected, when the server appears we send all -# accumulated messages to it. (boolean value) -#zmq_immediate = true - -# Enable/disable TCP keepalive (KA) mechanism. The default value of -1 (or any -# other negative value) means to skip any overrides and leave it to OS default; -# 0 and 1 (or any other positive value) mean to disable and enable the option -# respectively. (integer value) -#zmq_tcp_keepalive = -1 - -# The duration between two keepalive transmissions in idle condition. The unit -# is platform dependent, for example, seconds in Linux, milliseconds in Windows -# etc. The default value of -1 (or any other negative value and 0) means to skip -# any overrides and leave it to OS default. (integer value) -#zmq_tcp_keepalive_idle = -1 - -# The number of retransmissions to be carried out before declaring that remote -# end is not available. The default value of -1 (or any other negative value and -# 0) means to skip any overrides and leave it to OS default. (integer value) -#zmq_tcp_keepalive_cnt = -1 - -# The duration between two successive keepalive retransmissions, if -# acknowledgement to the previous keepalive transmission is not received. The -# unit is platform dependent, for example, seconds in Linux, milliseconds in -# Windows etc. The default value of -1 (or any other negative value and 0) means -# to skip any overrides and leave it to OS default. 
(integer value) -#zmq_tcp_keepalive_intvl = -1 - -# Maximum number of (green) threads to work concurrently. (integer value) -#rpc_thread_pool_size = 100 - -# Expiration timeout in seconds of a sent/received message after which it is not -# tracked anymore by a client/server. (integer value) -#rpc_message_ttl = 300 - -# Wait for message acknowledgements from receivers. This mechanism works only -# via proxy without PUB/SUB. (boolean value) -#rpc_use_acks = false - -# Number of seconds to wait for an ack from a cast/call. After each retry -# attempt this timeout is multiplied by some specified multiplier. (integer -# value) -#rpc_ack_timeout_base = 15 - -# Number to multiply base ack timeout by after each retry attempt. (integer -# value) -#rpc_ack_timeout_multiplier = 2 - -# Default number of message sending attempts in case of any problems occurred: -# positive value N means at most N retries, 0 means no retries, None or -1 (or -# any other negative values) mean to retry forever. This option is used only if -# acknowledgments are enabled. (integer value) -#rpc_retry_attempts = 3 - -# List of publisher hosts SubConsumer can subscribe on. This option has higher -# priority then the default publishers list taken from the matchmaker. (list -# value) -#subscribe_on = - - -[oslo_policy] - -# -# From oslo.policy -# - -# The file that defines policies. (string value) -# Deprecated group/name - [DEFAULT]/policy_file -#policy_file = policy.json - -# Default rule. Enforced when a requested rule is not found. (string value) -# Deprecated group/name - [DEFAULT]/policy_default_rule -#policy_default_rule = default - -# Directories where policy configuration files are stored. They can be relative -# to any directory in the search path defined by the config_dir option, or -# absolute paths. The file defined by policy_file must exist for these -# directories to be searched. Missing or empty directories are ignored. 
(multi -# valued) -# Deprecated group/name - [DEFAULT]/policy_dirs -#policy_dirs = policy.d - - -[paste_deploy] - -# -# From glance.registry -# - -# -# Deployment flavor to use in the server application pipeline. -# -# Provide a string value representing the appropriate deployment -# flavor used in the server application pipleline. This is typically -# the partial name of a pipeline in the paste configuration file with -# the service name removed. -# -# For example, if your paste section name in the paste configuration -# file is [pipeline:glance-api-keystone], set ``flavor`` to -# ``keystone``. -# -# Possible values: -# * String value representing a partial pipeline name. -# -# Related Options: -# * config_file -# -# (string value) -#flavor = keystone - -# -# Name of the paste configuration file. -# -# Provide a string value representing the name of the paste -# configuration file to use for configuring piplelines for -# server application deployments. -# -# NOTES: -# * Provide the name or the path relative to the glance directory -# for the paste configuration file and not the absolute path. -# * The sample paste configuration file shipped with Glance need -# not be edited in most cases as it comes with ready-made -# pipelines for all common deployment flavors. -# -# If no value is specified for this option, the ``paste.ini`` file -# with the prefix of the corresponding Glance service's configuration -# file name will be searched for in the known configuration -# directories. (For example, if this option is missing from or has no -# value set in ``glance-api.conf``, the service will look for a file -# named ``glance-api-paste.ini``.) If the paste configuration file is -# not found, the service will not start. -# -# Possible values: -# * A string value representing the name of the paste configuration -# file. 
-# -# Related Options: -# * flavor -# -# (string value) -#config_file = glance-api-paste.ini - - -[profiler] - -# -# From glance.registry -# - -# -# Enables the profiling for all services on this node. Default value is False -# (fully disable the profiling feature). -# -# Possible values: -# -# * True: Enables the feature -# * False: Disables the feature. The profiling cannot be started via this -# project -# operations. If the profiling is triggered by another project, this project -# part -# will be empty. -# (boolean value) -# Deprecated group/name - [profiler]/profiler_enabled -#enabled = false - -# -# Enables SQL requests profiling in services. Default value is False (SQL -# requests won't be traced). -# -# Possible values: -# -# * True: Enables SQL requests profiling. Each SQL query will be part of the -# trace and can the be analyzed by how much time was spent for that. -# * False: Disables SQL requests profiling. The spent time is only shown on a -# higher level of operations. Single SQL queries cannot be analyzed this -# way. -# (boolean value) -#trace_sqlalchemy = false - -# -# Secret key(s) to use for encrypting context data for performance profiling. -# This string value should have the following format: [,,...], -# where each key is some random string. A user who triggers the profiling via -# the REST API has to set one of these keys in the headers of the REST API call -# to include profiling results of this node for this particular project. -# -# Both "enabled" flag and "hmac_keys" config options should be set to enable -# profiling. Also, to generate correct profiling information across all services -# at least one key needs to be consistent between OpenStack projects. This -# ensures it can be used from client side to generate the trace, containing -# information from all possible resources. (string value) -#hmac_keys = SECRET_KEY - -# -# Connection string for a notifier backend. 
Default value is messaging:// which -# sets the notifier to oslo_messaging. -# -# Examples of possible values: -# -# * messaging://: use oslo_messaging driver for sending notifications. -# * mongodb://127.0.0.1:27017 : use mongodb driver for sending notifications. -# * elasticsearch://127.0.0.1:9200 : use elasticsearch driver for sending -# notifications. -# (string value) -#connection_string = messaging:// - -# -# Document type for notification indexing in elasticsearch. -# (string value) -#es_doc_type = notification - -# -# This parameter is a time value parameter (for example: es_scroll_time=2m), -# indicating for how long the nodes that participate in the search will maintain -# relevant resources in order to continue and support it. -# (string value) -#es_scroll_time = 2m - -# -# Elasticsearch splits large requests in batches. This parameter defines -# maximum size of each batch (for example: es_scroll_size=10000). -# (integer value) -#es_scroll_size = 10000 - -# -# Redissentinel provides a timeout option on the connections. -# This parameter defines that timeout (for example: socket_timeout=0.1). -# (floating point value) -#socket_timeout = 0.1 - -# -# Redissentinel uses a service name to identify a master redis service. -# This parameter defines the name (for example: -# sentinal_service_name=mymaster). -# (string value) -#sentinel_service_name = mymaster diff --git a/etc/glance-scrubber.conf b/etc/glance-scrubber.conf deleted file mode 100644 index f375661b..00000000 --- a/etc/glance-scrubber.conf +++ /dev/null @@ -1,2500 +0,0 @@ -[DEFAULT] - -# -# From glance.scrubber -# - -# -# Allow users to add additional/custom properties to images. -# -# Glance defines a standard set of properties (in its schema) that -# appear on every image. These properties are also known as -# ``base properties``. In addition to these properties, Glance -# allows users to add custom properties to images. These are known -# as ``additional properties``. 
-# -# By default, this configuration option is set to ``True`` and users -# are allowed to add additional properties. The number of additional -# properties that can be added to an image can be controlled via -# ``image_property_quota`` configuration option. -# -# Possible values: -# * True -# * False -# -# Related options: -# * image_property_quota -# -# (boolean value) -#allow_additional_image_properties = true - -# -# Maximum number of image members per image. -# -# This limits the maximum of users an image can be shared with. Any negative -# value is interpreted as unlimited. -# -# Related options: -# * None -# -# (integer value) -#image_member_quota = 128 - -# -# Maximum number of properties allowed on an image. -# -# This enforces an upper limit on the number of additional properties an image -# can have. Any negative value is interpreted as unlimited. -# -# NOTE: This won't have any impact if additional properties are disabled. Please -# refer to ``allow_additional_image_properties``. -# -# Related options: -# * ``allow_additional_image_properties`` -# -# (integer value) -#image_property_quota = 128 - -# -# Maximum number of tags allowed on an image. -# -# Any negative value is interpreted as unlimited. -# -# Related options: -# * None -# -# (integer value) -#image_tag_quota = 128 - -# -# Maximum number of locations allowed on an image. -# -# Any negative value is interpreted as unlimited. -# -# Related options: -# * None -# -# (integer value) -#image_location_quota = 10 - -# -# Python module path of data access API. -# -# Specifies the path to the API to use for accessing the data model. -# This option determines how the image catalog data will be accessed. -# -# Possible values: -# * glance.db.sqlalchemy.api -# * glance.db.registry.api -# * glance.db.simple.api -# -# If this option is set to ``glance.db.sqlalchemy.api`` then the image -# catalog data is stored in and read from the database via the -# SQLAlchemy Core and ORM APIs. 
-# -# Setting this option to ``glance.db.registry.api`` will force all -# database access requests to be routed through the Registry service. -# This avoids data access from the Glance API nodes for an added layer -# of security, scalability and manageability. -# -# NOTE: In v2 OpenStack Images API, the registry service is optional. -# In order to use the Registry API in v2, the option -# ``enable_v2_registry`` must be set to ``True``. -# -# Finally, when this configuration option is set to -# ``glance.db.simple.api``, image catalog data is stored in and read -# from an in-memory data structure. This is primarily used for testing. -# -# Related options: -# * enable_v2_api -# * enable_v2_registry -# -# (string value) -#data_api = glance.db.sqlalchemy.api - -# -# The default number of results to return for a request. -# -# Responses to certain API requests, like list images, may return -# multiple items. The number of results returned can be explicitly -# controlled by specifying the ``limit`` parameter in the API request. -# However, if a ``limit`` parameter is not specified, this -# configuration value will be used as the default number of results to -# be returned for any API request. -# -# NOTES: -# * The value of this configuration option may not be greater than -# the value specified by ``api_limit_max``. -# * Setting this to a very large value may slow down database -# queries and increase response times. Setting this to a -# very low value may result in poor user experience. -# -# Possible values: -# * Any positive integer -# -# Related options: -# * api_limit_max -# -# (integer value) -# Minimum value: 1 -#limit_param_default = 25 - -# -# Maximum number of results that could be returned by a request. -# -# As described in the help text of ``limit_param_default``, some -# requests may return multiple results. 
The number of results to be -# returned are governed either by the ``limit`` parameter in the -# request or the ``limit_param_default`` configuration option. -# The value in either case, can't be greater than the absolute maximum -# defined by this configuration option. Anything greater than this -# value is trimmed down to the maximum value defined here. -# -# NOTE: Setting this to a very large value may slow down database -# queries and increase response times. Setting this to a -# very low value may result in poor user experience. -# -# Possible values: -# * Any positive integer -# -# Related options: -# * limit_param_default -# -# (integer value) -# Minimum value: 1 -#api_limit_max = 1000 - -# -# Show direct image location when returning an image. -# -# This configuration option indicates whether to show the direct image -# location when returning image details to the user. The direct image -# location is where the image data is stored in backend storage. This -# image location is shown under the image property ``direct_url``. -# -# When multiple image locations exist for an image, the best location -# is displayed based on the location strategy indicated by the -# configuration option ``location_strategy``. -# -# NOTES: -# * Revealing image locations can present a GRAVE SECURITY RISK as -# image locations can sometimes include credentials. Hence, this -# is set to ``False`` by default. Set this to ``True`` with -# EXTREME CAUTION and ONLY IF you know what you are doing! -# * If an operator wishes to avoid showing any image location(s) -# to the user, then both this option and -# ``show_multiple_locations`` MUST be set to ``False``. -# -# Possible values: -# * True -# * False -# -# Related options: -# * show_multiple_locations -# * location_strategy -# -# (boolean value) -#show_image_direct_url = false - -# DEPRECATED: -# Show all image locations when returning an image. 
-# -# This configuration option indicates whether to show all the image -# locations when returning image details to the user. When multiple -# image locations exist for an image, the locations are ordered based -# on the location strategy indicated by the configuration opt -# ``location_strategy``. The image locations are shown under the -# image property ``locations``. -# -# NOTES: -# * Revealing image locations can present a GRAVE SECURITY RISK as -# image locations can sometimes include credentials. Hence, this -# is set to ``False`` by default. Set this to ``True`` with -# EXTREME CAUTION and ONLY IF you know what you are doing! -# * If an operator wishes to avoid showing any image location(s) -# to the user, then both this option and -# ``show_image_direct_url`` MUST be set to ``False``. -# -# Possible values: -# * True -# * False -# -# Related options: -# * show_image_direct_url -# * location_strategy -# -# (boolean value) -# This option is deprecated for removal since Newton. -# Its value may be silently ignored in the future. -# Reason: This option will be removed in the Pike release or later because the -# same functionality can be achieved with greater granularity by using policies. -# Please see the Newton release notes for more information. -#show_multiple_locations = false - -# -# Maximum size of image a user can upload in bytes. -# -# An image upload greater than the size mentioned here would result -# in an image creation failure. This configuration option defaults to -# 1099511627776 bytes (1 TiB). -# -# NOTES: -# * This value should only be increased after careful -# consideration and must be set less than or equal to -# 8 EiB (9223372036854775808). -# * This value must be set with careful consideration of the -# backend storage capacity. Setting this to a very low value -# may result in a large number of image failures. And, setting -# this to a very large value may result in faster consumption -# of storage. 
Hence, this must be set according to the nature of -# images created and storage capacity available. -# -# Possible values: -# * Any positive number less than or equal to 9223372036854775808 -# -# (integer value) -# Minimum value: 1 -# Maximum value: 9223372036854775808 -#image_size_cap = 1099511627776 - -# -# Maximum amount of image storage per tenant. -# -# This enforces an upper limit on the cumulative storage consumed by all images -# of a tenant across all stores. This is a per-tenant limit. -# -# The default unit for this configuration option is Bytes. However, storage -# units can be specified using case-sensitive literals ``B``, ``KB``, ``MB``, -# ``GB`` and ``TB`` representing Bytes, KiloBytes, MegaBytes, GigaBytes and -# TeraBytes respectively. Note that there should not be any space between the -# value and unit. Value ``0`` signifies no quota enforcement. Negative values -# are invalid and result in errors. -# -# Possible values: -# * A string that is a valid concatenation of a non-negative integer -# representing the storage value and an optional string literal -# representing storage units as mentioned above. -# -# Related options: -# * None -# -# (string value) -#user_storage_quota = 0 - -# -# Deploy the v1 OpenStack Images API. -# -# When this option is set to ``True``, Glance service will respond to -# requests on registered endpoints conforming to the v1 OpenStack -# Images API. -# -# NOTES: -# * If this option is enabled, then ``enable_v1_registry`` must -# also be set to ``True`` to enable mandatory usage of Registry -# service with v1 API. -# -# * If this option is disabled, then the ``enable_v1_registry`` -# option, which is enabled by default, is also recommended -# to be disabled. -# -# * This option is separate from ``enable_v2_api``, both v1 and v2 -# OpenStack Images API can be deployed independent of each -# other. -# -# * If deploying only the v2 Images API, this option, which is -# enabled by default, should be disabled. 
-# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v1_registry -# * enable_v2_api -# -# (boolean value) -#enable_v1_api = true - -# -# Deploy the v2 OpenStack Images API. -# -# When this option is set to ``True``, Glance service will respond -# to requests on registered endpoints conforming to the v2 OpenStack -# Images API. -# -# NOTES: -# * If this option is disabled, then the ``enable_v2_registry`` -# option, which is enabled by default, is also recommended -# to be disabled. -# -# * This option is separate from ``enable_v1_api``, both v1 and v2 -# OpenStack Images API can be deployed independent of each -# other. -# -# * If deploying only the v1 Images API, this option, which is -# enabled by default, should be disabled. -# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v2_registry -# * enable_v1_api -# -# (boolean value) -#enable_v2_api = true - -# -# Deploy the v1 API Registry service. -# -# When this option is set to ``True``, the Registry service -# will be enabled in Glance for v1 API requests. -# -# NOTES: -# * Use of Registry is mandatory in v1 API, so this option must -# be set to ``True`` if the ``enable_v1_api`` option is enabled. -# -# * If deploying only the v2 OpenStack Images API, this option, -# which is enabled by default, should be disabled. -# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v1_api -# -# (boolean value) -#enable_v1_registry = true - -# -# Deploy the v2 API Registry service. -# -# When this option is set to ``True``, the Registry service -# will be enabled in Glance for v2 API requests. -# -# NOTES: -# * Use of Registry is optional in v2 API, so this option -# must only be enabled if both ``enable_v2_api`` is set to -# ``True`` and the ``data_api`` option is set to -# ``glance.db.registry.api``. -# -# * If deploying only the v1 OpenStack Images API, this option, -# which is enabled by default, should be disabled. 
-# -# Possible values: -# * True -# * False -# -# Related options: -# * enable_v2_api -# * data_api -# -# (boolean value) -#enable_v2_registry = true - -# -# Host address of the pydev server. -# -# Provide a string value representing the hostname or IP of the -# pydev server to use for debugging. The pydev server listens for -# debug connections on this address, facilitating remote debugging -# in Glance. -# -# Possible values: -# * Valid hostname -# * Valid IP address -# -# Related options: -# * None -# -# (string value) -#pydev_worker_debug_host = localhost - -# -# Port number that the pydev server will listen on. -# -# Provide a port number to bind the pydev server to. The pydev -# process accepts debug connections on this port and facilitates -# remote debugging in Glance. -# -# Possible values: -# * A valid port number -# -# Related options: -# * None -# -# (port value) -# Minimum value: 0 -# Maximum value: 65535 -#pydev_worker_debug_port = 5678 - -# -# AES key for encrypting store location metadata. -# -# Provide a string value representing the AES cipher to use for -# encrypting Glance store metadata. -# -# NOTE: The AES key to use must be set to a random string of length -# 16, 24 or 32 bytes. -# -# Possible values: -# * String value representing a valid AES key -# -# Related options: -# * None -# -# (string value) -#metadata_encryption_key = - -# -# Digest algorithm to use for digital signature. -# -# Provide a string value representing the digest algorithm to -# use for generating digital signatures. By default, ``sha256`` -# is used. -# -# To get a list of the available algorithms supported by the version -# of OpenSSL on your platform, run the command: -# ``openssl list-message-digest-algorithms``. -# Examples are 'sha1', 'sha256', and 'sha512'. -# -# NOTE: ``digest_algorithm`` is not related to Glance's image signing -# and verification. 
It is only used to sign the universally unique -# identifier (UUID) as a part of the certificate file and key file -# validation. -# -# Possible values: -# * An OpenSSL message digest algorithm identifier -# -# Related options: -# * None -# -# (string value) -#digest_algorithm = sha256 - -# -# The amount of time, in seconds, to delay image scrubbing. -# -# When delayed delete is turned on, an image is put into ``pending_delete`` -# state upon deletion until the scrubber deletes its image data. Typically, soon -# after the image is put into ``pending_delete`` state, it is available for -# scrubbing. However, scrubbing can be delayed until a later point using this -# configuration option. This option denotes the time period an image spends in -# ``pending_delete`` state before it is available for scrubbing. -# -# It is important to realize that this has storage implications. The larger the -# ``scrub_time``, the longer the time to reclaim backend storage from deleted -# images. -# -# Possible values: -# * Any non-negative integer -# -# Related options: -# * ``delayed_delete`` -# -# (integer value) -# Minimum value: 0 -#scrub_time = 0 - -# -# The size of thread pool to be used for scrubbing images. -# -# When there are a large number of images to scrub, it is beneficial to scrub -# images in parallel so that the scrub queue stays in control and the backend -# storage is reclaimed in a timely fashion. This configuration option denotes -# the maximum number of images to be scrubbed in parallel. The default value is -# one, which signifies serial scrubbing. Any value above one indicates parallel -# scrubbing. -# -# Possible values: -# * Any non-zero positive integer -# -# Related options: -# * ``delayed_delete`` -# -# (integer value) -# Minimum value: 1 -#scrub_pool_size = 1 - -# -# Turn on/off delayed delete. -# -# Typically when an image is deleted, the ``glance-api`` service puts the image -# into ``deleted`` state and deletes its data at the same time.
Delayed delete -# is a feature in Glance that delays the actual deletion of image data until a -# later point in time (as determined by the configuration option -# ``scrub_time``). -# When delayed delete is turned on, the ``glance-api`` service puts the image -# into ``pending_delete`` state upon deletion and leaves the image data in the -# storage backend for the image scrubber to delete at a later time. The image -# scrubber will move the image into ``deleted`` state upon successful deletion -# of image data. -# -# NOTE: When delayed delete is turned on, image scrubber MUST be running as a -# periodic task to prevent the backend storage from filling up with undesired -# usage. -# -# Possible values: -# * True -# * False -# -# Related options: -# * ``scrub_time`` -# * ``wakeup_time`` -# * ``scrub_pool_size`` -# -# (boolean value) -#delayed_delete = false - -# -# Role used to identify an authenticated user as administrator. -# -# Provide a string value representing a Keystone role to identify an -# administrative user. Users with this role will be granted -# administrative privileges. The default value for this option is -# 'admin'. -# -# Possible values: -# * A string value which is a valid Keystone role -# -# Related options: -# * None -# -# (string value) -#admin_role = admin - -# -# Send headers received from identity when making requests to -# registry. -# -# Typically, Glance registry can be deployed in multiple flavors, -# which may or may not include authentication. For example, -# ``trusted-auth`` is a flavor that does not require the registry -# service to authenticate the requests it receives. However, the -# registry service may still need a user context to be populated to -# serve the requests. This can be achieved by the caller -# (the Glance API usually) passing through the headers it received -# from authenticating with identity for the same request. 
The typical -# headers sent are ``X-User-Id``, ``X-Tenant-Id``, ``X-Roles``, -# ``X-Identity-Status`` and ``X-Service-Catalog``. -# -# Provide a boolean value to determine whether to send the identity -# headers to provide tenant and user information along with the -# requests to registry service. By default, this option is set to -# ``False``, which means that user and tenant information is not -# available readily. It must be obtained by authenticating. Hence, if -# this is set to ``False``, ``flavor`` must be set to value that -# either includes authentication or authenticated user context. -# -# Possible values: -# * True -# * False -# -# Related options: -# * flavor -# -# (boolean value) -#send_identity_headers = false - -# -# Time interval, in seconds, between scrubber runs in daemon mode. -# -# Scrubber can be run either as a cron job or daemon. When run as a daemon, this -# configuration time specifies the time period between two runs. When the -# scrubber wakes up, it fetches and scrubs all ``pending_delete`` images that -# are available for scrubbing after taking ``scrub_time`` into consideration. -# -# If the wakeup time is set to a large number, there may be a large number of -# images to be scrubbed for each run. Also, this impacts how quickly the backend -# storage is reclaimed. -# -# Possible values: -# * Any non-negative integer -# -# Related options: -# * ``daemon`` -# * ``delayed_delete`` -# -# (integer value) -# Minimum value: 0 -#wakeup_time = 300 - -# -# Run scrubber as a daemon. -# -# This boolean configuration option indicates whether scrubber should -# run as a long-running process that wakes up at regular intervals to -# scrub images. The wake up interval can be specified using the -# configuration option ``wakeup_time``. -# -# If this configuration option is set to ``False``, which is the -# default value, scrubber runs once to scrub images and exits. 
In this -# case, if the operator wishes to implement continuous scrubbing of -# images, scrubber needs to be scheduled as a cron job. -# -# Possible values: -# * True -# * False -# -# Related options: -# * ``wakeup_time`` -# -# (boolean value) -#daemon = false - -# -# Protocol to use for communication with the registry server. -# -# Provide a string value representing the protocol to use for -# communication with the registry server. By default, this option is -# set to ``http`` and the connection is not secure. -# -# This option can be set to ``https`` to establish a secure connection -# to the registry server. In this case, provide a key to use for the -# SSL connection using the ``registry_client_key_file`` option. Also -# include the CA file and cert file using the options -# ``registry_client_ca_file`` and ``registry_client_cert_file`` -# respectively. -# -# Possible values: -# * http -# * https -# -# Related options: -# * registry_client_key_file -# * registry_client_cert_file -# * registry_client_ca_file -# -# (string value) -# Allowed values: http, https -#registry_client_protocol = http - -# -# Absolute path to the private key file. -# -# Provide a string value representing a valid absolute path to the -# private key file to use for establishing a secure connection to -# the registry server. -# -# NOTE: This option must be set if ``registry_client_protocol`` is -# set to ``https``. Alternatively, the GLANCE_CLIENT_KEY_FILE -# environment variable may be set to a filepath of the key file. -# -# Possible values: -# * String value representing a valid absolute path to the key -# file. -# -# Related options: -# * registry_client_protocol -# -# (string value) -#registry_client_key_file = /etc/ssl/key/key-file.pem - -# -# Absolute path to the certificate file. -# -# Provide a string value representing a valid absolute path to the -# certificate file to use for establishing a secure connection to -# the registry server. 
-# -# NOTE: This option must be set if ``registry_client_protocol`` is -# set to ``https``. Alternatively, the GLANCE_CLIENT_CERT_FILE -# environment variable may be set to a filepath of the certificate -# file. -# -# Possible values: -# * String value representing a valid absolute path to the -# certificate file. -# -# Related options: -# * registry_client_protocol -# -# (string value) -#registry_client_cert_file = /etc/ssl/certs/file.crt - -# -# Absolute path to the Certificate Authority file. -# -# Provide a string value representing a valid absolute path to the -# certificate authority file to use for establishing a secure -# connection to the registry server. -# -# NOTE: This option must be set if ``registry_client_protocol`` is -# set to ``https``. Alternatively, the GLANCE_CLIENT_CA_FILE -# environment variable may be set to a filepath of the CA file. -# This option is ignored if the ``registry_client_insecure`` option -# is set to ``True``. -# -# Possible values: -# * String value representing a valid absolute path to the CA -# file. -# -# Related options: -# * registry_client_protocol -# * registry_client_insecure -# -# (string value) -#registry_client_ca_file = /etc/ssl/cafile/file.ca - -# -# Set verification of the registry server certificate. -# -# Provide a boolean value to determine whether or not to validate -# SSL connections to the registry server. By default, this option -# is set to ``False`` and the SSL connections are validated. -# -# If set to ``True``, the connection to the registry server is not -# validated via a certifying authority and the -# ``registry_client_ca_file`` option is ignored. This is the -# registry's equivalent of specifying --insecure on the command line -# using glanceclient for the API. -# -# Possible values: -# * True -# * False -# -# Related options: -# * registry_client_protocol -# * registry_client_ca_file -# -# (boolean value) -#registry_client_insecure = false - -# -# Timeout value for registry requests. 
-# -# Provide an integer value representing the period of time in seconds -# that the API server will wait for a registry request to complete. -# The default value is 600 seconds. -# -# A value of 0 implies that a request will never timeout. -# -# Possible values: -# * Zero -# * Positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#registry_client_timeout = 600 - -# DEPRECATED: Whether to pass through the user token when making requests to the -# registry. To prevent failures with token expiration during big files upload, -# it is recommended to set this parameter to False. If "use_user_token" is not in -# effect, then admin credentials can be specified. (boolean value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#use_user_token = true - -# DEPRECATED: The administrator's user name. If "use_user_token" is not in -# effect, then admin credentials can be specified. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#admin_user = - -# DEPRECATED: The administrator's password. If "use_user_token" is not in effect, -# then admin credentials can be specified. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release.
For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#admin_password = - -# DEPRECATED: The tenant name of the administrative user. If "use_user_token" is -# not in effect, then admin tenant name can be specified. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#admin_tenant_name = - -# DEPRECATED: The URL to the keystone service. If "use_user_token" is not in -# effect and using keystone auth, then URL of keystone can be specified. (string -# value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#auth_url = - -# DEPRECATED: The strategy to use for authentication. If "use_user_token" is not -# in effect, then auth strategy can be specified. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#auth_strategy = noauth - -# DEPRECATED: The region for the authentication service. If "use_user_token" is -# not in effect and using keystone auth, then region name can be specified. -# (string value) -# This option is deprecated for removal. 
-# Its value may be silently ignored in the future. -# Reason: This option was considered harmful and has been deprecated in M -# release. It will be removed in O release. For more information read OSSN-0060. -# Related functionality with uploading big images has been implemented with -# Keystone trusts support. -#auth_region = - -# -# Address the registry server is hosted on. -# -# Possible values: -# * A valid IP or hostname -# -# Related options: -# * None -# -# (string value) -#registry_host = 0.0.0.0 - -# -# Port the registry server is listening on. -# -# Possible values: -# * A valid port number -# -# Related options: -# * None -# -# (port value) -# Minimum value: 0 -# Maximum value: 65535 -#registry_port = 9191 - -# -# From oslo.log -# - -# If set to true, the logging level will be set to DEBUG instead of the default -# INFO level. (boolean value) -# Note: This option can be changed without restarting. -#debug = false - -# DEPRECATED: If set to false, the logging level will be set to WARNING instead -# of the default INFO level. (boolean value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -#verbose = true - -# The name of a logging configuration file. This file is appended to any -# existing logging configuration files. For details about logging configuration -# files, see the Python logging module documentation. Note that when logging -# configuration files are used then all logging configuration is set in the -# configuration file and other logging configuration options are ignored (for -# example, logging_context_format_string). (string value) -# Note: This option can be changed without restarting. -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append = - -# Defines the format string for %%(asctime)s in log records. Default: -# %(default)s . This option is ignored if log_config_append is set. 
(string -# value) -#log_date_format = %Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to send logging output to. If no default is set, -# logging will go to stderr as defined by use_stderr. This option is ignored if -# log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file = - -# (Optional) The base directory used for relative log_file paths. This option -# is ignored if log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir = - -# Uses logging handler designed to watch file system. When log file is moved or -# removed this handler will open a new log file with specified path -# instantaneously. It makes sense only if log_file option is specified and Linux -# platform is used. This option is ignored if log_config_append is set. (boolean -# value) -#watch_log_file = false - -# Use syslog for logging. Existing syslog format is DEPRECATED and will be -# changed later to honor RFC5424. This option is ignored if log_config_append is -# set. (boolean value) -#use_syslog = false - -# Syslog facility to receive log lines. This option is ignored if -# log_config_append is set. (string value) -#syslog_log_facility = LOG_USER - -# Log output to standard error. This option is ignored if log_config_append is -# set. (boolean value) -#use_stderr = false - -# Format string to use for log messages with context. (string value) -#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages when context is undefined. (string -# value) -#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Additional data to append to log message when logging level for the message is -# DEBUG. 
(string value) -#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format. (string value) -#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s - -# Defines the format string for %(user_identity)s that is used in -# logging_context_format_string. (string value) -#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s - -# List of package logging levels in logger=LEVEL pairs. This option is ignored -# if log_config_append is set. (list value) -#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO - -# Enables or disables publication of error events. (boolean value) -#publish_errors = false - -# The format for an instance that is passed with the log message. (string value) -#instance_format = "[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log message. (string -# value) -#instance_uuid_format = "[instance: %(uuid)s] " - -# Interval, number of seconds, of log rate limiting. (integer value) -#rate_limit_interval = 0 - -# Maximum number of logged messages per rate_limit_interval. (integer value) -#rate_limit_burst = 0 - -# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG or -# empty string. Logs with level greater or equal to rate_limit_except_level are -# not filtered. An empty string means that all levels are filtered. (string -# value) -#rate_limit_except_level = CRITICAL - -# Enables or disables fatal status of deprecations. 
(boolean value) -#fatal_deprecations = false - - -[database] - -# -# From oslo.db -# - -# DEPRECATED: The file name to use with SQLite. (string value) -# Deprecated group/name - [DEFAULT]/sqlite_db -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Should use config option connection or slave_connection to connect the -# database. -#sqlite_db = oslo.sqlite - -# If True, SQLite uses synchronous mode. (boolean value) -# Deprecated group/name - [DEFAULT]/sqlite_synchronous -#sqlite_synchronous = true - -# The back end to use for the database. (string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend = sqlalchemy - -# The SQLAlchemy connection string to use to connect to the database. (string -# value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection = - -# The SQLAlchemy connection string to use to connect to the slave database. -# (string value) -#slave_connection = - -# The SQL mode to be used for MySQL sessions. This option, including the -# default, overrides any server-set SQL mode. To use whatever SQL mode is set by -# the server configuration, set this to no value. Example: mysql_sql_mode= -# (string value) -#mysql_sql_mode = TRADITIONAL - -# Timeout before idle SQL connections are reaped. (integer value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout = 3600 - -# Minimum number of SQL connections to keep open in a pool. (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool. Setting a value of 0 -# indicates no limit. 
(integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size = 5 - -# Maximum number of database connection retries during startup. Set to -1 to -# specify an infinite retry count. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries = 10 - -# Interval between retries of opening a SQL connection. (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval = 10 - -# If set, use this value for max_overflow with SQLAlchemy. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow = 50 - -# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer -# value) -# Minimum value: 0 -# Maximum value: 100 -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug = 0 - -# Add Python stack traces to SQL as comment strings. (boolean value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace = false - -# If set, use this value for pool_timeout with SQLAlchemy. (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout = - -# Enable the experimental use of database reconnect on connection lost. (boolean -# value) -#use_db_reconnect = false - -# Seconds between retries of a database transaction. (integer value) -#db_retry_interval = 1 - -# If True, increases the interval between retries of a database operation up to -# db_max_retry_interval. (boolean value) -#db_inc_retry_interval = true - -# If db_inc_retry_interval is set, the maximum seconds between retries of a -# database operation. (integer value) -#db_max_retry_interval = 10 - -# Maximum retries in case of connection error or deadlock error before error is -# raised. 
Set to -1 to specify an infinite retry count. (integer value) -#db_max_retries = 20 - -# -# From oslo.db.concurrency -# - -# Enable the experimental use of thread pooling for all DB API calls (boolean -# value) -# Deprecated group/name - [DEFAULT]/dbapi_use_tpool -#use_tpool = false - - -[glance_store] - -# -# From glance.store -# - -# -# List of enabled Glance stores. -# -# Register the storage backends to use for storing disk images -# as a comma separated list. The default stores enabled for -# storing disk images with Glance are ``file`` and ``http``. -# -# Possible values: -# * A comma separated list that could include: -# * file -# * http -# * swift -# * rbd -# * sheepdog -# * cinder -# * vmware -# -# Related Options: -# * default_store -# -# (list value) -#stores = file,http - -# -# The default scheme to use for storing images. -# -# Provide a string value representing the default scheme to use for -# storing images. If not set, Glance uses ``file`` as the default -# scheme to store images with the ``file`` store. -# -# NOTE: The value given for this configuration option must be a valid -# scheme for a store registered with the ``stores`` configuration -# option. -# -# Possible values: -# * file -# * filesystem -# * http -# * https -# * swift -# * swift+http -# * swift+https -# * swift+config -# * rbd -# * sheepdog -# * cinder -# * vsphere -# -# Related Options: -# * stores -# -# (string value) -# Allowed values: file, filesystem, http, https, swift, swift+http, swift+https, swift+config, rbd, sheepdog, cinder, vsphere -#default_store = file - -# -# Minimum interval in seconds to execute updating dynamic storage -# capabilities based on current backend status. -# -# Provide an integer value representing time in seconds to set the -# minimum interval before an update of dynamic storage capabilities -# for a storage backend can be attempted. 
Setting -# ``store_capabilities_update_min_interval`` does not mean updates -# occur periodically based on the set interval. Rather, the update -# is performed at the elapse of this interval set, if an operation -# of the store is triggered. -# -# By default, this option is set to zero and is disabled. Provide an -# integer value greater than zero to enable this option. -# -# NOTE: For more information on store capabilities and their updates, -# please visit: https://specs.openstack.org/openstack/glance-specs/specs/kilo -# /store-capabilities.html -# -# For more information on setting up a particular store in your -# deployment and help with the usage of this feature, please contact -# the storage driver maintainers listed here: -# http://docs.openstack.org/developer/glance_store/drivers/index.html -# -# Possible values: -# * Zero -# * Positive integer -# -# Related Options: -# * None -# -# (integer value) -# Minimum value: 0 -#store_capabilities_update_min_interval = 0 - -# -# Information to match when looking for cinder in the service catalog. -# -# When the ``cinder_endpoint_template`` is not set and any of -# ``cinder_store_auth_address``, ``cinder_store_user_name``, -# ``cinder_store_project_name``, ``cinder_store_password`` is not set, -# cinder store uses this information to lookup cinder endpoint from the service -# catalog in the current context. ``cinder_os_region_name``, if set, is taken -# into consideration to fetch the appropriate endpoint. -# -# The service catalog can be listed by the ``openstack catalog list`` command. -# -# Possible values: -# * A string of of the following form: -# ``::`` -# At least ``service_type`` and ``interface`` should be specified. -# ``service_name`` can be omitted. 
-# -# Related options: -# * cinder_os_region_name -# * cinder_endpoint_template -# * cinder_store_auth_address -# * cinder_store_user_name -# * cinder_store_project_name -# * cinder_store_password -# -# (string value) -#cinder_catalog_info = volumev2::publicURL - -# -# Override service catalog lookup with template for cinder endpoint. -# -# When this option is set, this value is used to generate cinder endpoint, -# instead of looking up from the service catalog. -# This value is ignored if ``cinder_store_auth_address``, -# ``cinder_store_user_name``, ``cinder_store_project_name``, and -# ``cinder_store_password`` are specified. -# -# If this configuration option is set, ``cinder_catalog_info`` will be ignored. -# -# Possible values: -# * URL template string for cinder endpoint, where ``%%(tenant)s`` is -# replaced with the current tenant (project) name. -# For example: ``http://cinder.openstack.example.org/v2/%%(tenant)s`` -# -# Related options: -# * cinder_store_auth_address -# * cinder_store_user_name -# * cinder_store_project_name -# * cinder_store_password -# * cinder_catalog_info -# -# (string value) -#cinder_endpoint_template = - -# -# Region name to lookup cinder service from the service catalog. -# -# This is used only when ``cinder_catalog_info`` is used for determining the -# endpoint. If set, the lookup for cinder endpoint by this node is filtered to -# the specified region. It is useful when multiple regions are listed in the -# catalog. If this is not set, the endpoint is looked up from every region. -# -# Possible values: -# * A string that is a valid region name. -# -# Related options: -# * cinder_catalog_info -# -# (string value) -# Deprecated group/name - [glance_store]/os_region_name -#cinder_os_region_name = - -# -# Location of a CA certificates file used for cinder client requests. -# -# The specified CA certificates file, if set, is used to verify cinder -# connections via HTTPS endpoint. If the endpoint is HTTP, this value is -# ignored. 
-# ``cinder_api_insecure`` must be set to ``True`` to enable the verification. -# -# Possible values: -# * Path to a ca certificates file -# -# Related options: -# * cinder_api_insecure -# -# (string value) -#cinder_ca_certificates_file = - -# -# Number of cinderclient retries on failed http calls. -# -# When a call failed by any errors, cinderclient will retry the call up to the -# specified times after sleeping a few seconds. -# -# Possible values: -# * A positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#cinder_http_retries = 3 - -# -# Time period, in seconds, to wait for a cinder volume transition to -# complete. -# -# When the cinder volume is created, deleted, or attached to the glance node to -# read/write the volume data, the volume's state is changed. For example, the -# newly created volume status changes from ``creating`` to ``available`` after -# the creation process is completed. This specifies the maximum time to wait for -# the status change. If a timeout occurs while waiting, or the status is changed -# to an unexpected value (e.g. `error``), the image creation fails. -# -# Possible values: -# * A positive integer -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 0 -#cinder_state_transition_timeout = 300 - -# -# Allow to perform insecure SSL requests to cinder. -# -# If this option is set to True, HTTPS endpoint connection is verified using the -# CA certificates file specified by ``cinder_ca_certificates_file`` option. -# -# Possible values: -# * True -# * False -# -# Related options: -# * cinder_ca_certificates_file -# -# (boolean value) -#cinder_api_insecure = false - -# -# The address where the cinder authentication service is listening. -# -# When all of ``cinder_store_auth_address``, ``cinder_store_user_name``, -# ``cinder_store_project_name``, and ``cinder_store_password`` options are -# specified, the specified values are always used for the authentication. 
-# This is useful to hide the image volumes from users by storing them in a -# project/tenant specific to the image service. It also enables users to share -# the image volume among other projects under the control of glance's ACL. -# -# If either of these options are not set, the cinder endpoint is looked up -# from the service catalog, and current context's user and project are used. -# -# Possible values: -# * A valid authentication service address, for example: -# ``http://openstack.example.org/identity/v2.0`` -# -# Related options: -# * cinder_store_user_name -# * cinder_store_password -# * cinder_store_project_name -# -# (string value) -#cinder_store_auth_address = - -# -# User name to authenticate against cinder. -# -# This must be used with all the following related options. If any of these are -# not specified, the user of the current context is used. -# -# Possible values: -# * A valid user name -# -# Related options: -# * cinder_store_auth_address -# * cinder_store_password -# * cinder_store_project_name -# -# (string value) -#cinder_store_user_name = - -# -# Password for the user authenticating against cinder. -# -# This must be used with all the following related options. If any of these are -# not specified, the user of the current context is used. -# -# Possible values: -# * A valid password for the user specified by ``cinder_store_user_name`` -# -# Related options: -# * cinder_store_auth_address -# * cinder_store_user_name -# * cinder_store_project_name -# -# (string value) -#cinder_store_password = - -# -# Project name where the image volume is stored in cinder. -# -# If this configuration option is not set, the project in current context is -# used. -# -# This must be used with all the following related options. If any of these are -# not specified, the project of the current context is used. 
-# -# Possible values: -# * A valid project name -# -# Related options: -# * ``cinder_store_auth_address`` -# * ``cinder_store_user_name`` -# * ``cinder_store_password`` -# -# (string value) -#cinder_store_project_name = - -# -# Path to the rootwrap configuration file to use for running commands as root. -# -# The cinder store requires root privileges to operate the image volumes (for -# connecting to iSCSI/FC volumes and reading/writing the volume data, etc.). -# The configuration file should allow the required commands by cinder store and -# os-brick library. -# -# Possible values: -# * Path to the rootwrap config file -# -# Related options: -# * None -# -# (string value) -#rootwrap_config = /etc/glance/rootwrap.conf - -# -# Volume type that will be used for volume creation in cinder. -# -# Some cinder backends can have several volume types to optimize storage usage. -# Adding this option allows an operator to choose a specific volume type -# in cinder that can be optimized for images. -# -# If this is not set, then the default volume type specified in the cinder -# configuration will be used for volume creation. -# -# Possible values: -# * A valid volume type from cinder -# -# Related options: -# * None -# -# (string value) -#cinder_volume_type = - -# -# Directory to which the filesystem backend store writes images. -# -# Upon start up, Glance creates the directory if it doesn't already -# exist and verifies write access to the user under which -# ``glance-api`` runs. If the write access isn't available, a -# ``BadStoreConfiguration`` exception is raised and the filesystem -# store may not be available for adding new images. -# -# NOTE: This directory is used only when filesystem store is used as a -# storage backend. Either ``filesystem_store_datadir`` or -# ``filesystem_store_datadirs`` option must be specified in -# ``glance-api.conf``. 
If both options are specified, a -# ``BadStoreConfiguration`` will be raised and the filesystem store -# may not be available for adding new images. -# -# Possible values: -# * A valid path to a directory -# -# Related options: -# * ``filesystem_store_datadirs`` -# * ``filesystem_store_file_perm`` -# -# (string value) -#filesystem_store_datadir = /var/lib/glance/images - -# -# List of directories and their priorities to which the filesystem -# backend store writes images. -# -# The filesystem store can be configured to store images in multiple -# directories as opposed to using a single directory specified by the -# ``filesystem_store_datadir`` configuration option. When using -# multiple directories, each directory can be given an optional -# priority to specify the preference order in which they should -# be used. Priority is an integer that is concatenated to the -# directory path with a colon where a higher value indicates higher -# priority. When two directories have the same priority, the directory -# with most free space is used. When no priority is specified, it -# defaults to zero. -# -# More information on configuring filesystem store with multiple store -# directories can be found at -# http://docs.openstack.org/developer/glance/configuring.html -# -# NOTE: This directory is used only when filesystem store is used as a -# storage backend. Either ``filesystem_store_datadir`` or -# ``filesystem_store_datadirs`` option must be specified in -# ``glance-api.conf``. If both options are specified, a -# ``BadStoreConfiguration`` will be raised and the filesystem store -# may not be available for adding new images. -# -# Possible values: -# * List of strings of the following form: -# * ``:`` -# -# Related options: -# * ``filesystem_store_datadir`` -# * ``filesystem_store_file_perm`` -# -# (multi valued) -#filesystem_store_datadirs = - -# -# Filesystem store metadata file. 
-# -# The path to a file which contains the metadata to be returned with -# any location associated with the filesystem store. The file must -# contain a valid JSON object. The object should contain the keys -# ``id`` and ``mountpoint``. The value for both keys should be a -# string. -# -# Possible values: -# * A valid path to the store metadata file -# -# Related options: -# * None -# -# (string value) -#filesystem_store_metadata_file = - -# -# File access permissions for the image files. -# -# Set the intended file access permissions for image data. This provides -# a way to enable other services, e.g. Nova, to consume images directly -# from the filesystem store. The users running the services that are -# intended to be given access to could be made a member of the group -# that owns the files created. Assigning a value less then or equal to -# zero for this configuration option signifies that no changes be made -# to the default permissions. This value will be decoded as an octal -# digit. -# -# For more information, please refer the documentation at -# http://docs.openstack.org/developer/glance/configuring.html -# -# Possible values: -# * A valid file access permission -# * Zero -# * Any negative integer -# -# Related options: -# * None -# -# (integer value) -#filesystem_store_file_perm = 0 - -# -# Path to the CA bundle file. -# -# This configuration option enables the operator to use a custom -# Certificate Authority file to verify the remote server certificate. If -# this option is set, the ``https_insecure`` option will be ignored and -# the CA file specified will be used to authenticate the server -# certificate and establish a secure connection to the server. -# -# Possible values: -# * A valid path to a CA file -# -# Related options: -# * https_insecure -# -# (string value) -#https_ca_certificates_file = - -# -# Set verification of the remote server certificate. 
-# -# This configuration option takes in a boolean value to determine -# whether or not to verify the remote server certificate. If set to -# True, the remote server certificate is not verified. If the option is -# set to False, then the default CA truststore is used for verification. -# -# This option is ignored if ``https_ca_certificates_file`` is set. -# The remote server certificate will then be verified using the file -# specified using the ``https_ca_certificates_file`` option. -# -# Possible values: -# * True -# * False -# -# Related options: -# * https_ca_certificates_file -# -# (boolean value) -#https_insecure = true - -# -# The http/https proxy information to be used to connect to the remote -# server. -# -# This configuration option specifies the http/https proxy information -# that should be used to connect to the remote server. The proxy -# information should be a key value pair of the scheme and proxy, for -# example, http:10.0.0.1:3128. You can also specify proxies for multiple -# schemes by separating the key value pairs with a comma, for example, -# http:10.0.0.1:3128, https:10.0.0.1:1080. -# -# Possible values: -# * A comma separated list of scheme:proxy pairs as described above -# -# Related options: -# * None -# -# (dict value) -#http_proxy_information = - -# -# Size, in megabytes, to chunk RADOS images into. -# -# Provide an integer value representing the size in megabytes to chunk -# Glance images into. The default chunk size is 8 megabytes. For optimal -# performance, the value should be a power of two. -# -# When Ceph's RBD object storage system is used as the storage backend -# for storing Glance images, the images are chunked into objects of the -# size set using this option. These chunked objects are then stored -# across the distributed block data store to use for Glance. 
-# -# Possible Values: -# * Any positive integer value -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#rbd_store_chunk_size = 8 - -# -# RADOS pool in which images are stored. -# -# When RBD is used as the storage backend for storing Glance images, the -# images are stored by means of logical grouping of the objects (chunks -# of images) into a ``pool``. Each pool is defined with the number of -# placement groups it can contain. The default pool that is used is -# 'images'. -# -# More information on the RBD storage backend can be found here: -# http://ceph.com/planet/how-data-is-stored-in-ceph-cluster/ -# -# Possible Values: -# * A valid pool name -# -# Related options: -# * None -# -# (string value) -#rbd_store_pool = images - -# -# RADOS user to authenticate as. -# -# This configuration option takes in the RADOS user to authenticate as. -# This is only needed when RADOS authentication is enabled and is -# applicable only if the user is using Cephx authentication. If the -# value for this option is not set by the user or is set to None, a -# default value will be chosen, which will be based on the client. -# section in rbd_store_ceph_conf. -# -# Possible Values: -# * A valid RADOS user -# -# Related options: -# * rbd_store_ceph_conf -# -# (string value) -#rbd_store_user = - -# -# Ceph configuration file path. -# -# This configuration option takes in the path to the Ceph configuration -# file to be used. If the value for this option is not set by the user -# or is set to None, librados will locate the default configuration file -# which is located at /etc/ceph/ceph.conf. If using Cephx -# authentication, this file should include a reference to the right -# keyring in a client. section -# -# Possible Values: -# * A valid path to a configuration file -# -# Related options: -# * rbd_store_user -# -# (string value) -#rbd_store_ceph_conf = /etc/ceph/ceph.conf - -# -# Timeout value for connecting to Ceph cluster. 
-# -# This configuration option takes in the timeout value in seconds used -# when connecting to the Ceph cluster i.e. it sets the time to wait for -# glance-api before closing the connection. This prevents glance-api -# hangups during the connection to RBD. If the value for this option -# is set to less than or equal to 0, no timeout is set and the default -# librados value is used. -# -# Possible Values: -# * Any integer value -# -# Related options: -# * None -# -# (integer value) -#rados_connect_timeout = 0 - -# -# Chunk size for images to be stored in Sheepdog data store. -# -# Provide an integer value representing the size in mebibyte -# (1048576 bytes) to chunk Glance images into. The default -# chunk size is 64 mebibytes. -# -# When using Sheepdog distributed storage system, the images are -# chunked into objects of this size and then stored across the -# distributed data store to use for Glance. -# -# Chunk sizes, if a power of two, help avoid fragmentation and -# enable improved performance. -# -# Possible values: -# * Positive integer value representing size in mebibytes. -# -# Related Options: -# * None -# -# (integer value) -# Minimum value: 1 -#sheepdog_store_chunk_size = 64 - -# -# Port number on which the sheep daemon will listen. -# -# Provide an integer value representing a valid port number on -# which you want the Sheepdog daemon to listen on. The default -# port is 7000. -# -# The Sheepdog daemon, also called 'sheep', manages the storage -# in the distributed cluster by writing objects across the storage -# network. It identifies and acts on the messages it receives on -# the port number set using ``sheepdog_store_port`` option to store -# chunks of Glance images. -# -# Possible values: -# * A valid port number (0 to 65535) -# -# Related Options: -# * sheepdog_store_address -# -# (port value) -# Minimum value: 0 -# Maximum value: 65535 -#sheepdog_store_port = 7000 - -# -# Address to bind the Sheepdog daemon to. 
-# -# Provide a string value representing the address to bind the -# Sheepdog daemon to. The default address set for the 'sheep' -# is 127.0.0.1. -# -# The Sheepdog daemon, also called 'sheep', manages the storage -# in the distributed cluster by writing objects across the storage -# network. It identifies and acts on the messages directed to the -# address set using ``sheepdog_store_address`` option to store -# chunks of Glance images. -# -# Possible values: -# * A valid IPv4 address -# * A valid IPv6 address -# * A valid hostname -# -# Related Options: -# * sheepdog_store_port -# -# (string value) -#sheepdog_store_address = 127.0.0.1 - -# -# Set verification of the server certificate. -# -# This boolean determines whether or not to verify the server -# certificate. If this option is set to True, swiftclient won't check -# for a valid SSL certificate when authenticating. If the option is set -# to False, then the default CA truststore is used for verification. -# -# Possible values: -# * True -# * False -# -# Related options: -# * swift_store_cacert -# -# (boolean value) -#swift_store_auth_insecure = false - -# -# Path to the CA bundle file. -# -# This configuration option enables the operator to specify the path to -# a custom Certificate Authority file for SSL verification when -# connecting to Swift. -# -# Possible values: -# * A valid path to a CA file -# -# Related options: -# * swift_store_auth_insecure -# -# (string value) -#swift_store_cacert = /etc/ssl/certs/ca-certificates.crt - -# -# The region of Swift endpoint to use by Glance. -# -# Provide a string value representing a Swift region where Glance -# can connect to for image storage. By default, there is no region -# set. 
-# -# When Glance uses Swift as the storage backend to store images -# for a specific tenant that has multiple endpoints, setting of a -# Swift region with ``swift_store_region`` allows Glance to connect -# to Swift in the specified region as opposed to a single region -# connectivity. -# -# This option can be configured for both single-tenant and -# multi-tenant storage. -# -# NOTE: Setting the region with ``swift_store_region`` is -# tenant-specific and is necessary ``only if`` the tenant has -# multiple endpoints across different regions. -# -# Possible values: -# * A string value representing a valid Swift region. -# -# Related Options: -# * None -# -# (string value) -#swift_store_region = RegionTwo - -# -# The URL endpoint to use for Swift backend storage. -# -# Provide a string value representing the URL endpoint to use for -# storing Glance images in Swift store. By default, an endpoint -# is not set and the storage URL returned by ``auth`` is used. -# Setting an endpoint with ``swift_store_endpoint`` overrides the -# storage URL and is used for Glance image storage. -# -# NOTE: The URL should include the path up to, but excluding the -# container. The location of an object is obtained by appending -# the container and object to the configured URL. -# -# Possible values: -# * String value representing a valid URL path up to a Swift container -# -# Related Options: -# * None -# -# (string value) -#swift_store_endpoint = https://swift.openstack.example.org/v1/path_not_including_container_name - -# -# Endpoint Type of Swift service. -# -# This string value indicates the endpoint type to use to fetch the -# Swift endpoint. The endpoint type determines the actions the user will -# be allowed to perform, for instance, reading and writing to the Store. -# This setting is only used if swift_store_auth_version is greater than -# 1. 
-# -# Possible values: -# * publicURL -# * adminURL -# * internalURL -# -# Related options: -# * swift_store_endpoint -# -# (string value) -# Allowed values: publicURL, adminURL, internalURL -#swift_store_endpoint_type = publicURL - -# -# Type of Swift service to use. -# -# Provide a string value representing the service type to use for -# storing images while using Swift backend storage. The default -# service type is set to ``object-store``. -# -# NOTE: If ``swift_store_auth_version`` is set to 2, the value for -# this configuration option needs to be ``object-store``. If using -# a higher version of Keystone or a different auth scheme, this -# option may be modified. -# -# Possible values: -# * A string representing a valid service type for Swift storage. -# -# Related Options: -# * None -# -# (string value) -#swift_store_service_type = object-store - -# -# Name of single container to store images/name prefix for multiple containers -# -# When a single container is being used to store images, this configuration -# option indicates the container within the Glance account to be used for -# storing all images. When multiple containers are used to store images, this -# will be the name prefix for all containers. Usage of single/multiple -# containers can be controlled using the configuration option -# ``swift_store_multiple_containers_seed``. -# -# When using multiple containers, the containers will be named after the value -# set for this configuration option with the first N chars of the image UUID -# as the suffix delimited by an underscore (where N is specified by -# ``swift_store_multiple_containers_seed``). -# -# Example: if the seed is set to 3 and swift_store_container = ``glance``, then -# an image with UUID ``fdae39a1-bac5-4238-aba4-69bcc726e848`` would be placed in -# the container ``glance_fda``. 
All dashes in the UUID are included when -# creating the container name but do not count toward the character limit, so -# when N=10 the container name would be ``glance_fdae39a1-ba.`` -# -# Possible values: -# * If using single container, this configuration option can be any string -# that is a valid swift container name in Glance's Swift account -# * If using multiple containers, this configuration option can be any -# string as long as it satisfies the container naming rules enforced by -# Swift. The value of ``swift_store_multiple_containers_seed`` should be -# taken into account as well. -# -# Related options: -# * ``swift_store_multiple_containers_seed`` -# * ``swift_store_multi_tenant`` -# * ``swift_store_create_container_on_put`` -# -# (string value) -#swift_store_container = glance - -# -# The size threshold, in MB, after which Glance will start segmenting image -# data. -# -# Swift has an upper limit on the size of a single uploaded object. By default, -# this is 5GB. To upload objects bigger than this limit, objects are segmented -# into multiple smaller objects that are tied together with a manifest file. -# For more detail, refer to -# http://docs.openstack.org/developer/swift/overview_large_objects.html -# -# This configuration option specifies the size threshold over which the Swift -# driver will start segmenting image data into multiple smaller files. -# Currently, the Swift driver only supports creating Dynamic Large Objects. -# -# NOTE: This should be set by taking into account the large object limit -# enforced by the Swift cluster in consideration. -# -# Possible values: -# * A positive integer that is less than or equal to the large object limit -# enforced by the Swift cluster in consideration. -# -# Related options: -# * ``swift_store_large_object_chunk_size`` -# -# (integer value) -# Minimum value: 1 -#swift_store_large_object_size = 5120 - -# -# The maximum size, in MB, of the segments when image data is segmented. 
-# -# When image data is segmented to upload images that are larger than the limit -# enforced by the Swift cluster, image data is broken into segments that are no -# bigger than the size specified by this configuration option. -# Refer to ``swift_store_large_object_size`` for more detail. -# -# For example: if ``swift_store_large_object_size`` is 5GB and -# ``swift_store_large_object_chunk_size`` is 1GB, an image of size 6.2GB will be -# segmented into 7 segments where the first six segments will be 1GB in size and -# the seventh segment will be 0.2GB. -# -# Possible values: -# * A positive integer that is less than or equal to the large object limit -# enforced by Swift cluster in consideration. -# -# Related options: -# * ``swift_store_large_object_size`` -# -# (integer value) -# Minimum value: 1 -#swift_store_large_object_chunk_size = 200 - -# -# Create container, if it doesn't already exist, when uploading image. -# -# At the time of uploading an image, if the corresponding container doesn't -# exist, it will be created provided this configuration option is set to True. -# By default, it won't be created. This behavior is applicable for both single -# and multiple containers mode. -# -# Possible values: -# * True -# * False -# -# Related options: -# * None -# -# (boolean value) -#swift_store_create_container_on_put = false - -# -# Store images in tenant's Swift account. -# -# This enables multi-tenant storage mode which causes Glance images to be stored -# in tenant specific Swift accounts. If this is disabled, Glance stores all -# images in its own account. More details multi-tenant store can be found at -# https://wiki.openstack.org/wiki/GlanceSwiftTenantSpecificStorage -# -# NOTE: If using multi-tenant swift store, please make sure -# that you do not set a swift configuration file with the -# 'swift_store_config_file' option. 
-# -# Possible values: -# * True -# * False -# -# Related options: -# * swift_store_config_file -# -# (boolean value) -#swift_store_multi_tenant = false - -# -# Seed indicating the number of containers to use for storing images. -# -# When using a single-tenant store, images can be stored in one or more than one -# containers. When set to 0, all images will be stored in one single container. -# When set to an integer value between 1 and 32, multiple containers will be -# used to store images. This configuration option will determine how many -# containers are created. The total number of containers that will be used is -# equal to 16^N, so if this config option is set to 2, then 16^2=256 containers -# will be used to store images. -# -# Please refer to ``swift_store_container`` for more detail on the naming -# convention. More detail about using multiple containers can be found at -# https://specs.openstack.org/openstack/glance-specs/specs/kilo/swift-store- -# multiple-containers.html -# -# NOTE: This is used only when swift_store_multi_tenant is disabled. -# -# Possible values: -# * A non-negative integer less than or equal to 32 -# -# Related options: -# * ``swift_store_container`` -# * ``swift_store_multi_tenant`` -# * ``swift_store_create_container_on_put`` -# -# (integer value) -# Minimum value: 0 -# Maximum value: 32 -#swift_store_multiple_containers_seed = 0 - -# -# List of tenants that will be granted admin access. -# -# This is a list of tenants that will be granted read/write access on -# all Swift containers created by Glance in multi-tenant mode. The -# default value is an empty list. -# -# Possible values: -# * A comma separated list of strings representing UUIDs of Keystone -# projects/tenants -# -# Related options: -# * None -# -# (list value) -#swift_store_admin_tenants = - -# -# SSL layer compression for HTTPS Swift requests. -# -# Provide a boolean value to determine whether or not to compress -# HTTPS Swift requests for images at the SSL layer. 
By default, -# compression is enabled. -# -# When using Swift as the backend store for Glance image storage, -# SSL layer compression of HTTPS Swift requests can be set using -# this option. If set to False, SSL layer compression of HTTPS -# Swift requests is disabled. Disabling this option may improve -# performance for images which are already in a compressed format, -# for example, qcow2. -# -# Possible values: -# * True -# * False -# -# Related Options: -# * None -# -# (boolean value) -#swift_store_ssl_compression = true - -# -# The number of times a Swift download will be retried before the -# request fails. -# -# Provide an integer value representing the number of times an image -# download must be retried before erroring out. The default value is -# zero (no retry on a failed image download). When set to a positive -# integer value, ``swift_store_retry_get_count`` ensures that the -# download is attempted this many more times upon a download failure -# before sending an error message. -# -# Possible values: -# * Zero -# * Positive integer value -# -# Related Options: -# * None -# -# (integer value) -# Minimum value: 0 -#swift_store_retry_get_count = 0 - -# -# Time in seconds defining the size of the window in which a new -# token may be requested before the current token is due to expire. -# -# Typically, the Swift storage driver fetches a new token upon the -# expiration of the current token to ensure continued access to -# Swift. However, some Swift transactions (like uploading image -# segments) may not recover well if the token expires on the fly. -# -# Hence, by fetching a new token before the current token expiration, -# we make sure that the token does not expire or is close to expiry -# before a transaction is attempted. By default, the Swift storage -# driver requests for a new token 60 seconds or less before the -# current token expiration. 
-# -# Possible values: -# * Zero -# * Positive integer value -# -# Related Options: -# * None -# -# (integer value) -# Minimum value: 0 -#swift_store_expire_soon_interval = 60 - -# -# Use trusts for multi-tenant Swift store. -# -# This option instructs the Swift store to create a trust for each -# add/get request when the multi-tenant store is in use. Using trusts -# allows the Swift store to avoid problems that can be caused by an -# authentication token expiring during the upload or download of data. -# -# By default, ``swift_store_use_trusts`` is set to ``True``(use of -# trusts is enabled). If set to ``False``, a user token is used for -# the Swift connection instead, eliminating the overhead of trust -# creation. -# -# NOTE: This option is considered only when -# ``swift_store_multi_tenant`` is set to ``True`` -# -# Possible values: -# * True -# * False -# -# Related options: -# * swift_store_multi_tenant -# -# (boolean value) -#swift_store_use_trusts = true - -# -# Reference to default Swift account/backing store parameters. -# -# Provide a string value representing a reference to the default set -# of parameters required for using swift account/backing store for -# image storage. The default reference value for this configuration -# option is 'ref1'. This configuration option dereferences the -# parameters and facilitates image storage in Swift storage backend -# every time a new image is added. -# -# Possible values: -# * A valid string value -# -# Related options: -# * None -# -# (string value) -#default_swift_reference = ref1 - -# DEPRECATED: Version of the authentication service to use. Valid versions are 2 -# and 3 for keystone and 1 (deprecated) for swauth and rackspace. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: -# The option 'auth_version' in the Swift back-end configuration file is -# used instead. 
-#swift_store_auth_version = 2 - -# DEPRECATED: The address where the Swift authentication service is listening. -# (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: -# The option 'auth_address' in the Swift back-end configuration file is -# used instead. -#swift_store_auth_address = - -# DEPRECATED: The user to authenticate against the Swift authentication service. -# (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: -# The option 'user' in the Swift back-end configuration file is set instead. -#swift_store_user = - -# DEPRECATED: Auth key for the user authenticating against the Swift -# authentication service. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: -# The option 'key' in the Swift back-end configuration file is used -# to set the authentication key instead. -#swift_store_key = - -# -# Absolute path to the file containing the swift account(s) -# configurations. -# -# Include a string value representing the path to a configuration -# file that has references for each of the configured Swift -# account(s)/backing stores. By default, no file path is specified -# and customized Swift referencing is disabled. Configuring this -# option is highly recommended while using Swift storage backend for -# image storage as it avoids storage of credentials in the database. -# -# NOTE: Please do not configure this option if you have set -# ``swift_store_multi_tenant`` to ``True``. -# -# Possible values: -# * String value representing an absolute path on the glance-api -# node -# -# Related options: -# * swift_store_multi_tenant -# -# (string value) -#swift_store_config_file = - -# -# Address of the ESX/ESXi or vCenter Server target system. -# -# This configuration option sets the address of the ESX/ESXi or vCenter -# Server target system. 
This option is required when using the VMware -# storage backend. The address can contain an IP address (127.0.0.1) or -# a DNS name (www.my-domain.com). -# -# Possible Values: -# * A valid IPv4 or IPv6 address -# * A valid DNS name -# -# Related options: -# * vmware_server_username -# * vmware_server_password -# -# (string value) -#vmware_server_host = 127.0.0.1 - -# -# Server username. -# -# This configuration option takes the username for authenticating with -# the VMware ESX/ESXi or vCenter Server. This option is required when -# using the VMware storage backend. -# -# Possible Values: -# * Any string that is the username for a user with appropriate -# privileges -# -# Related options: -# * vmware_server_host -# * vmware_server_password -# -# (string value) -#vmware_server_username = root - -# -# Server password. -# -# This configuration option takes the password for authenticating with -# the VMware ESX/ESXi or vCenter Server. This option is required when -# using the VMware storage backend. -# -# Possible Values: -# * Any string that is a password corresponding to the username -# specified using the "vmware_server_username" option -# -# Related options: -# * vmware_server_host -# * vmware_server_username -# -# (string value) -#vmware_server_password = vmware - -# -# The number of VMware API retries. -# -# This configuration option specifies the number of times the VMware -# ESX/VC server API must be retried upon connection related issues or -# server API call overload. It is not possible to specify 'retry -# forever'. -# -# Possible Values: -# * Any positive integer value -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#vmware_api_retry_count = 10 - -# -# Interval in seconds used for polling remote tasks invoked on VMware -# ESX/VC server. -# -# This configuration option takes in the sleep time in seconds for polling an -# on-going async task as part of the VMWare ESX/VC server API call. 
-# -# Possible Values: -# * Any positive integer value -# -# Related options: -# * None -# -# (integer value) -# Minimum value: 1 -#vmware_task_poll_interval = 5 - -# -# The directory where the glance images will be stored in the datastore. -# -# This configuration option specifies the path to the directory where the -# glance images will be stored in the VMware datastore. If this option -# is not set, the default directory where the glance images are stored -# is openstack_glance. -# -# Possible Values: -# * Any string that is a valid path to a directory -# -# Related options: -# * None -# -# (string value) -#vmware_store_image_dir = /openstack_glance - -# -# Set verification of the ESX/vCenter server certificate. -# -# This configuration option takes a boolean value to determine -# whether or not to verify the ESX/vCenter server certificate. If this -# option is set to True, the ESX/vCenter server certificate is not -# verified. If this option is set to False, then the default CA -# truststore is used for verification. -# -# This option is ignored if the "vmware_ca_file" option is set. In that -# case, the ESX/vCenter server certificate will then be verified using -# the file specified using the "vmware_ca_file" option . -# -# Possible Values: -# * True -# * False -# -# Related options: -# * vmware_ca_file -# -# (boolean value) -# Deprecated group/name - [glance_store]/vmware_api_insecure -#vmware_insecure = false - -# -# Absolute path to the CA bundle file. -# -# This configuration option enables the operator to use a custom -# Cerificate Authority File to verify the ESX/vCenter certificate. -# -# If this option is set, the "vmware_insecure" option will be ignored -# and the CA file specified will be used to authenticate the ESX/vCenter -# server certificate and establish a secure connection to the server. 
-# -# Possible Values: -# * Any string that is a valid absolute path to a CA file -# -# Related options: -# * vmware_insecure -# -# (string value) -#vmware_ca_file = /etc/ssl/certs/ca-certificates.crt - -# -# The datastores where the image can be stored. -# -# This configuration option specifies the datastores where the image can -# be stored in the VMWare store backend. This option may be specified -# multiple times for specifying multiple datastores. The datastore name -# should be specified after its datacenter path, separated by ":". An -# optional weight may be given after the datastore name, separated again -# by ":" to specify the priority. Thus, the required format becomes -# ::. -# -# When adding an image, the datastore with highest weight will be -# selected, unless there is not enough free space available in cases -# where the image size is already known. If no weight is given, it is -# assumed to be zero and the directory will be considered for selection -# last. If multiple datastores have the same weight, then the one with -# the most free space available is selected. -# -# Possible Values: -# * Any string of the format: -# :: -# -# Related options: -# * None -# -# (multi valued) -#vmware_datastores = - - -[oslo_concurrency] - -# -# From oslo.concurrency -# - -# Enables or disables inter-process locks. (boolean value) -# Deprecated group/name - [DEFAULT]/disable_process_locking -#disable_process_locking = false - -# Directory to use for lock files. For security, the specified directory should -# only be writable by the user running the processes that need locking. Defaults -# to environment variable OSLO_LOCK_PATH. If external locks are used, a lock -# path must be set. (string value) -# Deprecated group/name - [DEFAULT]/lock_path -#lock_path = - - -[oslo_policy] - -# -# From oslo.policy -# - -# The file that defines policies. (string value) -# Deprecated group/name - [DEFAULT]/policy_file -#policy_file = policy.json - -# Default rule. 
Enforced when a requested rule is not found. (string value) -# Deprecated group/name - [DEFAULT]/policy_default_rule -#policy_default_rule = default - -# Directories where policy configuration files are stored. They can be relative -# to any directory in the search path defined by the config_dir option, or -# absolute paths. The file defined by policy_file must exist for these -# directories to be searched. Missing or empty directories are ignored. (multi -# valued) -# Deprecated group/name - [DEFAULT]/policy_dirs -#policy_dirs = policy.d diff --git a/etc/glance-swift.conf.sample b/etc/glance-swift.conf.sample deleted file mode 100644 index 9caf8d04..00000000 --- a/etc/glance-swift.conf.sample +++ /dev/null @@ -1,25 +0,0 @@ -# glance-swift.conf.sample -# -# This file is an example config file when -# multiple swift accounts/backing stores are enabled. -# -# Specify the reference name in [] -# For each section, specify the auth_address, user and key. -# -# WARNING: -# * If any of auth_address, user or key is not specified, -# the glance-api's swift store will fail to configure - -[ref1] -user = tenant:user1 -key = key1 -auth_version = 2 -auth_address = http://localhost:5000/v2.0 - -[ref2] -user = project_name:user_name2 -key = key2 -user_domain_id = default -project_domain_id = default -auth_version = 3 -auth_address = http://localhost:5000/v3 diff --git a/etc/metadefs/README b/etc/metadefs/README deleted file mode 100644 index 39d25b30..00000000 --- a/etc/metadefs/README +++ /dev/null @@ -1,4 +0,0 @@ -This directory contains predefined namespaces for Glance Metadata Definitions -Catalog. Files from this directory can be loaded into the database using -db_load_metadefs command for glance-manage. Similarly you can unload the -definitions using db_unload_metadefs command. 
diff --git a/etc/metadefs/cim-processor-allocation-setting-data.json b/etc/metadefs/cim-processor-allocation-setting-data.json deleted file mode 100644 index 26f7e0d1..00000000 --- a/etc/metadefs/cim-processor-allocation-setting-data.json +++ /dev/null @@ -1,154 +0,0 @@ -{ - "namespace": "CIM::ProcessorAllocationSettingData", - "display_name": "CIM Processor Allocation Setting", - "description": "Properties related to the resource allocation settings of a processor (CPU) from Common Information Model (CIM) schema (http://www.dmtf.org/standards/cim). These are properties that identify processor setting data and may be specified to volume, image, host aggregate, flavor and Nova server as scheduler hint. For each property details, please refer to http://schemas.dmtf.org/wbem/cim-html/2/CIM_ProcessorAllocationSettingData.html.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Cinder::Volume", - "prefix": "CIM_PASD_", - "properties_target": "image" - }, - { - "name": "OS::Glance::Image", - "prefix": "CIM_PASD_" - }, - { - "name": "OS::Nova::Aggregate", - "prefix": "CIM_PASD_" - }, - { - "name": "OS::Nova::Flavor", - "prefix": "CIM_PASD_" - }, - { - "name": "OS::Nova::Server", - "properties_target": "scheduler_hint" - } - ], - "properties": { - "InstructionSet": { - "title": "Instruction Set", - "description": "Identifies the instruction set of the processor within a processor architecture.", - "operators": [""], - "type": "string", - "enum": [ - "x86:i386", - "x86:i486", - "x86:i586", - "x86:i686", - "x86:64", - "IA-64:IA-64", - "AS/400:TIMI", - "Power:Power_2.03", - "Power:Power_2.04", - "Power:Power_2.05", - "Power:Power_2.06", - "S/390:ESA/390", - "S/390:z/Architecture", - "S/390:z/Architecture_2", - "PA-RISC:PA-RISC_1.0", - "PA-RISC:PA-RISC_2.0", - "ARM:A32", - "ARM:A64", - "MIPS:MIPS_I", - "MIPS:MIPS_II", - "MIPS:MIPS_III", - "MIPS:MIPS_IV", - "MIPS:MIPS_V", - "MIPS:MIPS32", - "MIPS64:MIPS64", - "Alpha:Alpha", - 
"SPARC:SPARC_V7", - "SPARC:SPARC_V8", - "SPARC:SPARC_V9", - "SPARC:SPARC_JPS1", - "SPARC:UltraSPARC2005", - "SPARC:UltraSPARC2007", - "68k:68000", - "68k:68010", - "68k:68020", - "68k:68030", - "68k:68040", - "68k:68060" - ] - }, - "ProcessorArchitecture": { - "title": "Processor Architecture", - "description": "Identifies the processor architecture of the processor.", - "operators": [""], - "type": "string", - "enum": [ - "x86", - "IA-64", - "AS/400", - "Power", - "S/390", - "PA-RISC", - "ARM", - "MIPS", - "Alpha", - "SPARC", - "68k" - ] - }, - "InstructionSetExtensionName": { - "title": "Instruction Set Extension", - "description": "Identifies the instruction set extensions of the processor within a processor architecture.", - "operators": ["", ""], - "type": "array", - "items": { - "type": "string", - "enum": [ - "x86:3DNow", - "x86:3DNowExt", - "x86:ABM", - "x86:AES", - "x86:AVX", - "x86:AVX2", - "x86:BMI", - "x86:CX16", - "x86:F16C", - "x86:FSGSBASE", - "x86:LWP", - "x86:MMX", - "x86:PCLMUL", - "x86:RDRND", - "x86:SSE2", - "x86:SSE3", - "x86:SSSE3", - "x86:SSE4A", - "x86:SSE41", - "x86:SSE42", - "x86:FMA3", - "x86:FMA4", - "x86:XOP", - "x86:TBM", - "x86:VT-d", - "x86:VT-x", - "x86:EPT", - "x86:SVM", - "PA-RISC:MAX", - "PA-RISC:MAX2", - "ARM:DSP", - "ARM:Jazelle-DBX", - "ARM:Thumb", - "ARM:Thumb-2", - "ARM:ThumbEE)", - "ARM:VFP", - "ARM:NEON", - "ARM:TrustZone", - "MIPS:MDMX", - "MIPS:MIPS-3D", - "Alpha:BWX", - "Alpha:FIX", - "Alpha:CIX", - "Alpha:MVI" - ] - } - } - }, - "objects": [] -} - diff --git a/etc/metadefs/cim-resource-allocation-setting-data.json b/etc/metadefs/cim-resource-allocation-setting-data.json deleted file mode 100644 index af800eab..00000000 --- a/etc/metadefs/cim-resource-allocation-setting-data.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "namespace": "CIM::ResourceAllocationSettingData", - "display_name": "CIM Resource Allocation Setting Data", - "description": "Properties from Common Information Model (CIM) schema 
(http://www.dmtf.org/standards/cim) that represent settings specifically related to an allocated resource that are outside the scope of the CIM class typically used to represent the resource itself. These properties may be specified to volume, host aggregate and flavor. For each property details, please refer to http://schemas.dmtf.org/wbem/cim-html/2/CIM_ResourceAllocationSettingData.html.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Cinder::Volume", - "prefix": "CIM_RASD_", - "properties_target": "image" - }, - { - "name": "OS::Nova::Aggregate", - "prefix": "CIM_RASD_" - }, - { - "name": "OS::Nova::Flavor", - "prefix": "CIM_RASD_" - } - ], - "properties": { - "Address": { - "title": "Address", - "description": "The address of the resource.", - "type": "string" - }, - "AddressOnParent": { - "title": "Address On Parent", - "description": "Describes the address of this resource in the context of the Parent.", - "type": "string" - }, - "AllocationUnits": { - "title": "Allocation Units", - "description": "This property specifies the units of allocation used by the Reservation and Limit properties.", - "type": "string" - }, - "AutomaticAllocation": { - "title": "Automatic Allocation", - "description": "This property specifies if the resource will be automatically allocated.", - "type": "boolean" - }, - "AutomaticDeallocation": { - "title": "Automatic Deallocation", - "description": "This property specifies if the resource will be automatically de-allocated.", - "type": "boolean" - }, - "ConsumerVisibility": { - "title": "Consumer Visibility", - "description": "Describes the consumers visibility to the allocated resource.", - "operators": [""], - "type": "string", - "enum": [ - "Unknown", - "Passed-Through", - "Virtualized", - "Not represented", - "DMTF reserved", - "Vendor Reserved" - ] - }, - "Limit": { - "title": "Limit", - "description": "This property specifies the upper bound, or maximum amount of resource 
that will be granted for this allocation.", - "type": "string" - }, - "MappingBehavior": { - "title": "Mapping Behavior", - "description": "Specifies how this resource maps to underlying resources. If the HostResource array contains any entries, this property reflects how the resource maps to those specific resources.", - "operators": [""], - "type": "string", - "enum": [ - "Unknown", - "Not Supported", - "Dedicated", - "Soft Affinity", - "Hard Affinity", - "DMTF Reserved", - "Vendor Reserved" - ] - }, - "OtherResourceType": { - "title": "Other Resource Type", - "description": "A string that describes the resource type when a well defined value is not available and ResourceType has the value 'Other'.", - "type": "string" - }, - "Parent": { - "title": "Parent", - "description": "The Parent of the resource.", - "type": "string" - }, - "PoolID": { - "title": "Pool ID", - "description": "This property specifies which ResourcePool the resource is currently allocated from, or which ResourcePool the resource will be allocated from when the allocation occurs.", - "type": "string" - }, - "Reservation": { - "title": "Reservation", - "description": "This property specifies the amount of resource guaranteed to be available for this allocation.", - "type": "string" - }, - "ResourceSubType": { - "title": "Resource Sub Type", - "description": "A string describing an implementation specific sub-type for this resource.", - "type": "string" - }, - "ResourceType": { - "title": "Resource Type", - "description": "The type of resource this allocation setting represents.", - "operators": [""], - "type": "string", - "enum": [ - "Other", - "Computer System", - "Processor", - "Memory", - "IDE Controller", - "Parallel SCSI HBA", - "FC HBA", - "iSCSI HBA", - "IB HCA", - "Ethernet Adapter", - "Other Network Adapter", - "I/O Slot", - "I/O Device", - "Floppy Drive", - "CD Drive", - "DVD drive", - "Disk Drive", - "Tape Drive", - "Storage Extent", - "Other storage device", - "Serial port", - 
"Parallel port", - "USB Controller", - "Graphics controller", - "IEEE 1394 Controller", - "Partitionable Unit", - "Base Partitionable Unit", - "Power", - "Cooling Capacity", - "Ethernet Switch Port", - "Logical Disk", - "Storage Volume", - "Ethernet Connection", - "DMTF reserved", - "Vendor Reserved" - ] - }, - "VirtualQuantity": { - "title": "Virtual Quantity", - "description": "This property specifies the quantity of resources presented to the consumer.", - "type": "string" - }, - "VirtualQuantityUnits": { - "title": "Virtual Quantity Units", - "description": "This property specifies the units used by the VirtualQuantity property.", - "type": "string" - }, - "Weight": { - "title": "Weight", - "description": "This property specifies a relative priority for this allocation in relation to other allocations from the same ResourcePool.", - "type": "string" - }, - "Connection": { - "title": "Connection", - "description": "The thing to which this resource is connected.", - "type": "string" - }, - "HostResource": { - "title": "Host Resource", - "description": "This property exposes specific assignment of resources.", - "type": "string" - } - }, - "objects": [] -} - diff --git a/etc/metadefs/cim-storage-allocation-setting-data.json b/etc/metadefs/cim-storage-allocation-setting-data.json deleted file mode 100644 index 3c9e68e0..00000000 --- a/etc/metadefs/cim-storage-allocation-setting-data.json +++ /dev/null @@ -1,122 +0,0 @@ -{ - "namespace": "CIM::StorageAllocationSettingData", - "display_name": "CIM Storage Allocation Setting Data", - "description": "Properties related to the allocation of virtual storage from Common Information Model (CIM) schema (http://www.dmtf.org/standards/cim). These properties may be specified to volume, host aggregate and flavor. 
For each property details, please refer to http://schemas.dmtf.org/wbem/cim-html/2/CIM_StorageAllocationSettingData.html.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Cinder::Volume", - "prefix": "CIM_SASD_" - }, - { - "name": "OS::Nova::Aggregate", - "prefix": "CIM_SASD_" - }, - { - "name": "OS::Nova::Flavor", - "prefix": "CIM_SASD_" - } - ], - "properties": { - "Access": { - "title": "Access", - "description": "Access describes whether the allocated storage extent is 1 (readable), 2 (writeable), or 3 (both).", - "operators": [""], - "type": "string", - "enum": [ - "Unknown", - "Readable", - "Writeable", - "Read/Write Supported", - "DMTF Reserved" - ] - }, - "HostExtentName": { - "title": "Host Extent Name", - "description": "A unique identifier for the host extent.", - "type": "string" - }, - "HostExtentNameFormat": { - "title": "Host Extent Name Format", - "description": "The HostExtentNameFormat property identifies the format that is used for the value of the HostExtentName property.", - "operators": [""], - "type": "string", - "enum": [ - "Unknown", - "Other", - "SNVM", - "NAA", - "EUI64", - "T10VID", - "OS Device Name", - "DMTF Reserved" - ] - }, - "HostExtentNameNamespace": { - "title": "Host Extent Name Namespace", - "description": "If the host extent is a SCSI volume, then the preferred source for SCSI volume names is SCSI VPD Page 83 responses.", - "operators": [""], - "type": "string", - "enum": [ - "Unknown", - "Other", - "VPD83Type3", - "VPD83Type2", - "VPD83Type1", - "VPD80", - "NodeWWN", - "SNVM", - "OS Device Namespace", - "DMTF Reserved" - ] - }, - "HostExtentStartingAddress": { - "title": "Host Extent Starting Address", - "description": "The HostExtentStartingAddress property identifies the starting address on the host storage extent identified by the value of the HostExtentName property that is used for the allocation of the virtual storage extent.", - "type": "string" - }, - 
"HostResourceBlockSize": { - "title": "Host Resource Block Size", - "description": "Size in bytes of the blocks that are allocated at the host as the result of this storage resource allocation or storage resource allocation request.", - "type": "string" - }, - "Limit": { - "title": "Limit", - "description": "The maximum amount of blocks that will be granted for this storage resource allocation at the host.", - "type": "string" - }, - "OtherHostExtentNameFormat": { - "title": "Other Host Extent Name Format", - "description": "A string describing the format of the HostExtentName property if the value of the HostExtentNameFormat property is 1 (Other).", - "type": "string" - }, - "OtherHostExtentNameNamespace": { - "title": "Other Host Extent Name Namespace", - "description": "A string describing the namespace of the HostExtentName property if the value of the HostExtentNameNamespace matches 1 (Other).", - "type": "string" - }, - "Reservation": { - "title": "Reservation", - "description": "The amount of blocks that are guaranteed to be available for this storage resource allocation at the host.", - "type": "string" - }, - "VirtualQuantity": { - "title": "Virtual Quantity", - "description": "Number of blocks that are presented to the consumer.", - "type": "string" - }, - "VirtualQuantityUnits": { - "title": "Virtual Quantity Units", - "description": "This property specifies the units used by the VirtualQuantity property.", - "type": "string" - }, - "VirtualResourceBlockSize": { - "title": "Virtual Resource Block Size", - "description": "Size in bytes of the blocks that are presented to the consumer as the result of this storage resource allocation or storage resource allocation request.", - "type": "string" - } - }, - "objects": [] -} - diff --git a/etc/metadefs/cim-virtual-system-setting-data.json b/etc/metadefs/cim-virtual-system-setting-data.json deleted file mode 100644 index bf04e2e4..00000000 --- a/etc/metadefs/cim-virtual-system-setting-data.json +++ /dev/null @@ 
-1,127 +0,0 @@ -{ - "namespace": "CIM::VirtualSystemSettingData", - "display_name": "CIM Virtual System Setting Data", - "description": "A set of virtualization specific properties from Common Information Model (CIM) schema (http://www.dmtf.org/standards/cim), which define the virtual aspects of a virtual system. These properties may be specified to host aggregate and flavor. For each property details, please refer to http://schemas.dmtf.org/wbem/cim-html/2/CIM_VirtualSystemSettingData.html.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Nova::Aggregate", - "prefix": "CIM_VSSD_" - }, - { - "name": "OS::Nova::Flavor", - "prefix": "CIM_VSSD_" - } - ], - "properties": { - "AutomaticRecoveryAction": { - "title": "Automatic Recovery Action", - "description": "Action to take for the virtual system when the software executed by the virtual system fails.", - "operators": [""], - "type": "string", - "enum": [ - "None", - "Restart", - "Revert to snapshot", - "DMTF Reserved" - ] - }, - "AutomaticShutdownAction": { - "title": "Automatic Shutdown Action", - "description": "Action to take for the virtual system when the host is shut down.", - "operators": [""], - "type": "string", - "enum": [ - "Turn Off", - "Save state", - "Shutdown", - "DMTF Reserved" - ] - }, - "AutomaticStartupAction": { - "title": "Automatic Startup Action", - "description": "Action to take for the virtual system when the host is started.", - "operators": [""], - "type": "string", - "enum": [ - "None", - "Restart if previously active", - "Always startup", - "DMTF Reserved" - ] - }, - "AutomaticStartupActionDelay": { - "title": "Automatic Startup Action Delay", - "description": "Delay applicable to startup action.", - "type": "string" - }, - "AutomaticStartupActionSequenceNumber": { - "title": "Automatic Startup Action Sequence Number", - "description": "Number indicating the relative sequence of virtual system activation when the host system is 
started.", - "type": "string" - }, - "ConfigurationDataRoot": { - "title": "Configuration Data Root", - "description": "Filepath of a directory where information about the virtual system configuration is stored.", - "type": "string" - }, - "ConfigurationFile": { - "title": "Configuration File", - "description": "Filepath of a file where information about the virtual system configuration is stored.", - "type": "string" - }, - "ConfigurationID": { - "title": "Configuration ID", - "description": "Unique id of the virtual system configuration.", - "type": "string" - }, - "CreationTime": { - "title": "Creation Time", - "description": "Time when the virtual system configuration was created.", - "type": "string" - }, - "LogDataRoot": { - "title": "Log Data Root", - "description": "Filepath of a directory where log information about the virtual system is stored.", - "type": "string" - }, - "RecoveryFile": { - "title": "Recovery File", - "description": "Filepath of a file where recovery relateded information of the virtual system is stored.", - "type": "string" - }, - "SnapshotDataRoot": { - "title": "Snapshot Data Root", - "description": "Filepath of a directory where information about virtual system snapshots is stored.", - "type": "string" - }, - "SuspendDataRoot": { - "title": "Suspend Data Root", - "description": "Filepath of a directory where suspend related information about the virtual system is stored.", - "type": "string" - }, - "SwapFileDataRoot": { - "title": "Swap File Data Root", - "description": "Filepath of a directory where swapfiles of the virtual system are stored.", - "type": "string" - }, - "VirtualSystemIdentifier": { - "title": "Virtual System Identifier", - "description": "VirtualSystemIdentifier shall reflect a unique name for the system as it is used within the virtualization platform.", - "type": "string" - }, - "VirtualSystemType": { - "title": "Virtual System Type", - "description": "VirtualSystemType shall reflect a particular type of virtual 
system.", - "type": "string" - }, - "Notes": { - "title": "Notes", - "description": "End-user supplied notes that are related to the virtual system.", - "type": "string" - } - }, - "objects": [] -} - diff --git a/etc/metadefs/compute-aggr-disk-filter.json b/etc/metadefs/compute-aggr-disk-filter.json deleted file mode 100644 index 3a1037c9..00000000 --- a/etc/metadefs/compute-aggr-disk-filter.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "namespace": "OS::Compute::AggregateDiskFilter", - "display_name": "Disk Allocation per Host", - "description": "Properties related to the Nova scheduler filter AggregateDiskFilter. Filters aggregate hosts based on the available disk space compared to the requested disk space. Hosts in the aggregate with not enough usable disk will be filtered out. The filter must be enabled in the Nova scheduler to use these properties.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Nova::Aggregate" - } - ], - "properties": { - "disk_allocation_ratio": { - "title": "Disk Subscription Ratio", - "description": "Allows the host to be under and over subscribed for the amount of disk space requested for an instance. A ratio greater than 1.0 allows for over subscription (hosts may have less usable disk space than requested). A ratio less than 1.0 allows for under subscription.", - "type": "number", - "readonly": false - } - }, - "objects": [] -} diff --git a/etc/metadefs/compute-aggr-iops-filter.json b/etc/metadefs/compute-aggr-iops-filter.json deleted file mode 100644 index 31047835..00000000 --- a/etc/metadefs/compute-aggr-iops-filter.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "namespace": "OS::Compute::AggregateIoOpsFilter", - "display_name": "IO Ops per Host", - "description": "Properties related to the Nova scheduler filter AggregateIoOpsFilter. Filters aggregate hosts based on the number of instances currently changing state. 
Hosts in the aggregate with too many instances changing state will be filtered out. The filter must be enabled in the Nova scheduler to use these properties.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Nova::Aggregate" - } - ], - "properties": { - "max_io_ops_per_host": { - "title": "Maximum IO Operations per Host", - "description": "Prevents hosts in the aggregate that have this many or more instances currently in build, resize, snapshot, migrate, rescue or unshelve to be scheduled for new instances.", - "type": "integer", - "readonly": false, - "default": 8, - "minimum": 1 - } - }, - "objects": [] -} diff --git a/etc/metadefs/compute-aggr-num-instances.json b/etc/metadefs/compute-aggr-num-instances.json deleted file mode 100644 index 3c9f678d..00000000 --- a/etc/metadefs/compute-aggr-num-instances.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "namespace": "OS::Compute::AggregateNumInstancesFilter", - "display_name": "Instances per Host", - "description": "Properties related to the Nova scheduler filter AggregateNumInstancesFilter. Filters aggregate hosts by the number of running instances on it. Hosts in the aggregate with too many instances will be filtered out. 
The filter must be enabled in the Nova scheduler to use these properties.", "visibility": "public", - "protected": false, - "resource_type_associations": [ - { - "name": "OS::Nova::Aggregate" - } - ], - "properties": { - "max_instances_per_host": { - "title": "Max Instances Per Host", - "description": "Maximum number of instances allowed to run on a host in the aggregate.", - "type": "integer", - "readonly": false, - "minimum": 0 - } - }, - "objects": [] -} diff --git a/etc/metadefs/compute-cpu-pinning.json b/etc/metadefs/compute-cpu-pinning.json deleted file mode 100644 index 66ff8252..00000000 --- a/etc/metadefs/compute-cpu-pinning.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "namespace": "OS::Compute::CPUPinning", - "display_name": "CPU Pinning", - "description": "This provides the preferred CPU pinning and CPU thread pinning policy to be used when pinning vCPU of the guest to pCPU of the host. See http://docs.openstack.org/admin-guide/compute-numa-cpu-pinning.html", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image", - "prefix": "hw_" - }, - { - "name": "OS::Cinder::Volume", - "prefix": "hw_", - "properties_target": "image" - }, - { - "name": "OS::Nova::Flavor", - "prefix": "hw:" - } - ], - "properties": { - "cpu_policy": { - "title": "CPU Pinning policy", - "description": "Type of CPU pinning policy.", - "type": "string", - "enum": [ - "shared", - "dedicated" - ] - }, - "cpu_thread_policy": { - "title": "CPU Thread Pinning Policy.", - "description": "Type of CPU thread pinning policy.", - "type": "string", - "enum": [ - "isolate", - "prefer", - "require" - ] - } - } -} diff --git a/etc/metadefs/compute-guest-memory-backing.json b/etc/metadefs/compute-guest-memory-backing.json deleted file mode 100644 index f0e4e682..00000000 --- a/etc/metadefs/compute-guest-memory-backing.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "namespace": "OS::Compute::GuestMemoryBacking", - "display_name": "Guest Memory Backing", 
- "description": "This provides the preferred backing option for guest RAM. Guest's memory can be backed by hugepages to limit TLB lookups. See also: https://wiki.openstack.org/wiki/VirtDriverGuestCPUMemoryPlacement", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Nova::Flavor", - "prefix": "hw:" - }, - { - "name": "OS::Glance::Image", - "prefix": "hw_" - }, - { - "name": "OS::Cinder::Volume", - "prefix": "hw_", - "properties_target": "image" - } - ], - "properties": { - "mem_page_size": { - "title": "Size of memory page", - "description": "Page size to be used for Guest memory backing. Value can be specified as (i.e.: 2MB, 1GB) or 'any', 'small', 'large'. If this property is set in Image metadata then only 'any' and 'large' values are accepted in Flavor metadata by Nova API.", - "type": "string" - } - } -} \ No newline at end of file diff --git a/etc/metadefs/compute-guest-shutdown.json b/etc/metadefs/compute-guest-shutdown.json deleted file mode 100644 index 354ee638..00000000 --- a/etc/metadefs/compute-guest-shutdown.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "namespace": "OS::Compute::GuestShutdownBehavior", - "display_name": "Shutdown Behavior", - "description": "These properties allow modifying the shutdown behavior for stop, rescue, resize, and shelve operations.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image" - } - ], - "properties": { - "os_shutdown_timeout": { - "title": "Shutdown timeout", - "description": "By default, guests will be given 60 seconds to perform a graceful shutdown. After that, the VM is powered off. This property allows overriding the amount of time (unit: seconds) to allow a guest OS to cleanly shut down before power off. 
A value of 0 (zero) means the guest will be powered off immediately with no opportunity for guest OS clean-up.", - "type": "integer", - "minimum": 0 - } - }, - "objects": [] -} diff --git a/etc/metadefs/compute-host-capabilities.json b/etc/metadefs/compute-host-capabilities.json deleted file mode 100644 index 6e7b768d..00000000 --- a/etc/metadefs/compute-host-capabilities.json +++ /dev/null @@ -1,293 +0,0 @@ -{ - "namespace": "OS::Compute::HostCapabilities", - "display_name": "Compute Host Capabilities", - "description": "Hardware capabilities provided by the compute host. This provides the ability to fine tune the hardware specification required when an instance is requested. The ComputeCapabilitiesFilter should be enabled in the Nova scheduler to use these properties. When enabled, this filter checks that the capabilities provided by the compute host satisfy any extra specifications requested. Only hosts that can provide the requested capabilities will be eligible for hosting the instance.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Nova::Flavor", - "prefix": "capabilities:" - }, - { - "name": "OS::Nova::Aggregate", - "prefix": "aggregate_instance_extra_specs:" - } - ], - "properties": { - "cpu_info:vendor": { - "title": "Vendor", - "description": "Specifies the CPU manufacturer.", - "operators": [""], - "type": "string", - "enum": [ - "Intel", - "AMD" - ] - }, - "cpu_info:model": { - "title": "Model", - "description": "Specifies the CPU model. 
Use this property to ensure that your vm runs on a a specific cpu model.", - "operators": [""], - "type": "string", - "enum": [ - "Conroe", - "Core2Duo", - "Penryn", - "Nehalem", - "Westmere", - "SandyBridge", - "IvyBridge", - "Haswell", - "Broadwell", - "Delhi", - "Seoul", - "Abu Dhabi", - "Interlagos", - "Kabini", - "Valencia", - "Zurich", - "Budapest", - "Barcelona", - "Suzuka", - "Shanghai", - "Istanbul", - "Lisbon", - "Magny-Cours", - "Valencia", - "Cortex-A57", - "Cortex-A53", - "Cortex-A12", - "Cortex-A17", - "Cortex-A15", - "Coretx-A7", - "X-Gene" - ] - }, - "cpu_info:arch": { - "title": "Architecture", - "description": "Specifies the CPU architecture. Use this property to specify the architecture supported by the hypervisor.", - "operators": [""], - "type": "string", - "enum": [ - "x86", - "x86_64", - "i686", - "ia64", - "ARMv8-A", - "ARMv7-A" - ] - }, - "cpu_info:topology:cores": { - "title": "cores", - "description": "Number of cores.", - "type": "integer", - "readonly": false, - "default": 1 - }, - "cpu_info:topology:threads": { - "title": "threads", - "description": "Number of threads.", - "type": "integer", - "readonly": false, - "default": 1 - }, - "cpu_info:topology:sockets": { - "title": "sockets", - "description": "Number of sockets.", - "type": "integer", - "readonly": false, - "default": 1 - }, - "cpu_info:features": { - "title": "Features", - "description": "Specifies CPU flags/features. 
Using this property you can specify the required set of instructions supported by a vm.", - "operators": ["", ""], - "type": "array", - "items": { - "type": "string", - "enum": [ - "fpu", - "vme", - "de", - "pse", - "tsc", - "msr", - "pae", - "mce", - "cx8", - "apic", - "sep", - "mtrr", - "pge", - "mca", - "cmov", - "pat", - "pse36", - "pn", - "clflush", - "dts", - "acpi", - "mmx", - "fxsr", - "sse", - "sse2", - "ss", - "ht", - "tm", - "ia64", - "pbe", - "syscall", - "mp", - "nx", - "mmxext", - "fxsr_opt", - "pdpe1gb", - "rdtscp", - "lm", - "3dnowext", - "3dnow", - "arch_perfmon", - "pebs", - "bts", - "rep_good", - "nopl", - "xtopology", - "tsc_reliable", - "nonstop_tsc", - "extd_apicid", - "amd_dcm", - "aperfmperf", - "eagerfpu", - "nonstop_tsc_s3", - "pni", - "pclmulqdq", - "dtes64", - "monitor", - "ds_cpl", - "vmx", - "smx", - "est", - "tm2", - "ssse3", - "cid", - "fma", - "cx16", - "xtpr", - "pdcm", - "pcid", - "dca", - "sse4_1", - "sse4_2", - "x2apic", - "movbe", - "popcnt", - "tsc_deadline_timer", - "aes", - "xsave", - "avx", - "f16c", - "rdrand", - "hypervisor", - "rng", - "rng_en", - "ace", - "ace_en", - "ace2", - "ace2_en", - "phe", - "phe_en", - "pmm", - "pmm_en", - "lahf_lm", - "cmp_legacy", - "svm", - "extapic", - "cr8_legacy", - "abm", - "sse4a", - "misalignsse", - "3dnowprefetch", - "osvw", - "ibs", - "xop", - "skinit", - "wdt", - "lwp", - "fma4", - "tce", - "nodeid_msr", - "tbm", - "topoext", - "perfctr_core", - "perfctr_nb", - "bpext", - "perfctr_l2", - "mwaitx", - "ida", - "arat", - "cpb", - "epb", - "pln", - "pts", - "dtherm", - "hw_pstate", - "proc_feedback", - "hwp", - "hwp_notify", - "hwp_act_window", - "hwp_epp", - "hwp_pkg_req", - "intel_pt", - "tpr_shadow", - "vnmi", - "flexpriority", - "ept", - "vpid", - "npt", - "lbrv", - "svm_lock", - "nrip_save", - "tsc_scale", - "vmcb_clean", - "flushbyasid", - "decodeassists", - "pausefilter", - "pfthreshold", - "vmmcall", - "fsgsbase", - "tsc_adjust", - "bmi1", - "hle", - "avx2", - "smep", - "bmi2", - 
"erms", - "invpcid", - "rtm", - "cqm", - "mpx", - "avx512f", - "rdseed", - "adx", - "smap", - "pcommit", - "clflushopt", - "clwb", - "avx512pf", - "avx512er", - "avx512cd", - "sha_ni", - "xsaveopt", - "xsavec", - "xgetbv1", - "xsaves", - "cqm_llc", - "cqm_occup_llc", - "clzero" - ] - } - } - }, - "objects": [] -} diff --git a/etc/metadefs/compute-hypervisor.json b/etc/metadefs/compute-hypervisor.json deleted file mode 100644 index 29169dc9..00000000 --- a/etc/metadefs/compute-hypervisor.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "namespace": "OS::Compute::Hypervisor", - "display_name": "Hypervisor Selection", - "description": "OpenStack Compute supports many hypervisors, although most installations use only one hypervisor. For installations with multiple supported hypervisors, you can schedule different hypervisors using the ImagePropertiesFilter. This filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image" - } - ], - "properties": { - "hypervisor_type": { - "title": "Hypervisor Type", - "description": "Hypervisor type required by the image. Used with the ImagePropertiesFilter. \n\n KVM - Kernel-based Virtual Machine. LXC - Linux Containers (through libvirt). QEMU - Quick EMUlator. UML - User Mode Linux. hyperv - Microsoft® hyperv. vmware - VMware® vsphere. Baremetal - physical provisioning. VZ - Virtuozzo OS Containers and Virtual Machines (through libvirt). For more information, see: http://docs.openstack.org/trunk/config-reference/content/section_compute-hypervisors.html", - "type": "string", - "enum": [ - "baremetal", - "hyperv", - "kvm", - "lxc", - "qemu", - "uml", - "vmware", - "vz", - "xen" - ] - }, - "vm_mode": { - "title": "VM Mode", - "description": "The virtual machine mode. 
This represents the host/guest ABI (application binary interface) used for the virtual machine. Used with the ImagePropertiesFilter. \n\n hvm — Fully virtualized - This is the virtual machine mode (vm_mode) used by QEMU and KVM. \n\n xen - Xen 3.0 paravirtualized. \n\n uml — User Mode Linux paravirtualized. \n\n exe — Executables in containers. This is the mode used by LXC.", - "type": "string", - "enum": [ - "hvm", - "xen", - "uml", - "exe" - ] - } - }, - "objects": [] -} diff --git a/etc/metadefs/compute-instance-data.json b/etc/metadefs/compute-instance-data.json deleted file mode 100644 index a419c798..00000000 --- a/etc/metadefs/compute-instance-data.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "namespace": "OS::Compute::InstanceData", - "display_name": "Instance Config Data", - "description": "Instances can perform self-configuration based on data made available to the running instance. These properties affect instance configuration.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image" - }, - { - "name": "OS::Cinder::Volume", - "properties_target": "image" - } - ], - "properties": { - "img_config_drive": { - "title": "Config Drive", - "description": "This property specifies whether or not Nova should use a config drive when booting the image. Mandatory means that Nova will always use a config drive when booting the image. OpenStack can be configured to write metadata to a special configuration drive that will be attached to the instance when it boots. The instance can retrieve any information from the config drive. One use case for the config drive is to pass network configuration information to the instance. 
See also: http://docs.openstack.org/user-guide/cli_config_drive.html", - "type": "string", - "enum": [ - "optional", - "mandatory" - ] - }, - "os_require_quiesce": { - "title": "Require Quiescent File system", - "description": "This property specifies whether or not the filesystem must be quiesced during snapshot processing. For volume backed and image backed snapshots, yes means that snapshotting is aborted when quiescing fails, whereas, no means quiescing will be skipped and snapshot processing will continue after the quiesce failure.", - "type": "string", - "enum": [ - "yes", - "no" - ] - } - } -} diff --git a/etc/metadefs/compute-libvirt-image.json b/etc/metadefs/compute-libvirt-image.json deleted file mode 100644 index d418656a..00000000 --- a/etc/metadefs/compute-libvirt-image.json +++ /dev/null @@ -1,101 +0,0 @@ -{ - "namespace": "OS::Compute::LibvirtImage", - "display_name": "libvirt Driver Options for Images", - "description": "The libvirt Compute Driver Options for Glance Images. \n\nThese are properties specific to compute drivers. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image" - } - ], - "properties": { - "hw_disk_bus": { - "title": "Disk Bus", - "description": "Specifies the type of disk controller to attach disk devices to.", - "type": "string", - "enum": [ - "scsi", - "virtio", - "uml", - "xen", - "ide", - "usb" - ] - }, - "hw_rng_model": { - "title": "Random Number Generator Device", - "description": "Adds a random-number generator device to the image's instances. The cloud administrator can enable and control device behavior by configuring the instance's flavor. By default: The generator device is disabled. /dev/random is used as the default entropy source. 
To specify a physical HW RNG device, use the following option in the nova.conf file: rng_dev_path=/dev/hwrng", - "type": "string", - "default": "virtio" - }, - "hw_machine_type": { - "title": "Machine Type", - "description": "Enables booting an ARM system using the specified machine type. By default, if an ARM image is used and its type is not specified, Compute uses vexpress-a15 (for ARMv7) or virt (for AArch64) machine types. Valid types can be viewed by using the virsh capabilities command (machine types are displayed in the machine tag).", - "type": "string" - }, - "hw_scsi_model": { - "title": "SCSI Model", - "description": "Enables the use of VirtIO SCSI (virtio-scsi) to provide block device access for compute instances; by default, instances use VirtIO Block (virtio-blk). VirtIO SCSI is a para-virtualized SCSI controller device that provides improved scalability and performance, and supports advanced SCSI hardware.", - "type": "string", - "default": "virtio-scsi" - }, - "hw_video_model": { - "title": "Video Model", - "description": "The video image driver used.", - "type": "string", - "enum": [ - "vga", - "cirrus", - "vmvga", - "xen", - "qxl" - ] - }, - "hw_video_ram": { - "title": "Max Video Ram", - "description": "Maximum RAM (unit: MB) for the video image. Used only if a hw_video:ram_max_mb value has been set in the flavor's extra_specs and that value is higher than the value set in hw_video_ram.", - "type": "integer", - "minimum": 0 - }, - "os_command_line": { - "title": "Kernel Command Line", - "description": "The kernel command line to be used by the libvirt driver, instead of the default. For linux containers (LXC), the value is used as arguments for initialization. This key is valid only for Amazon kernel, ramdisk, or machine images (aki, ari, or ami).", - "type": "string" - }, - "hw_vif_model": { - "title": "Virtual Network Interface", - "description": "Specifies the model of virtual network interface device to use. 
The valid options depend on the hypervisor configuration. libvirt driver options: KVM and QEMU: e1000, ne2k_pci, pcnet, rtl8139, spapr-vlan, and virtio. Xen: e1000, netfront, ne2k_pci, pcnet, and rtl8139.", - "type": "string", - "enum": [ - "e1000", - "e1000e", - "ne2k_pci", - "netfront", - "pcnet", - "rtl8139", - "spapr-vlan", - "virtio" - ] - }, - "hw_qemu_guest_agent": { - "title": "QEMU Guest Agent", - "description": "This is a background process which helps management applications execute guest OS level commands. For example, freezing and thawing filesystems, entering suspend. However, guest agent (GA) is not bullet proof, and hostile guest OS can send spurious replies.", - "type": "string", - "enum": ["yes", "no"] - }, - "hw_pointer_model": { - "title": "Pointer Model", - "description": "Input devices allow interaction with a graphical framebuffer. For example to provide a graphic tablet for absolute cursor movement. Currently only supported by the KVM/QEMU hypervisor configuration and VNC or SPICE consoles must be enabled.", - "type": "string", - "enum": ["usbtablet"] - }, - "img_hide_hypervisor_id": { - "title": "Hide hypervisor id", - "description": "Enables hiding the host hypervisor signature in the guest OS.", - "type": "string", - "enum": ["yes", "no"] - } - }, - "objects": [] -} diff --git a/etc/metadefs/compute-libvirt.json b/etc/metadefs/compute-libvirt.json deleted file mode 100644 index 08fd9929..00000000 --- a/etc/metadefs/compute-libvirt.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "namespace": "OS::Compute::Libvirt", - "display_name": "libvirt Driver Options", - "description": "The libvirt compute driver options. \n\nThese are properties that affect the libvirt compute driver and may be specified on flavors and images. 
For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image", - "prefix": "hw_" - }, - { - "name": "OS::Nova::Flavor", - "prefix": "hw:" - } - ], - "properties": { - "serial_port_count": { - "title": "Serial Port Count", - "description": "Specifies the count of serial ports that should be provided. If hw:serial_port_count is not set in the flavor's extra_specs, then any count is permitted. If hw:serial_port_count is set, then this provides the default serial port count. It is permitted to override the default serial port count, but only with a lower value.", - "type": "integer", - "minimum": 0 - }, - "boot_menu": { - "title": "Boot Menu", - "description": "If true, enables the BIOS bootmenu. In cases where both the image metadata and Extra Spec are set, the Extra Spec setting is used. This allows for flexibility in setting/overriding the default behavior as needed.", - "type": "string", - "enum": ["true", "false"] - } - }, - "objects": [] -} diff --git a/etc/metadefs/compute-quota.json b/etc/metadefs/compute-quota.json deleted file mode 100644 index 86910574..00000000 --- a/etc/metadefs/compute-quota.json +++ /dev/null @@ -1,109 +0,0 @@ -{ - "namespace": "OS::Compute::Quota", - "display_name": "Flavor Quota", - "description": "Compute drivers may enable quotas on CPUs available to a VM, disk tuning, bandwidth I/O, and instance VIF traffic control. See: http://docs.openstack.org/admin-guide/compute-flavors.html", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Nova::Flavor" - } - ], - "objects": [ - { - "name": "CPU Limits", - "description": "You can configure the CPU limits with control parameters.", - "properties": { - "quota:cpu_shares": { - "title": "Quota: CPU Shares", - "description": "Specifies the proportional weighted share for the domain. 
If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", - "type": "integer" - }, - "quota:cpu_period": { - "title": "Quota: CPU Period", - "description": "Specifies the enforcement interval (unit: microseconds) for QEMU and LXC hypervisors. Within a period, each VCPU of the domain is not allowed to consume more than the quota worth of runtime. The value should be in range [1000, 1000000]. A period with value 0 means no value.", - "type": "integer", - "minimum": 1000, - "maximum": 1000000 - }, - "quota:cpu_quota": { - "title": "Quota: CPU Quota", - "description": "Specifies the maximum allowed bandwidth (unit: microseconds). A domain with a negative-value quota indicates that the domain has infinite bandwidth, which means that it is not bandwidth controlled. The value should be in range [1000, 18446744073709551] or less than 0. A quota with value 0 means no value. 
You can use this feature to ensure that all vCPUs run at the same speed.", - "type": "integer" - } - } - }, - { - "name": "Disk QoS", - "description": "Using disk I/O quotas, you can set maximum disk write to 10 MB per second for a VM user.", - "properties": { - "quota:disk_read_bytes_sec": { - "title": "Quota: Disk read bytes / sec", - "description": "Sets disk I/O quota for disk read bytes / sec.", - "type": "integer" - }, - "quota:disk_read_iops_sec": { - "title": "Quota: Disk read IOPS / sec", - "description": "Sets disk I/O quota for disk read IOPS / sec.", - "type": "integer" - }, - "quota:disk_write_bytes_sec": { - "title": "Quota: Disk Write Bytes / sec", - "description": "Sets disk I/O quota for disk write bytes / sec.", - "type": "integer" - }, - "quota:disk_write_iops_sec": { - "title": "Quota: Disk Write IOPS / sec", - "description": "Sets disk I/O quota for disk write IOPS / sec.", - "type": "integer" - }, - "quota:disk_total_bytes_sec": { - "title": "Quota: Disk Total Bytes / sec", - "description": "Sets disk I/O quota for total disk bytes / sec.", - "type": "integer" - }, - "quota:disk_total_iops_sec": { - "title": "Quota: Disk Total IOPS / sec", - "description": "Sets disk I/O quota for disk total IOPS / sec.", - "type": "integer" - } - } - }, - { - "name": "Virtual Interface QoS", - "description": "Bandwidth QoS tuning for instance virtual interfaces (VIFs) may be specified with these properties. Incoming and outgoing traffic can be shaped independently. If not specified, no quality of service (QoS) is applied on that traffic direction. So, if you want to shape only the network's incoming traffic, use inbound only (and vice versa). The OpenStack Networking service abstracts the physical implementation of the network, allowing plugins to configure and manage physical resources. Virtual Interfaces (VIF) in the logical model are analogous to physical network interface cards (NICs). 
VIFs are typically owned a managed by an external service; for instance when OpenStack Networking is used for building OpenStack networks, VIFs would be created, owned, and managed in Nova. VIFs are connected to OpenStack Networking networks via ports. A port is analogous to a port on a network switch, and it has an administrative state. When a VIF is attached to a port the OpenStack Networking API creates an attachment object, which specifies the fact that a VIF with a given identifier is plugged into the port.", - "properties": { - "quota:vif_inbound_average": { - "title": "Quota: VIF Inbound Average", - "description": "Network Virtual Interface (VIF) inbound average in kilobytes per second. Specifies average bit rate on the interface being shaped.", - "type": "integer" - }, - "quota:vif_inbound_burst": { - "title": "Quota: VIF Inbound Burst", - "description": "Network Virtual Interface (VIF) inbound burst in total kilobytes. Specifies the amount of bytes that can be burst at peak speed.", - "type": "integer" - }, - "quota:vif_inbound_peak": { - "title": "Quota: VIF Inbound Peak", - "description": "Network Virtual Interface (VIF) inbound peak in kilobytes per second. Specifies maximum rate at which an interface can receive data.", - "type": "integer" - }, - "quota:vif_outbound_average": { - "title": "Quota: VIF Outbound Average", - "description": "Network Virtual Interface (VIF) outbound average in kilobytes per second. Specifies average bit rate on the interface being shaped.", - "type": "integer" - }, - "quota:vif_outbound_burst": { - "title": "Quota: VIF Outbound Burst", - "description": "Network Virtual Interface (VIF) outbound burst in total kilobytes. Specifies the amount of bytes that can be burst at peak speed.", - "type": "integer" - }, - "quota:vif_outbound_peak": { - "title": "Quota: VIF Outbound Peak", - "description": "Network Virtual Interface (VIF) outbound peak in kilobytes per second. 
Specifies maximum rate at which an interface can send data.", - "type": "integer" - } - } - } - ] -} diff --git a/etc/metadefs/compute-randomgen.json b/etc/metadefs/compute-randomgen.json deleted file mode 100644 index 2414b844..00000000 --- a/etc/metadefs/compute-randomgen.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "namespace": "OS::Compute::RandomNumberGenerator", - "display_name": "Random Number Generator", - "description": "If a random-number generator device has been added to the instance through its image properties, the device can be enabled and configured.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Nova::Flavor" - } - ], - "properties": { - "hw_rng:allowed": { - "title": "Random Number Generator Allowed", - "description": "", - "type": "boolean" - }, - "hw_rng:rate_bytes": { - "title": "Random number generator limits.", - "description": "Allowed amount of bytes that the guest can read from the host's entropy per period.", - "type": "integer" - }, - "hw_rng:rate_period": { - "title": "Random number generator read period.", - "description": "Duration of the read period in seconds.", - "type": "integer" - } - } -} \ No newline at end of file diff --git a/etc/metadefs/compute-trust.json b/etc/metadefs/compute-trust.json deleted file mode 100644 index 7df5691b..00000000 --- a/etc/metadefs/compute-trust.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "namespace": "OS::Compute::Trust", - "display_name": "Trusted Compute Pools (Intel® TXT)", - "description": "Trusted compute pools with Intel® Trusted Execution Technology (Intel® TXT) support IT compliance by protecting virtualized data centers - private, public, and hybrid clouds against attacks toward hypervisor and BIOS, firmware, and other pre-launch software components. 
The Nova trust scheduling filter must be enabled and configured with the trust attestation service in order to use this feature.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Nova::Flavor" - } - ], - "properties": { - "trust:trusted_host": { - "title": "Intel® TXT attestation", - "description": "Select to ensure that node has been attested by Intel® Trusted Execution Technology (Intel® TXT). The Nova trust scheduling filter must be enabled and configured with the trust attestation service in order to use this feature.", - "type": "string", - "enum": [ - "trusted", - "untrusted", - "unknown" - ] - } - } -} \ No newline at end of file diff --git a/etc/metadefs/compute-vcputopology.json b/etc/metadefs/compute-vcputopology.json deleted file mode 100644 index 345a1c26..00000000 --- a/etc/metadefs/compute-vcputopology.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "namespace": "OS::Compute::VirtCPUTopology", - "display_name": "Virtual CPU Topology", - "description": "This provides the preferred socket/core/thread counts for the virtual CPU instance exposed to guests. This enables the ability to avoid hitting limitations on vCPU topologies that OS vendors place on their products. 
See also: http://git.openstack.org/cgit/openstack/nova-specs/tree/specs/juno/virt-driver-vcpu-topology.rst", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image", - "prefix": "hw_" - }, - { - "name": "OS::Cinder::Volume", - "prefix": "hw_", - "properties_target": "image" - }, - { - "name": "OS::Nova::Flavor", - "prefix": "hw:" - } - ], - "properties": { - "cpu_sockets": { - "title": "vCPU Sockets", - "description": "Preferred number of sockets to expose to the guest.", - "type": "integer" - }, - "cpu_cores": { - "title": "vCPU Cores", - "description": "Preferred number of cores to expose to the guest.", - "type": "integer" - }, - "cpu_threads": { - "title": " vCPU Threads", - "description": "Preferred number of threads to expose to the guest.", - "type": "integer" - }, - "cpu_maxsockets": { - "title": "Max vCPU Sockets", - "description": "Maximum number of sockets to expose to the guest.", - "type": "integer" - }, - "cpu_maxcores": { - "title": "Max vCPU Cores", - "description": "Maximum number of cores to expose to the guest.", - "type": "integer" - }, - "cpu_maxthreads": { - "title": "Max vCPU Threads", - "description": "Maximum number of threads to expose to the guest.", - "type": "integer" - } - } -} diff --git a/etc/metadefs/compute-vmware-flavor.json b/etc/metadefs/compute-vmware-flavor.json deleted file mode 100644 index ea601179..00000000 --- a/etc/metadefs/compute-vmware-flavor.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "namespace": "OS::Compute::VMwareFlavor", - "display_name": "VMware Driver Options for Flavors", - "description": "VMware Driver Options for Flavors may be used to customize and manage Nova Flavors. These are properties specific to VMWare compute drivers and will only have an effect if the VMWare compute driver is enabled in Nova. 
See: http://docs.openstack.org/admin-guide/compute-flavors.html", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Nova::Flavor" - } - ], - "properties": { - "vmware:hw_version": { - "title": "VMware Hardware Version", - "description": "Specifies the hardware version VMware uses to create images. If the hardware version needs to be compatible with a cluster version, for backward compatibility or other circumstances, the vmware:hw_version key specifies a virtual machine hardware version. In the event that a cluster has mixed host version types, the key will enable the vCenter to place the cluster on the correct host.", - "type": "string" - }, - "vmware:storage_policy": { - "title": "VMware Storage Policy", - "description": "Specifies the storage policy to be applied for newly created instance. If not provided, the default storage policy specified in config file will be used. If Storage Policy Based Management (SPBM) is not enabled in config file, this value won't be used.", - "type": "string" - } - } -} diff --git a/etc/metadefs/compute-vmware-quota-flavor.json b/etc/metadefs/compute-vmware-quota-flavor.json deleted file mode 100644 index 7daec697..00000000 --- a/etc/metadefs/compute-vmware-quota-flavor.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "namespace": "OS::Compute::VMwareQuotaFlavor", - "display_name": "VMware Quota for Flavors", - "description": "The VMware compute driver allows various compute quotas to be specified on flavors. When specified, the VMWare driver will ensure that the quota is enforced. These are properties specific to VMWare compute drivers and will only have an effect if the VMWare compute driver is enabled in Nova. For a list of hypervisors, see: https://wiki.openstack.org/wiki/HypervisorSupportMatrix. 
For flavor customization, see: http://docs.openstack.org/admin-guide/compute-flavors.html", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Nova::Flavor" - } - ], - "properties": { - "quota:cpu_limit": { - "title": "Quota: CPU Limit", - "description": "Specifies the upper limit for CPU allocation in MHz. This parameter ensures that a machine never uses more than the defined amount of CPU time. It can be used to enforce a limit on the machine's CPU performance. The value should be a numerical value in MHz. If zero is supplied then the cpu_limit is unlimited.", - "type": "integer", - "minimum": 0 - }, - "quota:cpu_reservation": { - "title": "Quota: CPU Reservation Limit", - "description": "Specifies the guaranteed minimum CPU reservation in MHz. This means that if needed, the machine will definitely get allocated the reserved amount of CPU cycles. The value should be a numerical value in MHz.", - "type": "integer", - "minimum": 0 - } - } -} diff --git a/etc/metadefs/compute-vmware.json b/etc/metadefs/compute-vmware.json deleted file mode 100644 index a10dfb8f..00000000 --- a/etc/metadefs/compute-vmware.json +++ /dev/null @@ -1,207 +0,0 @@ -{ - "namespace": "OS::Compute::VMware", - "display_name": "VMware Driver Options", - "description": "The VMware compute driver options. \n\nThese are properties specific to VMWare compute drivers and will only have an effect if the VMWare compute driver is enabled in Nova. 
For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image" - } - ], - "properties": { - "vmware_adaptertype": { - "title": "Disk Adapter Type", - "description": "The virtual SCSI or IDE controller used by the hypervisor.", - "type": "string", - "enum": [ - "lsiLogic", - "lsiLogicsas", - "paraVirtual", - "busLogic", - "ide" - ], - "default" : "lsiLogic" - }, - "vmware_disktype": { - "title": "Disk Provisioning Type", - "description": "When performing operations such as creating a virtual disk, cloning, or migrating, the disk provisioning type may be specified. Please refer to VMware documentation for more.", - "type": "string", - "enum": [ - "streamOptimized", - "sparse", - "preallocated" - ], - "default" : "preallocated" - }, - "vmware_ostype": { - "title": "OS Type", - "description": "A VMware GuestID which describes the operating system installed in the image. This value is passed to the hypervisor when creating a virtual machine. If not specified, the key defaults to otherGuest. 
See thinkvirt.com.", - "type": "string", - "enum": [ - "asianux3_64Guest", - "asianux3Guest", - "asianux4_64Guest", - "asianux4Guest", - "asianux5_64Guest", - "asianux7_64Guest", - "centos64Guest", - "centosGuest", - "centos6Guest", - "centos6_64Guest", - "centos7_64Guest", - "coreos64Guest", - "darwin10_64Guest", - "darwin10Guest", - "darwin11_64Guest", - "darwin11Guest", - "darwin12_64Guest", - "darwin13_64Guest", - "darwin14_64Guest", - "darwin15_64Guest", - "darwin16_64Guest", - "darwin64Guest", - "darwinGuest", - "debian4_64Guest", - "debian4Guest", - "debian5_64Guest", - "debian5Guest", - "debian6_64Guest", - "debian6Guest", - "debian7_64Guest", - "debian7Guest", - "debian8_64Guest", - "debian8Guest", - "debian9_64Guest", - "debian9Guest", - "debian10_64Guest", - "debian10Guest", - "dosGuest", - "eComStation2Guest", - "eComStationGuest", - "fedora64Guest", - "fedoraGuest", - "freebsd64Guest", - "freebsdGuest", - "genericLinuxGuest", - "mandrakeGuest", - "mandriva64Guest", - "mandrivaGuest", - "netware4Guest", - "netware5Guest", - "netware6Guest", - "nld9Guest", - "oesGuest", - "openServer5Guest", - "openServer6Guest", - "opensuse64Guest", - "opensuseGuest", - "oracleLinux64Guest", - "oracleLinuxGuest", - "oracleLinux6Guest", - "oracleLinux6_64Guest", - "oracleLinux7_64Guest", - "os2Guest", - "other24xLinux64Guest", - "other24xLinuxGuest", - "other26xLinux64Guest", - "other26xLinuxGuest", - "other3xLinux64Guest", - "other3xLinuxGuest", - "otherGuest", - "otherGuest64", - "otherLinux64Guest", - "otherLinuxGuest", - "redhatGuest", - "rhel2Guest", - "rhel3_64Guest", - "rhel3Guest", - "rhel4_64Guest", - "rhel4Guest", - "rhel5_64Guest", - "rhel5Guest", - "rhel6_64Guest", - "rhel6Guest", - "rhel7_64Guest", - "rhel7Guest", - "sjdsGuest", - "sles10_64Guest", - "sles10Guest", - "sles11_64Guest", - "sles11Guest", - "sles12_64Guest", - "sles12Guest", - "sles64Guest", - "slesGuest", - "solaris10_64Guest", - "solaris10Guest", - "solaris11_64Guest", - "solaris6Guest", - 
"solaris7Guest", - "solaris8Guest", - "solaris9Guest", - "turboLinux64Guest", - "turboLinuxGuest", - "ubuntu64Guest", - "ubuntuGuest", - "unixWare7Guest", - "vmkernel5Guest", - "vmkernel6Guest", - "vmkernel65Guest", - "vmkernelGuest", - "vmwarePhoton64Guest", - "win2000AdvServGuest", - "win2000ProGuest", - "win2000ServGuest", - "win31Guest", - "win95Guest", - "win98Guest", - "windows7_64Guest", - "windows7Guest", - "windows7Server64Guest", - "windows8_64Guest", - "windows8Guest", - "windows8Server64Guest", - "windows9_64Guest", - "windows9Guest", - "windows9Server64Guest", - "windowsHyperVGuest", - "winLonghorn64Guest", - "winLonghornGuest", - "winMeGuest", - "winNetBusinessGuest", - "winNetDatacenter64Guest", - "winNetDatacenterGuest", - "winNetEnterprise64Guest", - "winNetEnterpriseGuest", - "winNetStandard64Guest", - "winNetStandardGuest", - "winNetWebGuest", - "winNTGuest", - "winVista64Guest", - "winVistaGuest", - "winXPHomeGuest", - "winXPPro64Guest", - "winXPProGuest" - ], - "default": "otherGuest" - }, - "hw_vif_model": { - "title": "Virtual Network Interface", - "description": "Specifies the model of virtual network interface device to use. The valid options depend on the hypervisor. VMware driver supported options: e1000, e1000e, VirtualE1000, VirtualE1000e, VirtualPCNet32, VirtualSriovEthernetCard, and VirtualVmxnet.", - "type": "string", - "enum": [ - "e1000", - "e1000e", - "VirtualE1000", - "VirtualE1000e", - "VirtualPCNet32", - "VirtualSriovEthernetCard", - "VirtualVmxnet", - "VirtualVmxnet3" - ], - "default" : "e1000" - } - }, - "objects": [] -} diff --git a/etc/metadefs/compute-watchdog.json b/etc/metadefs/compute-watchdog.json deleted file mode 100644 index a8e9e43a..00000000 --- a/etc/metadefs/compute-watchdog.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "namespace": "OS::Compute::Watchdog", - "display_name": "Watchdog Behavior", - "description": "Compute drivers may enable watchdog behavior over instances. 
See: http://docs.openstack.org/admin-guide/compute-flavors.html", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image" - }, - { - "name": "OS::Cinder::Volume", - "properties_target": "image" - }, - { - "name": "OS::Nova::Flavor" - } - ], - "properties": { - "hw_watchdog_action": { - "title": "Watchdog Action", - "description": "For the libvirt driver, you can enable and set the behavior of a virtual hardware watchdog device for each flavor. Watchdog devices keep an eye on the guest server, and carry out the configured action, if the server hangs. The watchdog uses the i6300esb device (emulating a PCI Intel 6300ESB). If hw_watchdog_action is not specified, the watchdog is disabled. Watchdog behavior set using a specific image's properties will override behavior set using flavors.", - "type": "string", - "enum": [ - "disabled", - "reset", - "poweroff", - "pause", - "none" - ] - } - } -} diff --git a/etc/metadefs/compute-xenapi.json b/etc/metadefs/compute-xenapi.json deleted file mode 100644 index eda7489f..00000000 --- a/etc/metadefs/compute-xenapi.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "namespace": "OS::Compute::XenAPI", - "display_name": "XenAPI Driver Options", - "description": "The XenAPI compute driver options. \n\nThese are properties specific to compute drivers. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image" - } - ], - "properties": { - "os_type": { - "title": "OS Type", - "description": "The operating system installed on the image. The XenAPI driver contains logic that takes different actions depending on the value of the os_type parameter of the image. 
For example, for os_type=windows images, it creates a FAT32-based swap partition instead of a Linux swap partition, and it limits the injected host name to less than 16 characters.", - "type": "string", - "enum": [ - "linux", - "windows" - ] - }, - "auto_disk_config": { - "title": "Disk Adapter Type", - "description": "If true, the root partition on the disk is automatically resized before the instance boots. This value is only taken into account by the Compute service when using a Xen-based hypervisor with the XenAPI driver. The Compute service will only attempt to resize if there is a single partition on the image, and only if the partition is in ext3 or ext4 format.", - "type": "boolean" - } - }, - "objects": [] -} diff --git a/etc/metadefs/glance-common-image-props.json b/etc/metadefs/glance-common-image-props.json deleted file mode 100644 index 636ff3a0..00000000 --- a/etc/metadefs/glance-common-image-props.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "display_name": "Common Image Properties", - "namespace": "OS::Glance::CommonImageProperties", - "description": "When adding an image to Glance, you may specify some common image properties that may prove useful to consumers of your image.", - "protected": true, - "resource_type_associations" : [ - ], - "properties": { - "kernel_id": { - "title": "Kernel ID", - "type": "string", - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image." - }, - "ramdisk_id": { - "title": "Ramdisk ID", - "type": "string", - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image." 
- }, - "instance_uuid": { - "title": "Instance ID", - "type": "string", - "description": "Metadata which can be used to record which instance this image is associated with. (Informational only, does not create an instance snapshot.)" - }, - "architecture": { - "title": "CPU Architecture", - "description": "The CPU architecture that must be supported by the hypervisor. For example, x86_64, arm, or ppc64. Run uname -m to get the architecture of a machine. We strongly recommend using the architecture data vocabulary defined by the libosinfo project for this purpose.", - "type": "string" - }, - "os_distro": { - "title": "OS Distro", - "description": "The common name of the operating system distribution in lowercase (uses the same data vocabulary as the libosinfo project). Specify only a recognized value for this field. Deprecated values are listed to assist you in searching for the recognized value.", - "type": "string" - }, - "os_version": { - "title": "OS Version", - "description": "Operating system version as specified by the distributor. 
(for example, '11.10')", - "type": "string" - } - } -} diff --git a/etc/metadefs/image-signature-verification.json b/etc/metadefs/image-signature-verification.json deleted file mode 100644 index b5ec3c7c..00000000 --- a/etc/metadefs/image-signature-verification.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "namespace": "OS::Glance::Signatures", - "display_name": "Image Signature Verification", - "description": "Image signature verification allows the user to verify that an image has not been modified prior to booting the image.", - "visibility": "public", - "protected": false, - "resource_type_associations": [ - { - "name": "OS::Glance::Image" - } - ], - "properties": { - "img_signature": { - "title": "Image Signature", - "description": "The signature of the image data encoded in base64 format.", - "type": "string" - }, - "img_signature_certificate_uuid": { - "title": "Image Signature Certificate UUID", - "description": "The UUID used to retrieve the certificate from the key manager.", - "type": "string" - }, - "img_signature_hash_method": { - "title": "Image Signature Hash Method", - "description": "The hash method used in creating the signature.", - "type": "string", - "enum": [ - "SHA-224", - "SHA-256", - "SHA-384", - "SHA-512" - ] - }, - "img_signature_key_type": { - "title": "Image Signature Key Type", - "description": "The key type used in creating the signature.", - "type": "string", - "enum": [ - "RSA-PSS", - "DSA", - "ECC_SECT571K1", - "ECC_SECT409K1", - "ECC_SECT571R1", - "ECC_SECT409R1", - "ECC_SECP521R1", - "ECC_SECP384R1" - ] - } - } -} diff --git a/etc/metadefs/operating-system.json b/etc/metadefs/operating-system.json deleted file mode 100644 index 5907b5c5..00000000 --- a/etc/metadefs/operating-system.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "display_name": "Common Operating System Properties", - "namespace": "OS::OperatingSystem", - "description": "Details of the operating system contained within this image as well as common operating system properties 
that can be set on a VM instance created from this image.", - "protected": true, - "resource_type_associations" : [ - { - "name": "OS::Glance::Image" - }, - { - "name": "OS::Cinder::Volume", - "properties_target": "image" - } - ], - "properties": { - "os_distro": { - "title": "OS Distro", - "description": "The common name of the operating system distribution in lowercase (uses the same data vocabulary as the libosinfo project). Specify only a recognized value for this field. Deprecated values are listed to assist you in searching for the recognized value.", - "type": "string" - }, - "os_version": { - "title": "OS Version", - "description": "Operating system version as specified by the distributor. (for example, '11.10')", - "type": "string" - }, - "os_admin_user": { - "title": "OS Admin User", - "description": "The name of the user with admin privileges.", - "type": "string" - } - } -} diff --git a/etc/metadefs/software-databases.json b/etc/metadefs/software-databases.json deleted file mode 100644 index 973c0920..00000000 --- a/etc/metadefs/software-databases.json +++ /dev/null @@ -1,334 +0,0 @@ -{ - "namespace": "OS::Software::DBMS", - "display_name": "Database Software", - "description": "A database is an organized collection of data. The data is typically organized to model aspects of reality in a way that supports processes requiring information. Database management systems are computer software applications that interact with the user, other applications, and the database itself to capture and analyze data. 
(http://en.wikipedia.org/wiki/Database)", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image" - }, - { - "name": "OS::Cinder::Volume", - "properties_target": "image" - }, - { - "name": "OS::Nova::Server", - "properties_target": "metadata" - }, - { - "name": "OS::Trove::Instance" - } - ], - "objects": [ - { - "name": "MySQL", - "description": "MySQL is an object-relational database management system (ORDBMS). The MySQL development project has made its source code available under the terms of the GNU General Public License, as well as under a variety of proprietary agreements. MySQL was owned and sponsored by a single for-profit firm, the Swedish company MySQL AB, now owned by Oracle Corporation. MySQL is a popular choice of database for use in web applications, and is a central component of the widely used LAMP open source web application software stack (and other 'AMP' stacks). (http://en.wikipedia.org/wiki/MySQL)", - "properties": { - "sw_database_mysql_version": { - "title": "Version", - "description": "The specific version of MySQL.", - "type": "string" - }, - "sw_database_mysql_listen_port": { - "title": "Listen Port", - "description": "The configured TCP/IP port which MySQL listens for incoming connections.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 3606 - }, - "sw_database_mysql_admin": { - "title": "Admin User", - "description": "The primary user with privileges to perform administrative operations.", - "type": "string", - "default": "root" - } - } - }, - { - "name": "PostgreSQL", - "description": "PostgreSQL, often simply 'Postgres', is an object-relational database management system (ORDBMS) with an emphasis on extensibility and standards-compliance. PostgreSQL is cross-platform and runs on many operating systems. 
(http://en.wikipedia.org/wiki/PostgreSQL)", - "properties": { - "sw_database_postgresql_version": { - "title": "Version", - "description": "The specific version of PostgreSQL.", - "type": "string" - }, - "sw_database_postgresql_listen_port": { - "title": "Listen Port", - "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which PostgreSQL is to listen for connections from client applications.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 5432 - }, - "sw_database_postgresql_admin": { - "title": "Admin User", - "description": "The primary user with privileges to perform administrative operations.", - "type": "string", - "default": "postgres" - } - } - }, - { - "name": "SQL Server", - "description": "Microsoft SQL Server is a relational database management system developed by Microsoft. There are at least a dozen different editions of Microsoft SQL Server aimed at different audiences and for workloads ranging from small single-machine applications to large Internet-facing applications with many concurrent users. Its primary query languages are T-SQL and ANSI SQL. 
(http://en.wikipedia.org/wiki/Microsoft_SQL_Server)", - "properties": { - "sw_database_sqlserver_version": { - "title": "Version", - "description": "The specific version of Microsoft SQL Server.", - "type": "string" - }, - "sw_database_sqlserver_edition": { - "title": "Edition", - "description": "SQL Server is available in multiple editions, with different feature sets and targeting different users.", - "type": "string", - "default": "Express", - "enum": [ - "Datacenter", - "Enterprise", - "Standard", - "Web", - "Business Intelligence", - "Workgroup", - "Express", - "Compact (SQL CE)", - "Developer", - "Embedded (SSEE)", - "Express", - "Fast Track", - "LocalDB", - "Parallel Data Warehouse (PDW)", - "Business Intelligence", - "Datawarehouse Appliance Edition" - ] - }, - "sw_database_sqlserver_listen_port": { - "title": "Listen Port", - "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which SQL Server is to listen for connections from client applications. The default SQL Server port is 1433, and client ports are assigned a random value between 1024 and 5000.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 1433 - }, - "sw_database_postsqlserver_admin": { - "title": "Admin User", - "description": "The primary user with privileges to perform administrative operations.", - "type": "string", - "default": "sa" - } - } - }, - { - "name": "Oracle", - "description": "Oracle Database (commonly referred to as Oracle RDBMS or simply as Oracle) is an object-relational database management system produced and marketed by Oracle Corporation. 
(http://en.wikipedia.org/wiki/Oracle_Database)", - "properties": { - "sw_database_oracle_version": { - "title": "Version", - "description": "The specific version of Oracle.", - "type": "string" - }, - "sw_database_oracle_edition": { - "title": "Edition", - "description": "Over and above the different versions of the Oracle database management software developed over time, Oracle Corporation subdivides its product into varying editions.", - "type": "string", - "default": "Express", - "enum": [ - "Enterprise", - "Standard", - "Standard Edition One", - "Express (XE)", - "Workgroup", - "Lite" - ] - }, - "sw_database_oracle_listen_port": { - "title": "Listen Port", - "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Oracle is to listen for connections from client applications.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 1521 - } - } - }, - { - "name": "DB2", - "description": "IBM DB2 is a family of database server products developed by IBM. These products all support the relational model, but in recent years some products have been extended to support object-relational features and non-relational structures, in particular XML. (http://en.wikipedia.org/wiki/IBM_DB2)", - "properties": { - "sw_database_db2_version": { - "title": "Version", - "description": "The specific version of DB2.", - "type": "string" - }, - "sw_database_db2_port": { - "title": "Listen Port", - "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which DB2 is to listen for connections from client applications.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 5432 - }, - "sw_database_db2_admin": { - "title": "Admin User", - "description": "The primary user with privileges to perform administrative operations.", - "type": "string" - } - } - }, - { - "name": "MongoDB", - "description": "MongoDB is a cross-platform document-oriented database. 
Classified as a NoSQL database, MongoDB uses JSON-like documents with dynamic schemas (MongoDB calls the format BSON), making the integration of data in certain types of applications easier and faster. Released under a combination of the GNU Affero General Public License and the Apache License, MongoDB is free and open-source software. (http://en.wikipedia.org/wiki/MongoDB)", - "properties": { - "sw_database_mongodb_version": { - "title": "Version", - "description": "The specific version of MongoDB.", - "type": "string" - }, - "sw_database_mongodb_listen_port": { - "title": "Listen Port", - "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which MongoDB is to listen for connections from client applications.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 27017 - }, - "sw_database_mongodb_admin": { - "title": "Admin User", - "description": "The primary user with privileges to perform administrative operations.", - "type": "string" - } - } - }, - { - "name": "Couchbase Server", - "description": "Couchbase Server, originally known as Membase, is an open source, distributed (shared-nothing architecture) NoSQL document-oriented database that is optimized for interactive applications. These applications must serve many concurrent users by creating, storing, retrieving, aggregating, manipulating and presenting data. In support of these kinds of application needs, Couchbase is designed to provide easy-to-scale key-value or document access with low latency and high sustained throughput. 
(http://en.wikipedia.org/wiki/Couchbase_Server)", - "properties": { - "sw_database_couchbaseserver_version": { - "title": "Version", - "description": "The specific version of Couchbase Server.", - "type": "string" - }, - "sw_database_couchbaseserver_listen_port": { - "title": "Listen Port", - "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Couchbase is to listen for connections from client applications.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 11211 - }, - "sw_database_couchbaseserver_admin": { - "title": "Admin User", - "description": "The primary user with privileges to perform administrative operations.", - "type": "string", - "default": "admin" - } - } - }, - { - "name": "Redis", - "description": "Redis is a data structure server (NoSQL). It is open-source, networked, in-memory, and stores keys with optional durability. The development of Redis has been sponsored by Pivotal Software since May 2013; before that, it was sponsored by VMware. The name Redis means REmote DIctionary Server. (http://en.wikipedia.org/wiki/Redis)", - "properties": { - "sw_database_redis_version": { - "title": "Version", - "description": "The specific version of Redis.", - "type": "string" - }, - "sw_database_redis_listen_port": { - "title": "Listen Port", - "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Redis is to listen for connections from client applications.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 6379 - }, - "sw_database_redis_admin": { - "title": "Admin User", - "description": "The primary user with privileges to perform administrative operations.", - "type": "string", - "default": "admin" - } - } - }, - { - "name": "CouchDB", - "description": "Apache CouchDB, commonly referred to as CouchDB, is an open source NoSQL database. 
It is a NoSQL database that uses JSON to store data, JavaScript as its query language using MapReduce, and HTTP for an API. One of its distinguishing features is multi-master replication. CouchDB was first released in 2005 and later became an Apache project in 2008. (http://en.wikipedia.org/wiki/CouchDB)", - "properties": { - "sw_database_couchdb_version": { - "title": "Version", - "description": "The specific version of CouchDB.", - "type": "string" - }, - "sw_database_couchdb_listen_port": { - "title": "Listen Port", - "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which CouchDB is to listen for connections from client applications.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 5984 - }, - "sw_database_couchdb_admin": { - "title": "Admin User", - "description": "The primary user with privileges to perform administrative operations.", - "type": "string" - } - } - }, - { - "name": "Apache Cassandra", - "description": "Apache Cassandra is an open source distributed NoSQL database management system designed to handle large amounts of data across many commodity servers, providing high availability with no single point of failure. 
(http://en.wikipedia.org/wiki/Apache_Cassandra)", - "properties": { - "sw_database_cassandra_version": { - "title": "Version", - "description": "The specific version of Apache Cassandra.", - "type": "string" - }, - "sw_database_cassandra_listen_port": { - "title": "Listen Port", - "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Cassandra is to listen for connections from client applications.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 9160 - }, - "sw_database_cassandra_admin": { - "title": "Admin User", - "description": "The primary user with privileges to perform administrative operations.", - "type": "string", - "default": "cassandra" - } - } - }, - { - "name": "HBase", - "description": "HBase is an open source, non-relational (NoSQL), distributed database modeled after Google's BigTable and written in Java. It is developed as part of Apache Software Foundation's Apache Hadoop project and runs on top of HDFS (Hadoop Distributed Filesystem), providing BigTable-like capabilities for Hadoop. (http://en.wikipedia.org/wiki/Apache_HBase)", - "properties": { - "sw_database_hbase_version": { - "title": "Version", - "description": "The specific version of HBase.", - "type": "string" - } - } - }, - { - "name": "Hazlecast", - "description": "In computing, Hazelcast is an in-memory open source software data grid based on Java. By having multiple nodes form a cluster, data is evenly distributed among the nodes. This allows for horizontal scaling both in terms of available storage space and processing power. Backups are also distributed in a similar fashion to other nodes, based on configuration, thereby protecting against single node failure. 
(http://en.wikipedia.org/wiki/Hazelcast)", - "properties": { - "sw_database_hazlecast_version": { - "title": "Version", - "description": "The specific version of Hazlecast.", - "type": "string" - }, - "sw_database_hazlecast_port": { - "title": "Listen Port", - "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Hazlecast is to listen for connections between members.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 5701 - } - } - } - ] -} diff --git a/etc/metadefs/software-runtimes.json b/etc/metadefs/software-runtimes.json deleted file mode 100644 index 78070728..00000000 --- a/etc/metadefs/software-runtimes.json +++ /dev/null @@ -1,77 +0,0 @@ -{ - "namespace": "OS::Software::Runtimes", - "display_name": "Runtime Environment", - "description": "Software is written in a specific programming language and the language must execute within a runtime environment. The runtime environment provides an abstraction to utilizing a computer's processor, memory (RAM), and other system resources.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image" - }, - { - "name": "OS::Cinder::Volume", - "properties_target": "image" - }, - { - "name": "OS::Nova::Server", - "properties_target": "metadata" - } - ], - "objects": [ - { - "name": "PHP", - "description": "PHP is a server-side scripting language designed for web development but also used as a general-purpose programming language. PHP code can be simply mixed with HTML code, or it can be used in combination with various templating engines and web frameworks. PHP code is usually processed by a PHP interpreter, which is usually implemented as a web server's native module or a Common Gateway Interface (CGI) executable. 
After the PHP code is interpreted and executed, the web server sends resulting output to its client, usually in form of a part of the generated web page – for example, PHP code can generate a web page's HTML code, an image, or some other data. PHP has also evolved to include a command-line interface (CLI) capability and can be used in standalone graphical applications. (http://en.wikipedia.org/wiki/PHP)", - "properties": { - "sw_runtime_php_version": { - "title": "Version", - "description": "The specific version of PHP.", - "type": "string" - } - } - }, - { - "name": "Python", - "description": "Python is a widely used general-purpose, high-level programming language. Its design philosophy emphasizes code readability, and its syntax allows programmers to express concepts in fewer lines of code than would be possible in languages such as C++ or Java. The language provides constructs intended to enable clear programs on both a small and large scale. Python supports multiple programming paradigms, including object-oriented, imperative and functional programming or procedural styles. It features a dynamic type system and automatic memory management and has a large and comprehensive standard library. (http://en.wikipedia.org/wiki/Python_(programming_language))", - "properties": { - "sw_runtime_python_version": { - "title": "Version", - "description": "The specific version of python.", - "type": "string" - } - } - }, - { - "name": "Java", - "description": "Java is a functional computer programming language that is concurrent, class-based, object-oriented, and specifically designed to have as few implementation dependencies as possible. It is intended to let application developers write once, run anywhere (WORA), meaning that code that runs on one platform does not need to be recompiled to run on another. Java applications are typically compiled to bytecode that can run on any Java virtual machine (JVM) regardless of computer architecture. 
(http://en.wikipedia.org/wiki/Java_(programming_language))", - "properties": { - "sw_runtime_java_version": { - "title": "Version", - "description": "The specific version of Java.", - "type": "string" - } - } - }, - { - "name": "Ruby", - "description": "Ruby is a dynamic, reflective, object-oriented, general-purpose programming language. It was designed and developed in the mid-1990s by Yukihiro Matsumoto in Japan. According to its authors, Ruby was influenced by Perl, Smalltalk, Eiffel, Ada, and Lisp. It supports multiple programming paradigms, including functional, object-oriented, and imperative. It also has a dynamic type system and automatic memory management. (http://en.wikipedia.org/wiki/Python_(programming_language))", - "properties": { - "sw_runtime_ruby_version": { - "title": "Version", - "description": "The specific version of Ruby.", - "type": "string" - } - } - }, - { - "name": "Perl", - "description": "Perl is a family of high-level, general-purpose, interpreted, dynamic programming languages. The languages in this family include Perl 5 and Perl 6. Though Perl is not officially an acronym, there are various backronyms in use, the most well-known being Practical Extraction and Reporting Language (http://en.wikipedia.org/wiki/Perl)", - "properties": { - "sw_runtime_perl_version": { - "title": "Version", - "description": "The specific version of Perl.", - "type": "string" - } - } - } - ] -} diff --git a/etc/metadefs/software-webservers.json b/etc/metadefs/software-webservers.json deleted file mode 100644 index 3fd7540c..00000000 --- a/etc/metadefs/software-webservers.json +++ /dev/null @@ -1,103 +0,0 @@ -{ - "namespace": "OS::Software::WebServers", - "display_name": "Web Servers", - "description": "A web server is a computer system that processes requests via HTTP, the basic network protocol used to distribute information on the World Wide Web. 
The most common use of web servers is to host websites, but there are other uses such as gaming, data storage, running enterprise applications, handling email, FTP, or other web uses. (http://en.wikipedia.org/wiki/Web_server)", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image" - }, - { - "name": "OS::Cinder::Volume", - "properties_target": "image" - }, - { - "name": "OS::Nova::Server", - "properties_target": "metadata" - } - ], - "objects": [ - { - "name": "Apache HTTP Server", - "description": "The Apache HTTP Server, colloquially called Apache, is a Web server application notable for playing a key role in the initial growth of the World Wide Web. Apache is developed and maintained by an open community of developers under the auspices of the Apache Software Foundation. Most commonly used on a Unix-like system, the software is available for a wide variety of operating systems, including Unix, FreeBSD, Linux, Solaris, Novell NetWare, OS X, Microsoft Windows, OS/2, TPF, OpenVMS and eComStation. Released under the Apache License, Apache is open-source software. 
(http://en.wikipedia.org/wiki/Apache_HTTP_Server)", - "properties": { - "sw_webserver_apache_version": { - "title": "Version", - "description": "The specific version of Apache.", - "type": "string" - }, - "sw_webserver_apache_http_port": { - "title": "HTTP Port", - "description": "The configured TCP/IP port on which the web server listens for incoming HTTP connections.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 80 - }, - "sw_webserver_apache_https_port": { - "title": "HTTPS Port", - "description": "The configured TCP/IP port on which the web server listens for incoming HTTPS connections.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 443 - } - } - }, - { - "name": "Nginx", - "description": "Nginx (pronounced 'engine-x') is an open source reverse proxy server for HTTP, HTTPS, SMTP, POP3, and IMAP protocols, as well as a load balancer, HTTP cache, and a web server (origin server). The nginx project started with a strong focus on high concurrency, high performance and low memory usage. It is licensed under the 2-clause BSD-like license and it runs on Linux, BSD variants, Mac OS X, Solaris, AIX, HP-UX, as well as on other *nix flavors. It also has a proof of concept port for Microsoft Windows. 
(http://en.wikipedia.org/wiki/Nginx)", - "properties": { - "sw_webserver_nginx_version": { - "title": "Version", - "description": "The specific version of Nginx.", - "type": "string" - }, - "sw_webserver_nginx_http_port": { - "title": "HTTP Port", - "description": "The configured TCP/IP port on which the web server listens for incoming HTTP connections.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 80 - }, - "sw_webserver_nginx_https_port": { - "title": "HTTPS Port", - "description": "The configured TCP/IP port on which the web server listens for incoming HTTPS connections.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 443 - } - } - }, - { - "name": "IIS", - "description": "Internet Information Services (IIS, formerly Internet Information Server) is an extensible web server created by Microsoft. IIS supports HTTP, HTTPS, FTP, FTPS, SMTP and NNTP. IIS is not turned on by default when Windows is installed. The IIS Manager is accessed through the Microsoft Management Console or Administrative Tools in the Control Panel. 
(http://en.wikipedia.org/wiki/Internet_Information_Services)", - "properties": { - "sw_webserver_iis_version": { - "title": "Version", - "description": "The specific version of IIS.", - "type": "string" - }, - "sw_webserver_iis_http_port": { - "title": "HTTP Port", - "description": "The configured TCP/IP port on which the web server listens for incoming HTTP connections.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 80 - }, - "sw_webserver_iis_https_port": { - "title": "HTTPS Port", - "description": "The configured TCP/IP port on which the web server listens for incoming HTTPS connections.", - "type": "integer", - "minimum": 1, - "maximum": 65535, - "default": 443 - } - } - } - ] -} diff --git a/etc/metadefs/storage-volume-type.json b/etc/metadefs/storage-volume-type.json deleted file mode 100644 index 76d26e51..00000000 --- a/etc/metadefs/storage-volume-type.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "namespace": "OS::Cinder::Volumetype", - "display_name": "Cinder Volume Type", - "description": "The Cinder volume type configuration option. Volume type assignment provides a mechanism not only to provide scheduling to a specific storage back-end, but also can be used to specify specific information for a back-end storage device to act upon.", - "visibility": "public", - "protected": true, - "resource_type_associations": [ - { - "name": "OS::Glance::Image", - "prefix": "cinder_" - } - ], - "properties": { - "img_volume_type": { - "title": "Image Volume Type", - "description": "Specifies the volume type that should be applied during new volume creation with a image. This value is passed to Cinder when creating a new volume. Priority of volume type related parameters are 1.volume_type(via API or CLI), 2.cinder_img_volume_type, 3.default_volume_type(via cinder.conf). 
If not specified, volume_type or default_volume_type will be referred based on their priority.", - "type": "string" - } - } -} diff --git a/etc/oslo-config-generator/glance-api.conf b/etc/oslo-config-generator/glance-api.conf deleted file mode 100644 index 31bdbaab..00000000 --- a/etc/oslo-config-generator/glance-api.conf +++ /dev/null @@ -1,14 +0,0 @@ -[DEFAULT] -wrap_width = 80 -output_file = etc/glance-api.conf.sample -namespace = glance.api -namespace = glance.store -namespace = oslo.concurrency -namespace = oslo.messaging -namespace = oslo.db -namespace = oslo.db.concurrency -namespace = oslo.policy -namespace = keystonemiddleware.auth_token -namespace = oslo.log -namespace = oslo.middleware.cors -namespace = oslo.middleware.http_proxy_to_wsgi diff --git a/etc/oslo-config-generator/glance-cache.conf b/etc/oslo-config-generator/glance-cache.conf deleted file mode 100644 index 9bc1ff6b..00000000 --- a/etc/oslo-config-generator/glance-cache.conf +++ /dev/null @@ -1,7 +0,0 @@ -[DEFAULT] -wrap_width = 80 -output_file = etc/glance-cache.conf.sample -namespace = glance.cache -namespace = glance.store -namespace = oslo.log -namespace = oslo.policy diff --git a/etc/oslo-config-generator/glance-manage.conf b/etc/oslo-config-generator/glance-manage.conf deleted file mode 100644 index 21e509d0..00000000 --- a/etc/oslo-config-generator/glance-manage.conf +++ /dev/null @@ -1,7 +0,0 @@ -[DEFAULT] -wrap_width = 80 -output_file = etc/glance-manage.conf.sample -namespace = glance.manage -namespace = oslo.db -namespace = oslo.db.concurrency -namespace = oslo.log diff --git a/etc/oslo-config-generator/glance-registry.conf b/etc/oslo-config-generator/glance-registry.conf deleted file mode 100644 index 388e394e..00000000 --- a/etc/oslo-config-generator/glance-registry.conf +++ /dev/null @@ -1,10 +0,0 @@ -[DEFAULT] -wrap_width = 80 -output_file = etc/glance-registry.conf.sample -namespace = glance.registry -namespace = oslo.messaging -namespace = oslo.db -namespace = 
oslo.db.concurrency -namespace = oslo.policy -namespace = keystonemiddleware.auth_token -namespace = oslo.log diff --git a/etc/oslo-config-generator/glance-scrubber.conf b/etc/oslo-config-generator/glance-scrubber.conf deleted file mode 100644 index d370f603..00000000 --- a/etc/oslo-config-generator/glance-scrubber.conf +++ /dev/null @@ -1,10 +0,0 @@ -[DEFAULT] -wrap_width = 80 -output_file = etc/glance-scrubber.conf.sample -namespace = glance.scrubber -namespace = glance.store -namespace = oslo.concurrency -namespace = oslo.db -namespace = oslo.db.concurrency -namespace = oslo.log -namespace = oslo.policy diff --git a/etc/ovf-metadata.json.sample b/etc/ovf-metadata.json.sample deleted file mode 100644 index 38628219..00000000 --- a/etc/ovf-metadata.json.sample +++ /dev/null @@ -1,8 +0,0 @@ -{ - "cim_pasd": [ - "ProcessorArchitecture", - "InstructionSet", - "InstructionSetExtensionName" - ] -} - diff --git a/etc/policy.json b/etc/policy.json deleted file mode 100644 index fba54a7e..00000000 --- a/etc/policy.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "context_is_admin": "role:admin", - "default": "role:admin", - - "add_image": "", - "delete_image": "", - "get_image": "", - "get_images": "", - "modify_image": "", - "publicize_image": "role:admin", - "communitize_image": "", - "copy_from": "", - - "download_image": "", - "upload_image": "", - - "delete_image_location": "", - "get_image_location": "", - "set_image_location": "", - - "add_member": "", - "delete_member": "", - "get_member": "", - "get_members": "", - "modify_member": "", - - "manage_image_cache": "role:admin", - - "get_task": "role:admin", - "get_tasks": "role:admin", - "add_task": "role:admin", - "modify_task": "role:admin", - - "deactivate": "", - "reactivate": "", - - "get_metadef_namespace": "", - "get_metadef_namespaces":"", - "modify_metadef_namespace":"", - "add_metadef_namespace":"", - - "get_metadef_object":"", - "get_metadef_objects":"", - "modify_metadef_object":"", - "add_metadef_object":"", - 
- "list_metadef_resource_types":"", - "get_metadef_resource_type":"", - "add_metadef_resource_type_association":"", - - "get_metadef_property":"", - "get_metadef_properties":"", - "modify_metadef_property":"", - "add_metadef_property":"", - - "get_metadef_tag":"", - "get_metadef_tags":"", - "modify_metadef_tag":"", - "add_metadef_tag":"", - "add_metadef_tags":"" - -} diff --git a/etc/property-protections-policies.conf.sample b/etc/property-protections-policies.conf.sample deleted file mode 100644 index 38f611e5..00000000 --- a/etc/property-protections-policies.conf.sample +++ /dev/null @@ -1,34 +0,0 @@ -# property-protections-policies.conf.sample -# -# This file is an example config file for when -# property_protection_rule_format=policies is enabled. -# -# Specify regular expression for which properties will be protected in [] -# For each section, specify CRUD permissions. You may refer to policies defined -# in policy.json. -# The property rules will be applied in the order specified. Once -# a match is found the remaining property rules will not be applied. -# -# WARNING: -# * If the reg ex specified below does not compile, then -# the glance-api service fails to start. (Guide for reg ex python compiler -# used: -# http://docs.python.org/2/library/re.html#regular-expression-syntax) -# * If an operation(create, read, update, delete) is not specified or misspelt -# then the glance-api service fails to start. -# So, remember, with GREAT POWER comes GREAT RESPONSIBILITY! -# -# NOTE: Only one policy can be specified per action. If multiple policies are -# specified, then the glance-api service fails to start. 
- -[^x_.*] -create = default -read = default -update = default -delete = default - -[.*] -create = context_is_admin -read = context_is_admin -update = context_is_admin -delete = context_is_admin diff --git a/etc/property-protections-roles.conf.sample b/etc/property-protections-roles.conf.sample deleted file mode 100644 index ff824aea..00000000 --- a/etc/property-protections-roles.conf.sample +++ /dev/null @@ -1,32 +0,0 @@ -# property-protections-roles.conf.sample -# -# This file is an example config file for when -# property_protection_rule_format=roles is enabled. -# -# Specify regular expression for which properties will be protected in [] -# For each section, specify CRUD permissions. -# The property rules will be applied in the order specified. Once -# a match is found the remaining property rules will not be applied. -# -# WARNING: -# * If the reg ex specified below does not compile, then -# glance-api service will not start. (Guide for reg ex python compiler used: -# http://docs.python.org/2/library/re.html#regular-expression-syntax) -# * If an operation(create, read, update, delete) is not specified or misspelt -# then the glance-api service will not start. -# So, remember, with GREAT POWER comes GREAT RESPONSIBILITY! -# -# NOTE: Multiple roles can be specified for a given operation. These roles must -# be comma separated. - -[^x_.*] -create = admin,member,_member_ -read = admin,member,_member_ -update = admin,member,_member_ -delete = admin,member,_member_ - -[.*] -create = admin -read = admin -update = admin -delete = admin diff --git a/etc/schema-image.json b/etc/schema-image.json deleted file mode 100644 index 1b7ca5f1..00000000 --- a/etc/schema-image.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "kernel_id": { - "type": ["null", "string"], - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image." 
- }, - "ramdisk_id": { - "type": ["null", "string"], - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image." - }, - "instance_uuid": { - "type": "string", - "description": "Metadata which can be used to record which instance this image is associated with. (Informational only, does not create an instance snapshot.)" - }, - "architecture": { - "description": "Operating system architecture as specified in https://docs.openstack.org/cli-reference/glance-property-keys.html", - "type": "string" - }, - "os_distro": { - "description": "Common name of operating system distribution as specified in https://docs.openstack.org/cli-reference/glance-property-keys.html", - "type": "string" - }, - "os_version": { - "description": "Operating system version as specified by the distributor", - "type": "string" - } -} diff --git a/glance/__init__.py b/glance/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/api/__init__.py b/glance/api/__init__.py deleted file mode 100644 index df41d7ad..00000000 --- a/glance/api/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2011-2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -import paste.urlmap - -CONF = cfg.CONF - - -def root_app_factory(loader, global_conf, **local_conf): - if not CONF.enable_v1_api and '/v1' in local_conf: - del local_conf['/v1'] - if not CONF.enable_v2_api and '/v2' in local_conf: - del local_conf['/v2'] - return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) diff --git a/glance/api/authorization.py b/glance/api/authorization.py deleted file mode 100644 index 8a4fbc94..00000000 --- a/glance/api/authorization.py +++ /dev/null @@ -1,915 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -from glance.common import exception -import glance.domain.proxy -from glance.i18n import _ - - -def is_image_mutable(context, image): - """Return True if the image is mutable in this context.""" - if context.is_admin: - return True - - if image.owner is None or context.owner is None: - return False - - return image.owner == context.owner - - -def proxy_image(context, image): - if is_image_mutable(context, image): - return ImageProxy(image, context) - else: - return ImmutableImageProxy(image, context) - - -def is_member_mutable(context, member): - """Return True if the image is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - return member.member_id == context.owner - - -def proxy_member(context, member): - if is_member_mutable(context, member): - return member - else: - return ImmutableMemberProxy(member) - - -def is_task_mutable(context, task): - """Return True if the task is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - return task.owner == context.owner - - -def is_task_stub_mutable(context, task_stub): - """Return True if the task stub is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - return task_stub.owner == context.owner - - -def proxy_task(context, task): - if is_task_mutable(context, task): - return task - else: - return ImmutableTaskProxy(task) - - -def proxy_task_stub(context, task_stub): - if is_task_stub_mutable(context, task_stub): - return task_stub - else: - return ImmutableTaskStubProxy(task_stub) - - -class ImageRepoProxy(glance.domain.proxy.Repo): - - def __init__(self, image_repo, context): - self.context = context - self.image_repo = image_repo - proxy_kwargs = {'context': self.context} - super(ImageRepoProxy, self).__init__(image_repo, - item_proxy_class=ImageProxy, - item_proxy_kwargs=proxy_kwargs) - - def get(self, image_id): - 
image = self.image_repo.get(image_id) - return proxy_image(self.context, image) - - def list(self, *args, **kwargs): - images = self.image_repo.list(*args, **kwargs) - return [proxy_image(self.context, i) for i in images] - - -def _validate_image_accepts_members(visibility): - if visibility != 'shared': - message = _("Only shared images have members.") - raise exception.Forbidden(message) - - -class ImageMemberRepoProxy(glance.domain.proxy.MemberRepo): - - def __init__(self, member_repo, image, context): - self.member_repo = member_repo - self.image = image - self.context = context - proxy_kwargs = {'context': self.context} - super(ImageMemberRepoProxy, self).__init__( - image, - member_repo, - member_proxy_class=ImageMemberProxy, - member_proxy_kwargs=proxy_kwargs) - _validate_image_accepts_members(self.image.visibility) - - def get(self, member_id): - if (self.context.is_admin or - self.context.owner in (self.image.owner, member_id)): - member = self.member_repo.get(member_id) - return proxy_member(self.context, member) - else: - message = _("You cannot get image member for %s") - raise exception.Forbidden(message % member_id) - - def list(self, *args, **kwargs): - members = self.member_repo.list(*args, **kwargs) - if (self.context.is_admin or - self.context.owner == self.image.owner): - return [proxy_member(self.context, m) for m in members] - for member in members: - if member.member_id == self.context.owner: - return [proxy_member(self.context, member)] - message = _("You cannot get image member for %s") - raise exception.Forbidden(message % self.image.image_id) - - def remove(self, image_member): - if (self.image.owner == self.context.owner or - self.context.is_admin): - self.member_repo.remove(image_member) - else: - message = _("You cannot delete image member for %s") - raise exception.Forbidden(message - % self.image.image_id) - - def add(self, image_member): - if (self.image.owner == self.context.owner or - self.context.is_admin): - 
self.member_repo.add(image_member) - else: - message = _("You cannot add image member for %s") - raise exception.Forbidden(message - % self.image.image_id) - - def save(self, image_member, from_state=None): - if (self.context.is_admin or - self.context.owner == image_member.member_id): - self.member_repo.save(image_member, from_state=from_state) - else: - message = _("You cannot update image member %s") - raise exception.Forbidden(message % image_member.member_id) - - -class ImageFactoryProxy(glance.domain.proxy.ImageFactory): - - def __init__(self, image_factory, context): - self.image_factory = image_factory - self.context = context - kwargs = {'context': self.context} - super(ImageFactoryProxy, self).__init__(image_factory, - proxy_class=ImageProxy, - proxy_kwargs=kwargs) - - def new_image(self, **kwargs): - owner = kwargs.pop('owner', self.context.owner) - - if not self.context.is_admin: - if owner is None or owner != self.context.owner: - message = _("You are not permitted to create images " - "owned by '%s'.") - raise exception.Forbidden(message % owner) - - return super(ImageFactoryProxy, self).new_image(owner=owner, **kwargs) - - -class ImageMemberFactoryProxy(glance.domain.proxy.ImageMembershipFactory): - - def __init__(self, image_member_factory, context): - self.image_member_factory = image_member_factory - self.context = context - kwargs = {'context': self.context} - super(ImageMemberFactoryProxy, self).__init__( - image_member_factory, - proxy_class=ImageMemberProxy, - proxy_kwargs=kwargs) - - def new_image_member(self, image, member_id): - owner = image.owner - - if not self.context.is_admin: - if owner is None or owner != self.context.owner: - message = _("You are not permitted to create image members " - "for the image.") - raise exception.Forbidden(message) - - _validate_image_accepts_members(image.visibility) - - return self.image_member_factory.new_image_member(image, member_id) - - -def _immutable_attr(target, attr, proxy=None): - - def 
get_attr(self): - value = getattr(getattr(self, target), attr) - if proxy is not None: - value = proxy(value) - return value - - def forbidden(self, *args, **kwargs): - resource = getattr(self, 'resource_name', 'resource') - message = _("You are not permitted to modify '%(attr)s' on this " - "%(resource)s.") - raise exception.Forbidden(message % {'attr': attr, - 'resource': resource}) - - return property(get_attr, forbidden, forbidden) - - -class ImmutableLocations(list): - def forbidden(self, *args, **kwargs): - message = _("You are not permitted to modify locations " - "for this image.") - raise exception.Forbidden(message) - - def __deepcopy__(self, memo): - return ImmutableLocations(copy.deepcopy(list(self), memo)) - - append = forbidden - extend = forbidden - insert = forbidden - pop = forbidden - remove = forbidden - reverse = forbidden - sort = forbidden - __delitem__ = forbidden - __delslice__ = forbidden - __iadd__ = forbidden - __imul__ = forbidden - __setitem__ = forbidden - __setslice__ = forbidden - - -class ImmutableProperties(dict): - def forbidden_key(self, key, *args, **kwargs): - message = _("You are not permitted to modify '%s' on this image.") - raise exception.Forbidden(message % key) - - def forbidden(self, *args, **kwargs): - message = _("You are not permitted to modify this image.") - raise exception.Forbidden(message) - - __delitem__ = forbidden_key - __setitem__ = forbidden_key - pop = forbidden - popitem = forbidden - setdefault = forbidden - update = forbidden - - -class ImmutableTags(set): - def forbidden(self, *args, **kwargs): - message = _("You are not permitted to modify tags on this image.") - raise exception.Forbidden(message) - - add = forbidden - clear = forbidden - difference_update = forbidden - intersection_update = forbidden - pop = forbidden - remove = forbidden - symmetric_difference_update = forbidden - update = forbidden - - -class ImmutableImageProxy(object): - def __init__(self, base, context): - self.base = base - 
self.context = context - self.resource_name = 'image' - - name = _immutable_attr('base', 'name') - image_id = _immutable_attr('base', 'image_id') - status = _immutable_attr('base', 'status') - created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - visibility = _immutable_attr('base', 'visibility') - min_disk = _immutable_attr('base', 'min_disk') - min_ram = _immutable_attr('base', 'min_ram') - protected = _immutable_attr('base', 'protected') - locations = _immutable_attr('base', 'locations', proxy=ImmutableLocations) - checksum = _immutable_attr('base', 'checksum') - owner = _immutable_attr('base', 'owner') - disk_format = _immutable_attr('base', 'disk_format') - container_format = _immutable_attr('base', 'container_format') - size = _immutable_attr('base', 'size') - virtual_size = _immutable_attr('base', 'virtual_size') - extra_properties = _immutable_attr('base', 'extra_properties', - proxy=ImmutableProperties) - tags = _immutable_attr('base', 'tags', proxy=ImmutableTags) - - def delete(self): - message = _("You are not permitted to delete this image.") - raise exception.Forbidden(message) - - def get_data(self, *args, **kwargs): - return self.base.get_data(*args, **kwargs) - - def set_data(self, *args, **kwargs): - message = _("You are not permitted to upload data for this image.") - raise exception.Forbidden(message) - - def deactivate(self, *args, **kwargs): - message = _("You are not permitted to deactivate this image.") - raise exception.Forbidden(message) - - def reactivate(self, *args, **kwargs): - message = _("You are not permitted to reactivate this image.") - raise exception.Forbidden(message) - - -class ImmutableMemberProxy(object): - def __init__(self, base): - self.base = base - self.resource_name = 'image member' - - id = _immutable_attr('base', 'id') - image_id = _immutable_attr('base', 'image_id') - member_id = _immutable_attr('base', 'member_id') - status = _immutable_attr('base', 'status') - 
created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - - -class ImmutableTaskProxy(object): - def __init__(self, base): - self.base = base - self.resource_name = 'task' - - task_id = _immutable_attr('base', 'task_id') - type = _immutable_attr('base', 'type') - status = _immutable_attr('base', 'status') - owner = _immutable_attr('base', 'owner') - expires_at = _immutable_attr('base', 'expires_at') - created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - input = _immutable_attr('base', 'input') - message = _immutable_attr('base', 'message') - result = _immutable_attr('base', 'result') - - def run(self, executor): - self.base.run(executor) - - def begin_processing(self): - message = _("You are not permitted to set status on this task.") - raise exception.Forbidden(message) - - def succeed(self, result): - message = _("You are not permitted to set status on this task.") - raise exception.Forbidden(message) - - def fail(self, message): - message = _("You are not permitted to set status on this task.") - raise exception.Forbidden(message) - - -class ImmutableTaskStubProxy(object): - def __init__(self, base): - self.base = base - self.resource_name = 'task stub' - - task_id = _immutable_attr('base', 'task_id') - type = _immutable_attr('base', 'type') - status = _immutable_attr('base', 'status') - owner = _immutable_attr('base', 'owner') - expires_at = _immutable_attr('base', 'expires_at') - created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - - -class ImageProxy(glance.domain.proxy.Image): - - def __init__(self, image, context): - self.image = image - self.context = context - super(ImageProxy, self).__init__(image) - - -class ImageMemberProxy(glance.domain.proxy.ImageMember): - - def __init__(self, image_member, context): - self.image_member = image_member - self.context = context - super(ImageMemberProxy, 
self).__init__(image_member) - - -class TaskProxy(glance.domain.proxy.Task): - - def __init__(self, task): - self.task = task - super(TaskProxy, self).__init__(task) - - -class TaskFactoryProxy(glance.domain.proxy.TaskFactory): - - def __init__(self, task_factory, context): - self.task_factory = task_factory - self.context = context - super(TaskFactoryProxy, self).__init__( - task_factory, - task_proxy_class=TaskProxy) - - def new_task(self, **kwargs): - owner = kwargs.get('owner', self.context.owner) - - # NOTE(nikhil): Unlike Images, Tasks are expected to have owner. - # We currently do not allow even admins to set the owner to None. - if owner is not None and (owner == self.context.owner - or self.context.is_admin): - return super(TaskFactoryProxy, self).new_task(**kwargs) - else: - message = _("You are not permitted to create this task with " - "owner as: %s") - raise exception.Forbidden(message % owner) - - -class TaskRepoProxy(glance.domain.proxy.TaskRepo): - - def __init__(self, task_repo, context): - self.task_repo = task_repo - self.context = context - super(TaskRepoProxy, self).__init__(task_repo) - - def get(self, task_id): - task = self.task_repo.get(task_id) - return proxy_task(self.context, task) - - -class TaskStubRepoProxy(glance.domain.proxy.TaskStubRepo): - - def __init__(self, task_stub_repo, context): - self.task_stub_repo = task_stub_repo - self.context = context - super(TaskStubRepoProxy, self).__init__(task_stub_repo) - - def list(self, *args, **kwargs): - task_stubs = self.task_stub_repo.list(*args, **kwargs) - return [proxy_task_stub(self.context, t) for t in task_stubs] - - -# Metadef Namespace classes -def is_namespace_mutable(context, namespace): - """Return True if the namespace is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - return namespace.owner == context.owner - - -def proxy_namespace(context, namespace): - if is_namespace_mutable(context, namespace): - return 
namespace - else: - return ImmutableMetadefNamespaceProxy(namespace) - - -class ImmutableMetadefNamespaceProxy(object): - - def __init__(self, base): - self.base = base - self.resource_name = 'namespace' - - namespace_id = _immutable_attr('base', 'namespace_id') - namespace = _immutable_attr('base', 'namespace') - display_name = _immutable_attr('base', 'display_name') - description = _immutable_attr('base', 'description') - owner = _immutable_attr('base', 'owner') - visibility = _immutable_attr('base', 'visibility') - protected = _immutable_attr('base', 'protected') - created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - - def delete(self): - message = _("You are not permitted to delete this namespace.") - raise exception.Forbidden(message) - - def save(self): - message = _("You are not permitted to update this namespace.") - raise exception.Forbidden(message) - - -class MetadefNamespaceProxy(glance.domain.proxy.MetadefNamespace): - - def __init__(self, namespace): - self.namespace_input = namespace - super(MetadefNamespaceProxy, self).__init__(namespace) - - -class MetadefNamespaceFactoryProxy( - glance.domain.proxy.MetadefNamespaceFactory): - - def __init__(self, meta_namespace_factory, context): - self.meta_namespace_factory = meta_namespace_factory - self.context = context - super(MetadefNamespaceFactoryProxy, self).__init__( - meta_namespace_factory, - meta_namespace_proxy_class=MetadefNamespaceProxy) - - def new_namespace(self, **kwargs): - owner = kwargs.pop('owner', self.context.owner) - - if not self.context.is_admin: - if owner is None or owner != self.context.owner: - message = _("You are not permitted to create namespace " - "owned by '%s'") - raise exception.Forbidden(message % (owner)) - - return super(MetadefNamespaceFactoryProxy, self).new_namespace( - owner=owner, **kwargs) - - -class MetadefNamespaceRepoProxy(glance.domain.proxy.MetadefNamespaceRepo): - - def __init__(self, namespace_repo, 
context): - self.namespace_repo = namespace_repo - self.context = context - super(MetadefNamespaceRepoProxy, self).__init__(namespace_repo) - - def get(self, namespace): - namespace_obj = self.namespace_repo.get(namespace) - return proxy_namespace(self.context, namespace_obj) - - def list(self, *args, **kwargs): - namespaces = self.namespace_repo.list(*args, **kwargs) - return [proxy_namespace(self.context, namespace) for - namespace in namespaces] - - -# Metadef Object classes -def is_object_mutable(context, object): - """Return True if the object is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - return object.namespace.owner == context.owner - - -def proxy_object(context, object): - if is_object_mutable(context, object): - return object - else: - return ImmutableMetadefObjectProxy(object) - - -class ImmutableMetadefObjectProxy(object): - - def __init__(self, base): - self.base = base - self.resource_name = 'object' - - object_id = _immutable_attr('base', 'object_id') - name = _immutable_attr('base', 'name') - required = _immutable_attr('base', 'required') - description = _immutable_attr('base', 'description') - properties = _immutable_attr('base', 'properties') - created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - - def delete(self): - message = _("You are not permitted to delete this object.") - raise exception.Forbidden(message) - - def save(self): - message = _("You are not permitted to update this object.") - raise exception.Forbidden(message) - - -class MetadefObjectProxy(glance.domain.proxy.MetadefObject): - - def __init__(self, meta_object): - self.meta_object = meta_object - super(MetadefObjectProxy, self).__init__(meta_object) - - -class MetadefObjectFactoryProxy(glance.domain.proxy.MetadefObjectFactory): - - def __init__(self, meta_object_factory, context): - self.meta_object_factory = meta_object_factory - self.context = context - 
super(MetadefObjectFactoryProxy, self).__init__( - meta_object_factory, - meta_object_proxy_class=MetadefObjectProxy) - - def new_object(self, **kwargs): - owner = kwargs.pop('owner', self.context.owner) - - if not self.context.is_admin: - if owner is None or owner != self.context.owner: - message = _("You are not permitted to create object " - "owned by '%s'") - raise exception.Forbidden(message % (owner)) - - return super(MetadefObjectFactoryProxy, self).new_object(**kwargs) - - -class MetadefObjectRepoProxy(glance.domain.proxy.MetadefObjectRepo): - - def __init__(self, object_repo, context): - self.object_repo = object_repo - self.context = context - super(MetadefObjectRepoProxy, self).__init__(object_repo) - - def get(self, namespace, object_name): - meta_object = self.object_repo.get(namespace, object_name) - return proxy_object(self.context, meta_object) - - def list(self, *args, **kwargs): - objects = self.object_repo.list(*args, **kwargs) - return [proxy_object(self.context, meta_object) for - meta_object in objects] - - -# Metadef ResourceType classes -def is_meta_resource_type_mutable(context, meta_resource_type): - """Return True if the meta_resource_type is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - # (lakshmiS): resource type can exist without an association with - # namespace and resource type cannot be created/update/deleted directly( - # they have to be associated/de-associated from namespace) - if meta_resource_type.namespace: - return meta_resource_type.namespace.owner == context.owner - else: - return False - - -def proxy_meta_resource_type(context, meta_resource_type): - if is_meta_resource_type_mutable(context, meta_resource_type): - return meta_resource_type - else: - return ImmutableMetadefResourceTypeProxy(meta_resource_type) - - -class ImmutableMetadefResourceTypeProxy(object): - - def __init__(self, base): - self.base = base - self.resource_name = 'meta_resource_type' - 
- namespace = _immutable_attr('base', 'namespace') - name = _immutable_attr('base', 'name') - prefix = _immutable_attr('base', 'prefix') - properties_target = _immutable_attr('base', 'properties_target') - created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - - def delete(self): - message = _("You are not permitted to delete this meta_resource_type.") - raise exception.Forbidden(message) - - -class MetadefResourceTypeProxy(glance.domain.proxy.MetadefResourceType): - - def __init__(self, meta_resource_type): - self.meta_resource_type = meta_resource_type - super(MetadefResourceTypeProxy, self).__init__(meta_resource_type) - - -class MetadefResourceTypeFactoryProxy( - glance.domain.proxy.MetadefResourceTypeFactory): - - def __init__(self, resource_type_factory, context): - self.meta_resource_type_factory = resource_type_factory - self.context = context - super(MetadefResourceTypeFactoryProxy, self).__init__( - resource_type_factory, - resource_type_proxy_class=MetadefResourceTypeProxy) - - def new_resource_type(self, **kwargs): - owner = kwargs.pop('owner', self.context.owner) - - if not self.context.is_admin: - if owner is None or owner != self.context.owner: - message = _("You are not permitted to create resource_type " - "owned by '%s'") - raise exception.Forbidden(message % (owner)) - - return super(MetadefResourceTypeFactoryProxy, self).new_resource_type( - **kwargs) - - -class MetadefResourceTypeRepoProxy( - glance.domain.proxy.MetadefResourceTypeRepo): - - def __init__(self, meta_resource_type_repo, context): - self.meta_resource_type_repo = meta_resource_type_repo - self.context = context - super(MetadefResourceTypeRepoProxy, self).__init__( - meta_resource_type_repo) - - def list(self, *args, **kwargs): - meta_resource_types = self.meta_resource_type_repo.list( - *args, **kwargs) - return [proxy_meta_resource_type(self.context, meta_resource_type) for - meta_resource_type in meta_resource_types] - - def 
get(self, *args, **kwargs): - meta_resource_type = self.meta_resource_type_repo.get(*args, **kwargs) - return proxy_meta_resource_type(self.context, meta_resource_type) - - -# Metadef namespace properties classes -def is_namespace_property_mutable(context, namespace_property): - """Return True if the object is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - return namespace_property.namespace.owner == context.owner - - -def proxy_namespace_property(context, namespace_property): - if is_namespace_property_mutable(context, namespace_property): - return namespace_property - else: - return ImmutableMetadefPropertyProxy(namespace_property) - - -class ImmutableMetadefPropertyProxy(object): - - def __init__(self, base): - self.base = base - self.resource_name = 'namespace_property' - - property_id = _immutable_attr('base', 'property_id') - name = _immutable_attr('base', 'name') - schema = _immutable_attr('base', 'schema') - - def delete(self): - message = _("You are not permitted to delete this property.") - raise exception.Forbidden(message) - - def save(self): - message = _("You are not permitted to update this property.") - raise exception.Forbidden(message) - - -class MetadefPropertyProxy(glance.domain.proxy.MetadefProperty): - - def __init__(self, namespace_property): - self.meta_object = namespace_property - super(MetadefPropertyProxy, self).__init__(namespace_property) - - -class MetadefPropertyFactoryProxy(glance.domain.proxy.MetadefPropertyFactory): - - def __init__(self, namespace_property_factory, context): - self.meta_object_factory = namespace_property_factory - self.context = context - super(MetadefPropertyFactoryProxy, self).__init__( - namespace_property_factory, - property_proxy_class=MetadefPropertyProxy) - - def new_namespace_property(self, **kwargs): - owner = kwargs.pop('owner', self.context.owner) - - if not self.context.is_admin: - if owner is None or owner != self.context.owner: - 
message = _("You are not permitted to create property " - "owned by '%s'") - raise exception.Forbidden(message % (owner)) - - return super(MetadefPropertyFactoryProxy, self).new_namespace_property( - **kwargs) - - -class MetadefPropertyRepoProxy(glance.domain.proxy.MetadefPropertyRepo): - - def __init__(self, namespace_property_repo, context): - self.namespace_property_repo = namespace_property_repo - self.context = context - super(MetadefPropertyRepoProxy, self).__init__(namespace_property_repo) - - def get(self, namespace, object_name): - namespace_property = self.namespace_property_repo.get(namespace, - object_name) - return proxy_namespace_property(self.context, namespace_property) - - def list(self, *args, **kwargs): - namespace_properties = self.namespace_property_repo.list( - *args, **kwargs) - return [proxy_namespace_property(self.context, namespace_property) for - namespace_property in namespace_properties] - - -# Metadef Tag classes -def is_tag_mutable(context, tag): - """Return True if the tag is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - return tag.namespace.owner == context.owner - - -def proxy_tag(context, tag): - if is_tag_mutable(context, tag): - return tag - else: - return ImmutableMetadefTagProxy(tag) - - -class ImmutableMetadefTagProxy(object): - - def __init__(self, base): - self.base = base - self.resource_name = 'tag' - - tag_id = _immutable_attr('base', 'tag_id') - name = _immutable_attr('base', 'name') - created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - - def delete(self): - message = _("You are not permitted to delete this tag.") - raise exception.Forbidden(message) - - def save(self): - message = _("You are not permitted to update this tag.") - raise exception.Forbidden(message) - - -class MetadefTagProxy(glance.domain.proxy.MetadefTag): - pass - - -class 
MetadefTagFactoryProxy(glance.domain.proxy.MetadefTagFactory): - - def __init__(self, meta_tag_factory, context): - self.meta_tag_factory = meta_tag_factory - self.context = context - super(MetadefTagFactoryProxy, self).__init__( - meta_tag_factory, - meta_tag_proxy_class=MetadefTagProxy) - - def new_tag(self, **kwargs): - owner = kwargs.pop('owner', self.context.owner) - if not self.context.is_admin: - if owner is None: - message = _("Owner must be specified to create a tag.") - raise exception.Forbidden(message) - elif owner != self.context.owner: - message = _("You are not permitted to create a tag" - " in the namespace owned by '%s'") - raise exception.Forbidden(message % (owner)) - - return super(MetadefTagFactoryProxy, self).new_tag(**kwargs) - - -class MetadefTagRepoProxy(glance.domain.proxy.MetadefTagRepo): - - def __init__(self, tag_repo, context): - self.tag_repo = tag_repo - self.context = context - super(MetadefTagRepoProxy, self).__init__(tag_repo) - - def get(self, namespace, tag_name): - meta_tag = self.tag_repo.get(namespace, tag_name) - return proxy_tag(self.context, meta_tag) - - def list(self, *args, **kwargs): - tags = self.tag_repo.list(*args, **kwargs) - return [proxy_tag(self.context, meta_tag) for - meta_tag in tags] diff --git a/glance/api/cached_images.py b/glance/api/cached_images.py deleted file mode 100644 index 04d1c0ce..00000000 --- a/glance/api/cached_images.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Controller for Image Cache Management API -""" - -from oslo_log import log as logging -import webob.exc - -from glance.api import policy -from glance.api.v1 import controller -from glance.common import exception -from glance.common import wsgi -from glance import image_cache - -LOG = logging.getLogger(__name__) - - -class Controller(controller.BaseController): - """ - A controller for managing cached images. - """ - - def __init__(self): - self.cache = image_cache.ImageCache() - self.policy = policy.Enforcer() - - def _enforce(self, req): - """Authorize request against 'manage_image_cache' policy""" - try: - self.policy.enforce(req.context, 'manage_image_cache', {}) - except exception.Forbidden: - LOG.debug("User not permitted to manage the image cache") - raise webob.exc.HTTPForbidden() - - def get_cached_images(self, req): - """ - GET /cached_images - - Returns a mapping of records about cached images. - """ - self._enforce(req) - images = self.cache.get_cached_images() - return dict(cached_images=images) - - def delete_cached_image(self, req, image_id): - """ - DELETE /cached_images/ - - Removes an image from the cache. - """ - self._enforce(req) - self.cache.delete_cached_image(image_id) - - def delete_cached_images(self, req): - """ - DELETE /cached_images - Clear all active cached images - - Removes all images from the cache. - """ - self._enforce(req) - return dict(num_deleted=self.cache.delete_all_cached_images()) - - def get_queued_images(self, req): - """ - GET /queued_images - - Returns a mapping of records about queued images. - """ - self._enforce(req) - images = self.cache.get_queued_images() - return dict(queued_images=images) - - def queue_image(self, req, image_id): - """ - PUT /queued_images/ - - Queues an image for caching. We do not check to see if - the image is in the registry here. That is done by the - prefetcher... 
- """ - self._enforce(req) - self.cache.queue_image(image_id) - - def delete_queued_image(self, req, image_id): - """ - DELETE /queued_images/ - - Removes an image from the cache. - """ - self._enforce(req) - self.cache.delete_queued_image(image_id) - - def delete_queued_images(self, req): - """ - DELETE /queued_images - Clear all active queued images - - Removes all images from the cache. - """ - self._enforce(req) - return dict(num_deleted=self.cache.delete_all_queued_images()) - - -class CachedImageDeserializer(wsgi.JSONRequestDeserializer): - pass - - -class CachedImageSerializer(wsgi.JSONResponseSerializer): - pass - - -def create_resource(): - """Cached Images resource factory method""" - deserializer = CachedImageDeserializer() - serializer = CachedImageSerializer() - return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/glance/api/common.py b/glance/api/common.py deleted file mode 100644 index 2d0699e7..00000000 --- a/glance/api/common.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import re - -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units - -from glance.common import exception -from glance.common import wsgi -from glance.i18n import _, _LE, _LW - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -_CACHED_THREAD_POOL = {} - - -def size_checked_iter(response, image_meta, expected_size, image_iter, - notifier): - image_id = image_meta['id'] - bytes_written = 0 - - def notify_image_sent_hook(env): - image_send_notification(bytes_written, expected_size, - image_meta, response.request, notifier) - - # Add hook to process after response is fully sent - if 'eventlet.posthooks' in response.request.environ: - response.request.environ['eventlet.posthooks'].append( - (notify_image_sent_hook, (), {})) - - try: - for chunk in image_iter: - yield chunk - bytes_written += len(chunk) - except Exception as err: - with excutils.save_and_reraise_exception(): - msg = (_LE("An error occurred reading from backend storage for " - "image %(image_id)s: %(err)s") % {'image_id': image_id, - 'err': err}) - LOG.error(msg) - - if expected_size != bytes_written: - msg = (_LE("Backend storage for image %(image_id)s " - "disconnected after writing only %(bytes_written)d " - "bytes") % {'image_id': image_id, - 'bytes_written': bytes_written}) - LOG.error(msg) - raise exception.GlanceException(_("Corrupt image download for " - "image %(image_id)s") % - {'image_id': image_id}) - - -def image_send_notification(bytes_written, expected_size, image_meta, request, - notifier): - """Send an image.send message to the notifier.""" - try: - context = request.context - payload = { - 'bytes_sent': bytes_written, - 'image_id': image_meta['id'], - 'owner_id': image_meta['owner'], - 'receiver_tenant_id': context.tenant, - 'receiver_user_id': context.user, - 'destination_ip': request.remote_addr, - } - if bytes_written != expected_size: - notify = notifier.error - 
else: - notify = notifier.info - - notify('image.send', payload) - - except Exception as err: - msg = (_LE("An error occurred during image.send" - " notification: %(err)s") % {'err': err}) - LOG.error(msg) - - -def get_remaining_quota(context, db_api, image_id=None): - """Method called to see if the user is allowed to store an image. - - Checks if it is allowed based on the given size in glance based on their - quota and current usage. - - :param context: - :param db_api: The db_api in use for this configuration - :param image_id: The image that will be replaced with this new data size - :returns: The number of bytes the user has remaining under their quota. - None means infinity - """ - - # NOTE(jbresnah) in the future this value will come from a call to - # keystone. - users_quota = CONF.user_storage_quota - - # set quota must have a number optionally followed by B, KB, MB, - # GB or TB without any spaces in between - pattern = re.compile('^(\d+)((K|M|G|T)?B)?$') - match = pattern.match(users_quota) - - if not match: - LOG.error(_LE("Invalid value for option user_storage_quota: " - "%(users_quota)s") - % {'users_quota': users_quota}) - raise exception.InvalidOptionValue(option='user_storage_quota', - value=users_quota) - - quota_value, quota_unit = (match.groups())[0:2] - # fall back to Bytes if user specified anything other than - # permitted values - quota_unit = quota_unit or "B" - factor = getattr(units, quota_unit.replace('B', 'i'), 1) - users_quota = int(quota_value) * factor - - if users_quota <= 0: - return - - usage = db_api.user_get_storage_usage(context, - context.owner, - image_id=image_id) - return users_quota - usage - - -def check_quota(context, image_size, db_api, image_id=None): - """Method called to see if the user is allowed to store an image. - - Checks if it is allowed based on the given size in glance based on their - quota and current usage. 
- - :param context: - :param image_size: The size of the image we hope to store - :param db_api: The db_api in use for this configuration - :param image_id: The image that will be replaced with this new data size - :returns: - """ - - remaining = get_remaining_quota(context, db_api, image_id=image_id) - - if remaining is None: - return - - user = getattr(context, 'user', '') - - if image_size is None: - # NOTE(jbresnah) When the image size is None it means that it is - # not known. In this case the only time we will raise an - # exception is when there is no room left at all, thus we know - # it will not fit - if remaining <= 0: - LOG.warn(_LW("User %(user)s attempted to upload an image of" - " unknown size that will exceed the quota." - " %(remaining)d bytes remaining.") - % {'user': user, 'remaining': remaining}) - raise exception.StorageQuotaFull(image_size=image_size, - remaining=remaining) - return - - if image_size > remaining: - LOG.warn(_LW("User %(user)s attempted to upload an image of size" - " %(size)d that will exceed the quota. %(remaining)d" - " bytes remaining.") - % {'user': user, 'size': image_size, 'remaining': remaining}) - raise exception.StorageQuotaFull(image_size=image_size, - remaining=remaining) - - return remaining - - -def memoize(lock_name): - def memoizer_wrapper(func): - @lockutils.synchronized(lock_name) - def memoizer(lock_name): - if lock_name not in _CACHED_THREAD_POOL: - _CACHED_THREAD_POOL[lock_name] = func() - - return _CACHED_THREAD_POOL[lock_name] - - return memoizer(lock_name) - - return memoizer_wrapper - - -def get_thread_pool(lock_name, size=1024): - """Initializes eventlet thread pool. - - If thread pool is present in cache, then returns it from cache - else create new pool, stores it in cache and return newly created - pool. - - @param lock_name: Name of the lock. - @param size: Size of eventlet pool. 
- - @return: eventlet pool - """ - @memoize(lock_name) - def _get_thread_pool(): - return wsgi.get_asynchronous_eventlet_pool(size=size) - - return _get_thread_pool diff --git a/glance/api/middleware/__init__.py b/glance/api/middleware/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/api/middleware/cache.py b/glance/api/middleware/cache.py deleted file mode 100644 index ae752358..00000000 --- a/glance/api/middleware/cache.py +++ /dev/null @@ -1,338 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Transparent image file caching middleware, designed to live on -Glance API nodes. When images are requested from the API node, -this middleware caches the returned image file to local filesystem. - -When subsequent requests for the same image file are received, -the local cached copy of the image file is returned. 
-""" - -import re -import six - -from oslo_log import log as logging -from six.moves import http_client as http -import webob - -from glance.api.common import size_checked_iter -from glance.api import policy -from glance.api.v1 import images -from glance.common import exception -from glance.common import utils -from glance.common import wsgi -import glance.db -from glance.i18n import _LE, _LI -from glance import image_cache -from glance import notifier -import glance.registry.client.v1.api as registry - -LOG = logging.getLogger(__name__) - -PATTERNS = { - ('v1', 'GET'): re.compile(r'^/v1/images/([^\/]+)$'), - ('v1', 'DELETE'): re.compile(r'^/v1/images/([^\/]+)$'), - ('v2', 'GET'): re.compile(r'^/v2/images/([^\/]+)/file$'), - ('v2', 'DELETE'): re.compile(r'^/v2/images/([^\/]+)$') -} - - -class CacheFilter(wsgi.Middleware): - - def __init__(self, app): - self.cache = image_cache.ImageCache() - self.serializer = images.ImageSerializer() - self.policy = policy.Enforcer() - LOG.info(_LI("Initialized image cache middleware")) - super(CacheFilter, self).__init__(app) - - def _verify_metadata(self, image_meta): - """ - Sanity check the 'deleted' and 'size' metadata values. - """ - # NOTE: admins can see image metadata in the v1 API, but shouldn't - # be able to download the actual image data. 
- if image_meta['status'] == 'deleted' and image_meta['deleted']: - raise exception.NotFound() - - if not image_meta['size']: - # override image size metadata with the actual cached - # file size, see LP Bug #900959 - image_meta['size'] = self.cache.get_image_size(image_meta['id']) - - @staticmethod - def _match_request(request): - """Determine the version of the url and extract the image id - - :returns: tuple of version and image id if the url is a cacheable, - otherwise None - """ - for ((version, method), pattern) in PATTERNS.items(): - if request.method != method: - continue - match = pattern.match(request.path_info) - if match is None: - continue - image_id = match.group(1) - # Ensure the image id we got looks like an image id to filter - # out a URI like /images/detail. See LP Bug #879136 - if image_id != 'detail': - return (version, method, image_id) - - def _enforce(self, req, action, target=None): - """Authorize an action against our policies""" - if target is None: - target = {} - try: - self.policy.enforce(req.context, action, target) - except exception.Forbidden as e: - LOG.debug("User not permitted to perform '%s' action", action) - raise webob.exc.HTTPForbidden(explanation=e.msg, request=req) - - def _get_v1_image_metadata(self, request, image_id): - """ - Retrieves image metadata using registry for v1 api and creates - dictionary-like mash-up of image core and custom properties. - """ - try: - image_metadata = registry.get_image_metadata(request.context, - image_id) - return utils.create_mashup_dict(image_metadata) - except exception.NotFound as e: - LOG.debug("No metadata found for image '%s'", image_id) - raise webob.exc.HTTPNotFound(explanation=e.msg, request=request) - - def _get_v2_image_metadata(self, request, image_id): - """ - Retrieves image and for v2 api and creates adapter like object - to access image core or custom properties on request. 
- """ - db_api = glance.db.get_api() - image_repo = glance.db.ImageRepo(request.context, db_api) - try: - image = image_repo.get(image_id) - # Storing image object in request as it is required in - # _process_v2_request call. - request.environ['api.cache.image'] = image - - return policy.ImageTarget(image) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg, request=request) - - def process_request(self, request): - """ - For requests for an image file, we check the local image - cache. If present, we return the image file, appending - the image metadata in headers. If not present, we pass - the request on to the next application in the pipeline. - """ - match = self._match_request(request) - try: - (version, method, image_id) = match - except TypeError: - # Trying to unpack None raises this exception - return None - - self._stash_request_info(request, image_id, method, version) - - # Partial image download requests shall not be served from cache - # Bug: 1664709 - # TODO(dharinic): If an image is already cached, add support to serve - # only the requested bytes (partial image download) from the cache. 
- if (request.headers.get('Content-Range') or - request.headers.get('Range')): - return None - - if request.method != 'GET' or not self.cache.is_cached(image_id): - return None - - method = getattr(self, '_get_%s_image_metadata' % version) - image_metadata = method(request, image_id) - - # Deactivated images shall not be served from cache - if image_metadata['status'] == 'deactivated': - return None - - try: - self._enforce(request, 'download_image', target=image_metadata) - except exception.Forbidden: - return None - - LOG.debug("Cache hit for image '%s'", image_id) - image_iterator = self.get_from_cache(image_id) - method = getattr(self, '_process_%s_request' % version) - - try: - return method(request, image_id, image_iterator, image_metadata) - except exception.ImageNotFound: - msg = _LE("Image cache contained image file for image '%s', " - "however the registry did not contain metadata for " - "that image!") % image_id - LOG.error(msg) - self.cache.delete_cached_image(image_id) - - @staticmethod - def _stash_request_info(request, image_id, method, version): - """ - Preserve the image id, version and request method for later retrieval - """ - request.environ['api.cache.image_id'] = image_id - request.environ['api.cache.method'] = method - request.environ['api.cache.version'] = version - - @staticmethod - def _fetch_request_info(request): - """ - Preserve the cached image id, version for consumption by the - process_response method of this middleware - """ - try: - image_id = request.environ['api.cache.image_id'] - method = request.environ['api.cache.method'] - version = request.environ['api.cache.version'] - except KeyError: - return None - else: - return (image_id, method, version) - - def _process_v1_request(self, request, image_id, image_iterator, - image_meta): - # Don't display location - if 'location' in image_meta: - del image_meta['location'] - image_meta.pop('location_data', None) - self._verify_metadata(image_meta) - - response = 
webob.Response(request=request) - raw_response = { - 'image_iterator': image_iterator, - 'image_meta': image_meta, - } - return self.serializer.show(response, raw_response) - - def _process_v2_request(self, request, image_id, image_iterator, - image_meta): - # We do some contortions to get the image_metadata so - # that we can provide it to 'size_checked_iter' which - # will generate a notification. - # TODO(mclaren): Make notification happen more - # naturally once caching is part of the domain model. - image = request.environ['api.cache.image'] - self._verify_metadata(image_meta) - response = webob.Response(request=request) - response.app_iter = size_checked_iter(response, image_meta, - image_meta['size'], - image_iterator, - notifier.Notifier()) - # NOTE (flwang): Set the content-type, content-md5 and content-length - # explicitly to be consistent with the non-cache scenario. - # Besides, it's not worth the candle to invoke the "download" method - # of ResponseSerializer under image_data. Because method "download" - # will reset the app_iter. Then we have to call method - # "size_checked_iter" to avoid missing any notification. But after - # call "size_checked_iter", we will lose the content-md5 and - # content-length got by the method "download" because of this issue: - # https://github.com/Pylons/webob/issues/86 - response.headers['Content-Type'] = 'application/octet-stream' - if image.checksum: - response.headers['Content-MD5'] = (image.checksum.encode('utf-8') - if six.PY2 else image.checksum) - response.headers['Content-Length'] = str(image.size) - return response - - def process_response(self, resp): - """ - We intercept the response coming back from the main - images Resource, removing image file from the cache - if necessary - """ - status_code = self.get_status_code(resp) - if not 200 <= status_code < 300: - return resp - - # Note(dharinic): Bug: 1664709: Do not cache partial images. 
- if status_code == http.PARTIAL_CONTENT: - return resp - - try: - (image_id, method, version) = self._fetch_request_info( - resp.request) - except TypeError: - return resp - - if method == 'GET' and status_code == http.NO_CONTENT: - # Bugfix:1251055 - Don't cache non-existent image files. - # NOTE: Both GET for an image without locations and DELETE return - # 204 but DELETE should be processed. - return resp - - method_str = '_process_%s_response' % method - try: - process_response_method = getattr(self, method_str) - except AttributeError: - LOG.error(_LE('could not find %s') % method_str) - # Nothing to do here, move along - return resp - else: - return process_response_method(resp, image_id, version=version) - - def _process_DELETE_response(self, resp, image_id, version=None): - if self.cache.is_cached(image_id): - LOG.debug("Removing image %s from cache", image_id) - self.cache.delete_cached_image(image_id) - return resp - - def _process_GET_response(self, resp, image_id, version=None): - image_checksum = resp.headers.get('Content-MD5') - if not image_checksum: - # API V1 stores the checksum in a different header: - image_checksum = resp.headers.get('x-image-meta-checksum') - - if not image_checksum: - LOG.error(_LE("Checksum header is missing.")) - - # fetch image_meta on the basis of version - image_metadata = None - if version: - method = getattr(self, '_get_%s_image_metadata' % version) - image_metadata = method(resp.request, image_id) - # NOTE(zhiyan): image_cache return a generator object and set to - # response.app_iter, it will be called by eventlet.wsgi later. - # So we need enforce policy firstly but do it by application - # since eventlet.wsgi could not catch webob.exc.HTTPForbidden and - # return 403 error to client then. 
- self._enforce(resp.request, 'download_image', target=image_metadata) - - resp.app_iter = self.cache.get_caching_iter(image_id, image_checksum, - resp.app_iter) - return resp - - def get_status_code(self, response): - """ - Returns the integer status code from the response, which - can be either a Webob.Response (used in testing) or httplib.Response - """ - if hasattr(response, 'status_int'): - return response.status_int - return response.status - - def get_from_cache(self, image_id): - """Called if cache hit""" - with self.cache.open_for_read(image_id) as cache_file: - chunks = utils.chunkiter(cache_file) - for chunk in chunks: - yield chunk diff --git a/glance/api/middleware/cache_manage.py b/glance/api/middleware/cache_manage.py deleted file mode 100644 index 77c69541..00000000 --- a/glance/api/middleware/cache_manage.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Image Cache Management API -""" - -from oslo_log import log as logging -import routes - -from glance.api import cached_images -from glance.common import wsgi -from glance.i18n import _LI - -LOG = logging.getLogger(__name__) - - -class CacheManageFilter(wsgi.Middleware): - def __init__(self, app): - mapper = routes.Mapper() - resource = cached_images.create_resource() - - mapper.connect("/v1/cached_images", - controller=resource, - action="get_cached_images", - conditions=dict(method=["GET"])) - - mapper.connect("/v1/cached_images/{image_id}", - controller=resource, - action="delete_cached_image", - conditions=dict(method=["DELETE"])) - - mapper.connect("/v1/cached_images", - controller=resource, - action="delete_cached_images", - conditions=dict(method=["DELETE"])) - - mapper.connect("/v1/queued_images/{image_id}", - controller=resource, - action="queue_image", - conditions=dict(method=["PUT"])) - - mapper.connect("/v1/queued_images", - controller=resource, - action="get_queued_images", - conditions=dict(method=["GET"])) - - mapper.connect("/v1/queued_images/{image_id}", - controller=resource, - action="delete_queued_image", - conditions=dict(method=["DELETE"])) - - mapper.connect("/v1/queued_images", - controller=resource, - action="delete_queued_images", - conditions=dict(method=["DELETE"])) - - self._mapper = mapper - self._resource = resource - - LOG.info(_LI("Initialized image cache management middleware")) - super(CacheManageFilter, self).__init__(app) - - def process_request(self, request): - # Map request to our resource object if we can handle it - match = self._mapper.match(request.path_info, request.environ) - if match: - request.environ['wsgiorg.routing_args'] = (None, match) - return self._resource(request) - # Pass off downstream if we don't match the request path - else: - return None diff --git a/glance/api/middleware/context.py b/glance/api/middleware/context.py deleted file mode 100644 index ef60da62..00000000 --- 
a/glance/api/middleware/context.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright 2011-2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -import webob.exc - -from glance.api import policy -from glance.common import wsgi -import glance.context -from glance.i18n import _, _LW - - -context_opts = [ - cfg.BoolOpt('owner_is_tenant', default=True, - help=_(""" -Set the image owner to tenant or the authenticated user. - -Assign a boolean value to determine the owner of an image. When set to -True, the owner of the image is the tenant. When set to False, the -owner of the image will be the authenticated user issuing the request. -Setting it to False makes the image private to the associated user and -sharing with other users within the same tenant (or "project") -requires explicit image sharing via image membership. - -Possible values: - * True - * False - -Related options: - * None - -""")), - - cfg.StrOpt('admin_role', default='admin', - help=_(""" -Role used to identify an authenticated user as administrator. - -Provide a string value representing a Keystone role to identify an -administrative user. Users with this role will be granted -administrative privileges. The default value for this option is -'admin'. 
- -Possible values: - * A string value which is a valid Keystone role - -Related options: - * None - -""")), - - cfg.BoolOpt('allow_anonymous_access', default=False, - help=_(""" -Allow limited access to unauthenticated users. - -Assign a boolean to determine API access for unathenticated -users. When set to False, the API cannot be accessed by -unauthenticated users. When set to True, unauthenticated users can -access the API with read-only privileges. This however only applies -when using ContextMiddleware. - -Possible values: - * True - * False - -Related options: - * None - -""")), - - cfg.IntOpt('max_request_id_length', default=64, min=0, - help=_(""" -Limit the request ID length. - -Provide an integer value to limit the length of the request ID to -the specified length. The default value is 64. Users can change this -to any ineteger value between 0 and 16384 however keeping in mind that -a larger value may flood the logs. - -Possible values: - * Integer value between 0 and 16384 - -Related options: - * None - -""")), -] - -CONF = cfg.CONF -CONF.register_opts(context_opts) - -LOG = logging.getLogger(__name__) - - -class BaseContextMiddleware(wsgi.Middleware): - def process_response(self, resp): - try: - request_id = resp.request.context.request_id - except AttributeError: - LOG.warn(_LW('Unable to retrieve request id from context')) - else: - # For python 3 compatibility need to use bytes type - prefix = b'req-' if isinstance(request_id, bytes) else 'req-' - - if not request_id.startswith(prefix): - request_id = prefix + request_id - - resp.headers['x-openstack-request-id'] = request_id - - return resp - - -class ContextMiddleware(BaseContextMiddleware): - def __init__(self, app): - self.policy_enforcer = policy.Enforcer() - super(ContextMiddleware, self).__init__(app) - - def process_request(self, req): - """Convert authentication information into a request context - - Generate a glance.context.RequestContext object from the available - authentication headers 
and store on the 'context' attribute - of the req object. - - :param req: wsgi request object that will be given the context object - :raises webob.exc.HTTPUnauthorized: when value of the - X-Identity-Status header is not - 'Confirmed' and anonymous access - is disallowed - """ - if req.headers.get('X-Identity-Status') == 'Confirmed': - req.context = self._get_authenticated_context(req) - elif CONF.allow_anonymous_access: - req.context = self._get_anonymous_context() - else: - raise webob.exc.HTTPUnauthorized() - - def _get_anonymous_context(self): - kwargs = { - 'user': None, - 'tenant': None, - 'roles': [], - 'is_admin': False, - 'read_only': True, - 'policy_enforcer': self.policy_enforcer, - } - return glance.context.RequestContext(**kwargs) - - def _get_authenticated_context(self, req): - service_catalog = None - if req.headers.get('X-Service-Catalog') is not None: - try: - catalog_header = req.headers.get('X-Service-Catalog') - service_catalog = jsonutils.loads(catalog_header) - except ValueError: - raise webob.exc.HTTPInternalServerError( - _('Invalid service catalog json.')) - - request_id = req.headers.get('X-Openstack-Request-ID') - if request_id and (0 < CONF.max_request_id_length < - len(request_id)): - msg = (_('x-openstack-request-id is too long, max size %s') % - CONF.max_request_id_length) - return webob.exc.HTTPRequestHeaderFieldsTooLarge(comment=msg) - - kwargs = { - 'owner_is_tenant': CONF.owner_is_tenant, - 'service_catalog': service_catalog, - 'policy_enforcer': self.policy_enforcer, - 'request_id': request_id, - } - - ctxt = glance.context.RequestContext.from_environ(req.environ, - **kwargs) - - # FIXME(jamielennox): glance has traditionally lowercased its roles. - # This was related to bug #1010519 where at least the admin role was - # case insensitive. This seems to no longer be the case and should be - # fixed. 
- ctxt.roles = [r.lower() for r in ctxt.roles] - - if CONF.admin_role.strip().lower() in ctxt.roles: - ctxt.is_admin = True - - return ctxt - - -class UnauthenticatedContextMiddleware(BaseContextMiddleware): - def process_request(self, req): - """Create a context without an authorized user.""" - kwargs = { - 'user': None, - 'tenant': None, - 'roles': [], - 'is_admin': True, - } - - req.context = glance.context.RequestContext(**kwargs) diff --git a/glance/api/middleware/gzip.py b/glance/api/middleware/gzip.py deleted file mode 100644 index 942ff021..00000000 --- a/glance/api/middleware/gzip.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Use gzip compression if the client accepts it. -""" - -import re - -from oslo_log import log as logging - -from glance.common import wsgi -from glance.i18n import _LI - -LOG = logging.getLogger(__name__) - - -class GzipMiddleware(wsgi.Middleware): - - re_zip = re.compile(r'\bgzip\b') - - def __init__(self, app): - LOG.info(_LI("Initialized gzip middleware")) - super(GzipMiddleware, self).__init__(app) - - def process_response(self, response): - request = response.request - accept_encoding = request.headers.get('Accept-Encoding', '') - - if self.re_zip.search(accept_encoding): - # NOTE(flaper87): Webob removes the content-md5 when - # app_iter is called. 
We'll keep it and reset it later - checksum = response.headers.get("Content-MD5") - - # NOTE(flaper87): We'll use lazy for images so - # that they can be compressed without reading - # the whole content in memory. Notice that using - # lazy will set response's content-length to 0. - content_type = response.headers.get("Content-Type", "") - lazy = content_type == "application/octet-stream" - - # NOTE(flaper87): Webob takes care of the compression - # process, it will replace the body either with a - # compressed body or a generator - used for lazy com - # pression - depending on the lazy value. - # - # Webob itself will set the Content-Encoding header. - response.encode_content(lazy=lazy) - - if checksum: - response.headers['Content-MD5'] = checksum - - return response diff --git a/glance/api/middleware/version_negotiation.py b/glance/api/middleware/version_negotiation.py deleted file mode 100644 index 8d30fffe..00000000 --- a/glance/api/middleware/version_negotiation.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -A filter middleware that inspects the requested URI for a version string -and/or Accept headers and attempts to negotiate an API controller to -return -""" - -from oslo_config import cfg -from oslo_log import log as logging - -from glance.api import versions -from glance.common import wsgi - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - - -class VersionNegotiationFilter(wsgi.Middleware): - - def __init__(self, app): - self.versions_app = versions.Controller() - self.allowed_versions = None - self.vnd_mime_type = 'application/vnd.openstack.images-' - super(VersionNegotiationFilter, self).__init__(app) - - def process_request(self, req): - """Try to find a version first in the accept header, then the URL""" - args = {'method': req.method, 'path': req.path, 'accept': req.accept} - LOG.debug("Determining version of request: %(method)s %(path)s " - "Accept: %(accept)s", args) - - # If the request is for /versions, just return the versions container - if req.path_info_peek() == "versions": - return self.versions_app.index(req, explicit=True) - - accept = str(req.accept) - if accept.startswith(self.vnd_mime_type): - LOG.debug("Using media-type versioning") - token_loc = len(self.vnd_mime_type) - req_version = accept[token_loc:] - else: - LOG.debug("Using url versioning") - # Remove version in url so it doesn't conflict later - req_version = self._pop_path_info(req) - - try: - version = self._match_version_string(req_version) - except ValueError: - LOG.debug("Unknown version. 
Returning version choices.") - return self.versions_app - - req.environ['api.version'] = version - req.path_info = ''.join(('/v', str(version), req.path_info)) - LOG.debug("Matched version: v%d", version) - LOG.debug('new path %s', req.path_info) - return None - - def _get_allowed_versions(self): - allowed_versions = {} - if CONF.enable_v1_api: - allowed_versions['v1'] = 1 - allowed_versions['v1.0'] = 1 - allowed_versions['v1.1'] = 1 - if CONF.enable_v2_api: - allowed_versions['v2'] = 2 - allowed_versions['v2.0'] = 2 - allowed_versions['v2.1'] = 2 - allowed_versions['v2.2'] = 2 - allowed_versions['v2.3'] = 2 - allowed_versions['v2.4'] = 2 - allowed_versions['v2.5'] = 2 - return allowed_versions - - def _match_version_string(self, subject): - """ - Given a string, tries to match a major and/or - minor version number. - - :param subject: The string to check - :returns: version found in the subject - :raises ValueError: if no acceptable version could be found - """ - if self.allowed_versions is None: - self.allowed_versions = self._get_allowed_versions() - if subject in self.allowed_versions: - return self.allowed_versions[subject] - else: - raise ValueError() - - def _pop_path_info(self, req): - """ - 'Pops' off the next segment of PATH_INFO, returns the popped - segment. Do NOT push it onto SCRIPT_NAME. - """ - path = req.path_info - if not path: - return None - while path.startswith('/'): - path = path[1:] - idx = path.find('/') - if idx == -1: - idx = len(path) - r = path[:idx] - req.path_info = path[idx:] - return r diff --git a/glance/api/policy.py b/glance/api/policy.py deleted file mode 100644 index 3fd38aa8..00000000 --- a/glance/api/policy.py +++ /dev/null @@ -1,680 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Policy Engine For Glance""" - -import copy - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_policy import policy - -from glance.common import exception -import glance.domain.proxy -from glance.i18n import _ - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -DEFAULT_RULES = policy.Rules.from_dict({ - 'context_is_admin': 'role:admin', - 'default': '@', - 'manage_image_cache': 'role:admin', -}) - - -class Enforcer(policy.Enforcer): - """Responsible for loading and enforcing rules""" - - def __init__(self): - if CONF.find_file(CONF.oslo_policy.policy_file): - kwargs = dict(rules=None, use_conf=True) - else: - kwargs = dict(rules=DEFAULT_RULES, use_conf=False) - super(Enforcer, self).__init__(CONF, overwrite=False, **kwargs) - - def add_rules(self, rules): - """Add new rules to the Rules object""" - self.set_rules(rules, overwrite=False, use_conf=self.use_conf) - - def enforce(self, context, action, target): - """Verifies that the action is valid on the target in this context. - - :param context: Glance request context - :param action: String representing the action to be checked - :param target: Dictionary representing the object of the action. - :raises: `glance.common.exception.Forbidden` - :returns: A non-False value if access is allowed. - """ - return super(Enforcer, self).enforce(action, target, - context.to_policy_values(), - do_raise=True, - exc=exception.Forbidden, - action=action) - - def check(self, context, action, target): - """Verifies that the action is valid on the target in this context. 
- - :param context: Glance request context - :param action: String representing the action to be checked - :param target: Dictionary representing the object of the action. - :returns: A non-False value if access is allowed. - """ - return super(Enforcer, self).enforce(action, - target, - context.to_policy_values()) - - def check_is_admin(self, context): - """Check if the given context is associated with an admin role, - as defined via the 'context_is_admin' RBAC rule. - - :param context: Glance request context - :returns: A non-False value if context role is admin. - """ - return self.check(context, 'context_is_admin', context.to_dict()) - - -class ImageRepoProxy(glance.domain.proxy.Repo): - - def __init__(self, image_repo, context, policy): - self.context = context - self.policy = policy - self.image_repo = image_repo - proxy_kwargs = {'context': self.context, 'policy': self.policy} - super(ImageRepoProxy, self).__init__(image_repo, - item_proxy_class=ImageProxy, - item_proxy_kwargs=proxy_kwargs) - - def get(self, image_id): - try: - image = super(ImageRepoProxy, self).get(image_id) - except exception.NotFound: - self.policy.enforce(self.context, 'get_image', {}) - raise - else: - self.policy.enforce(self.context, 'get_image', ImageTarget(image)) - return image - - def list(self, *args, **kwargs): - self.policy.enforce(self.context, 'get_images', {}) - return super(ImageRepoProxy, self).list(*args, **kwargs) - - def save(self, image, from_state=None): - self.policy.enforce(self.context, 'modify_image', image.target) - return super(ImageRepoProxy, self).save(image, from_state=from_state) - - def add(self, image): - self.policy.enforce(self.context, 'add_image', image.target) - return super(ImageRepoProxy, self).add(image) - - -def _enforce_image_visibility(policy, context, visibility, target): - if visibility == 'public': - policy.enforce(context, 'publicize_image', target) - elif visibility == 'community': - policy.enforce(context, 'communitize_image', target) - - 
-class ImageProxy(glance.domain.proxy.Image): - - def __init__(self, image, context, policy): - self.image = image - self.target = ImageTarget(image) - self.context = context - self.policy = policy - super(ImageProxy, self).__init__(image) - - @property - def visibility(self): - return self.image.visibility - - @visibility.setter - def visibility(self, value): - _enforce_image_visibility(self.policy, self.context, value, - self.target) - self.image.visibility = value - - @property - def locations(self): - return ImageLocationsProxy(self.image.locations, - self.context, self.policy) - - @locations.setter - def locations(self, value): - if not isinstance(value, (list, ImageLocationsProxy)): - raise exception.Invalid(_('Invalid locations: %s') % value) - self.policy.enforce(self.context, 'set_image_location', self.target) - new_locations = list(value) - if (set([loc['url'] for loc in self.image.locations]) - - set([loc['url'] for loc in new_locations])): - self.policy.enforce(self.context, 'delete_image_location', - self.target) - self.image.locations = new_locations - - def delete(self): - self.policy.enforce(self.context, 'delete_image', self.target) - return self.image.delete() - - def deactivate(self): - LOG.debug('Attempting deactivate') - target = ImageTarget(self.image) - self.policy.enforce(self.context, 'deactivate', target=target) - LOG.debug('Deactivate allowed, continue') - self.image.deactivate() - - def reactivate(self): - LOG.debug('Attempting reactivate') - target = ImageTarget(self.image) - self.policy.enforce(self.context, 'reactivate', target=target) - LOG.debug('Reactivate allowed, continue') - self.image.reactivate() - - def get_data(self, *args, **kwargs): - self.policy.enforce(self.context, 'download_image', self.target) - return self.image.get_data(*args, **kwargs) - - def set_data(self, *args, **kwargs): - self.policy.enforce(self.context, 'upload_image', self.target) - return self.image.set_data(*args, **kwargs) - - -class 
ImageMemberProxy(glance.domain.proxy.ImageMember): - - def __init__(self, image_member, context, policy): - super(ImageMemberProxy, self).__init__(image_member) - self.image_member = image_member - self.context = context - self.policy = policy - - -class ImageFactoryProxy(glance.domain.proxy.ImageFactory): - - def __init__(self, image_factory, context, policy): - self.image_factory = image_factory - self.context = context - self.policy = policy - proxy_kwargs = {'context': self.context, 'policy': self.policy} - super(ImageFactoryProxy, self).__init__(image_factory, - proxy_class=ImageProxy, - proxy_kwargs=proxy_kwargs) - - def new_image(self, **kwargs): - _enforce_image_visibility(self.policy, self.context, - kwargs.get('visibility'), {}) - return super(ImageFactoryProxy, self).new_image(**kwargs) - - -class ImageMemberFactoryProxy(glance.domain.proxy.ImageMembershipFactory): - - def __init__(self, member_factory, context, policy): - super(ImageMemberFactoryProxy, self).__init__( - member_factory, - proxy_class=ImageMemberProxy, - proxy_kwargs={'context': context, 'policy': policy}) - - -class ImageMemberRepoProxy(glance.domain.proxy.Repo): - - def __init__(self, member_repo, image, context, policy): - self.member_repo = member_repo - self.image = image - self.target = ImageTarget(image) - self.context = context - self.policy = policy - - def add(self, member): - self.policy.enforce(self.context, 'add_member', self.target) - self.member_repo.add(member) - - def get(self, member_id): - self.policy.enforce(self.context, 'get_member', self.target) - return self.member_repo.get(member_id) - - def save(self, member, from_state=None): - self.policy.enforce(self.context, 'modify_member', self.target) - self.member_repo.save(member, from_state=from_state) - - def list(self, *args, **kwargs): - self.policy.enforce(self.context, 'get_members', self.target) - return self.member_repo.list(*args, **kwargs) - - def remove(self, member): - self.policy.enforce(self.context, 
'delete_member', self.target) - self.member_repo.remove(member) - - -class ImageLocationsProxy(object): - - __hash__ = None - - def __init__(self, locations, context, policy): - self.locations = locations - self.context = context - self.policy = policy - - def __copy__(self): - return type(self)(self.locations, self.context, self.policy) - - def __deepcopy__(self, memo): - # NOTE(zhiyan): Only copy location entries, others can be reused. - return type(self)(copy.deepcopy(self.locations, memo), - self.context, self.policy) - - def _get_checker(action, func_name): - def _checker(self, *args, **kwargs): - self.policy.enforce(self.context, action, {}) - method = getattr(self.locations, func_name) - return method(*args, **kwargs) - return _checker - - count = _get_checker('get_image_location', 'count') - index = _get_checker('get_image_location', 'index') - __getitem__ = _get_checker('get_image_location', '__getitem__') - __contains__ = _get_checker('get_image_location', '__contains__') - __len__ = _get_checker('get_image_location', '__len__') - __cast = _get_checker('get_image_location', '__cast') - __cmp__ = _get_checker('get_image_location', '__cmp__') - __iter__ = _get_checker('get_image_location', '__iter__') - - append = _get_checker('set_image_location', 'append') - extend = _get_checker('set_image_location', 'extend') - insert = _get_checker('set_image_location', 'insert') - reverse = _get_checker('set_image_location', 'reverse') - __iadd__ = _get_checker('set_image_location', '__iadd__') - __setitem__ = _get_checker('set_image_location', '__setitem__') - - pop = _get_checker('delete_image_location', 'pop') - remove = _get_checker('delete_image_location', 'remove') - __delitem__ = _get_checker('delete_image_location', '__delitem__') - __delslice__ = _get_checker('delete_image_location', '__delslice__') - - del _get_checker - - -class TaskProxy(glance.domain.proxy.Task): - - def __init__(self, task, context, policy): - self.task = task - self.context = context - 
self.policy = policy - super(TaskProxy, self).__init__(task) - - -class TaskStubProxy(glance.domain.proxy.TaskStub): - - def __init__(self, task_stub, context, policy): - self.task_stub = task_stub - self.context = context - self.policy = policy - super(TaskStubProxy, self).__init__(task_stub) - - -class TaskRepoProxy(glance.domain.proxy.TaskRepo): - - def __init__(self, task_repo, context, task_policy): - self.context = context - self.policy = task_policy - self.task_repo = task_repo - proxy_kwargs = {'context': self.context, 'policy': self.policy} - super(TaskRepoProxy, - self).__init__(task_repo, - task_proxy_class=TaskProxy, - task_proxy_kwargs=proxy_kwargs) - - def get(self, task_id): - self.policy.enforce(self.context, 'get_task', {}) - return super(TaskRepoProxy, self).get(task_id) - - def add(self, task): - self.policy.enforce(self.context, 'add_task', {}) - super(TaskRepoProxy, self).add(task) - - def save(self, task): - self.policy.enforce(self.context, 'modify_task', {}) - super(TaskRepoProxy, self).save(task) - - -class TaskStubRepoProxy(glance.domain.proxy.TaskStubRepo): - - def __init__(self, task_stub_repo, context, task_policy): - self.context = context - self.policy = task_policy - self.task_stub_repo = task_stub_repo - proxy_kwargs = {'context': self.context, 'policy': self.policy} - super(TaskStubRepoProxy, - self).__init__(task_stub_repo, - task_stub_proxy_class=TaskStubProxy, - task_stub_proxy_kwargs=proxy_kwargs) - - def list(self, *args, **kwargs): - self.policy.enforce(self.context, 'get_tasks', {}) - return super(TaskStubRepoProxy, self).list(*args, **kwargs) - - -class TaskFactoryProxy(glance.domain.proxy.TaskFactory): - - def __init__(self, task_factory, context, policy): - self.task_factory = task_factory - self.context = context - self.policy = policy - proxy_kwargs = {'context': self.context, 'policy': self.policy} - super(TaskFactoryProxy, self).__init__( - task_factory, - task_proxy_class=TaskProxy, - task_proxy_kwargs=proxy_kwargs) 
- - -class ImageTarget(object): - SENTINEL = object() - - def __init__(self, target): - """Initialize the object - - :param target: Object being targeted - """ - self.target = target - - def __getitem__(self, key): - """Return the value of 'key' from the target. - - If the target has the attribute 'key', return it. - - :param key: value to retrieve - """ - key = self.key_transforms(key) - - value = getattr(self.target, key, self.SENTINEL) - if value is self.SENTINEL: - extra_properties = getattr(self.target, 'extra_properties', None) - if extra_properties is not None: - value = extra_properties[key] - else: - value = None - return value - - def key_transforms(self, key): - if key == 'id': - key = 'image_id' - - return key - - -# Metadef Namespace classes -class MetadefNamespaceProxy(glance.domain.proxy.MetadefNamespace): - - def __init__(self, namespace, context, policy): - self.namespace_input = namespace - self.context = context - self.policy = policy - super(MetadefNamespaceProxy, self).__init__(namespace) - - -class MetadefNamespaceRepoProxy(glance.domain.proxy.MetadefNamespaceRepo): - - def __init__(self, namespace_repo, context, namespace_policy): - self.context = context - self.policy = namespace_policy - self.namespace_repo = namespace_repo - proxy_kwargs = {'context': self.context, 'policy': self.policy} - super(MetadefNamespaceRepoProxy, - self).__init__(namespace_repo, - namespace_proxy_class=MetadefNamespaceProxy, - namespace_proxy_kwargs=proxy_kwargs) - - def get(self, namespace): - self.policy.enforce(self.context, 'get_metadef_namespace', {}) - return super(MetadefNamespaceRepoProxy, self).get(namespace) - - def list(self, *args, **kwargs): - self.policy.enforce(self.context, 'get_metadef_namespaces', {}) - return super(MetadefNamespaceRepoProxy, self).list(*args, **kwargs) - - def save(self, namespace): - self.policy.enforce(self.context, 'modify_metadef_namespace', {}) - return super(MetadefNamespaceRepoProxy, self).save(namespace) - - def 
add(self, namespace): - self.policy.enforce(self.context, 'add_metadef_namespace', {}) - return super(MetadefNamespaceRepoProxy, self).add(namespace) - - -class MetadefNamespaceFactoryProxy( - glance.domain.proxy.MetadefNamespaceFactory): - - def __init__(self, meta_namespace_factory, context, policy): - self.meta_namespace_factory = meta_namespace_factory - self.context = context - self.policy = policy - proxy_kwargs = {'context': self.context, 'policy': self.policy} - super(MetadefNamespaceFactoryProxy, self).__init__( - meta_namespace_factory, - meta_namespace_proxy_class=MetadefNamespaceProxy, - meta_namespace_proxy_kwargs=proxy_kwargs) - - -# Metadef Object classes -class MetadefObjectProxy(glance.domain.proxy.MetadefObject): - - def __init__(self, meta_object, context, policy): - self.meta_object = meta_object - self.context = context - self.policy = policy - super(MetadefObjectProxy, self).__init__(meta_object) - - -class MetadefObjectRepoProxy(glance.domain.proxy.MetadefObjectRepo): - - def __init__(self, object_repo, context, object_policy): - self.context = context - self.policy = object_policy - self.object_repo = object_repo - proxy_kwargs = {'context': self.context, 'policy': self.policy} - super(MetadefObjectRepoProxy, - self).__init__(object_repo, - object_proxy_class=MetadefObjectProxy, - object_proxy_kwargs=proxy_kwargs) - - def get(self, namespace, object_name): - self.policy.enforce(self.context, 'get_metadef_object', {}) - return super(MetadefObjectRepoProxy, self).get(namespace, object_name) - - def list(self, *args, **kwargs): - self.policy.enforce(self.context, 'get_metadef_objects', {}) - return super(MetadefObjectRepoProxy, self).list(*args, **kwargs) - - def save(self, meta_object): - self.policy.enforce(self.context, 'modify_metadef_object', {}) - return super(MetadefObjectRepoProxy, self).save(meta_object) - - def add(self, meta_object): - self.policy.enforce(self.context, 'add_metadef_object', {}) - return super(MetadefObjectRepoProxy, 
self).add(meta_object) - - -class MetadefObjectFactoryProxy(glance.domain.proxy.MetadefObjectFactory): - - def __init__(self, meta_object_factory, context, policy): - self.meta_object_factory = meta_object_factory - self.context = context - self.policy = policy - proxy_kwargs = {'context': self.context, 'policy': self.policy} - super(MetadefObjectFactoryProxy, self).__init__( - meta_object_factory, - meta_object_proxy_class=MetadefObjectProxy, - meta_object_proxy_kwargs=proxy_kwargs) - - -# Metadef ResourceType classes -class MetadefResourceTypeProxy(glance.domain.proxy.MetadefResourceType): - - def __init__(self, meta_resource_type, context, policy): - self.meta_resource_type = meta_resource_type - self.context = context - self.policy = policy - super(MetadefResourceTypeProxy, self).__init__(meta_resource_type) - - -class MetadefResourceTypeRepoProxy( - glance.domain.proxy.MetadefResourceTypeRepo): - - def __init__(self, resource_type_repo, context, resource_type_policy): - self.context = context - self.policy = resource_type_policy - self.resource_type_repo = resource_type_repo - proxy_kwargs = {'context': self.context, 'policy': self.policy} - super(MetadefResourceTypeRepoProxy, self).__init__( - resource_type_repo, - resource_type_proxy_class=MetadefResourceTypeProxy, - resource_type_proxy_kwargs=proxy_kwargs) - - def list(self, *args, **kwargs): - self.policy.enforce(self.context, 'list_metadef_resource_types', {}) - return super(MetadefResourceTypeRepoProxy, self).list(*args, **kwargs) - - def get(self, *args, **kwargs): - self.policy.enforce(self.context, 'get_metadef_resource_type', {}) - return super(MetadefResourceTypeRepoProxy, self).get(*args, **kwargs) - - def add(self, resource_type): - self.policy.enforce(self.context, - 'add_metadef_resource_type_association', {}) - return super(MetadefResourceTypeRepoProxy, self).add(resource_type) - - -class MetadefResourceTypeFactoryProxy( - glance.domain.proxy.MetadefResourceTypeFactory): - - def __init__(self, 
resource_type_factory, context, policy): - self.resource_type_factory = resource_type_factory - self.context = context - self.policy = policy - proxy_kwargs = {'context': self.context, 'policy': self.policy} - super(MetadefResourceTypeFactoryProxy, self).__init__( - resource_type_factory, - resource_type_proxy_class=MetadefResourceTypeProxy, - resource_type_proxy_kwargs=proxy_kwargs) - - -# Metadef namespace properties classes -class MetadefPropertyProxy(glance.domain.proxy.MetadefProperty): - - def __init__(self, namespace_property, context, policy): - self.namespace_property = namespace_property - self.context = context - self.policy = policy - super(MetadefPropertyProxy, self).__init__(namespace_property) - - -class MetadefPropertyRepoProxy(glance.domain.proxy.MetadefPropertyRepo): - - def __init__(self, property_repo, context, object_policy): - self.context = context - self.policy = object_policy - self.property_repo = property_repo - proxy_kwargs = {'context': self.context, 'policy': self.policy} - super(MetadefPropertyRepoProxy, self).__init__( - property_repo, - property_proxy_class=MetadefPropertyProxy, - property_proxy_kwargs=proxy_kwargs) - - def get(self, namespace, property_name): - self.policy.enforce(self.context, 'get_metadef_property', {}) - return super(MetadefPropertyRepoProxy, self).get(namespace, - property_name) - - def list(self, *args, **kwargs): - self.policy.enforce(self.context, 'get_metadef_properties', {}) - return super(MetadefPropertyRepoProxy, self).list( - *args, **kwargs) - - def save(self, namespace_property): - self.policy.enforce(self.context, 'modify_metadef_property', {}) - return super(MetadefPropertyRepoProxy, self).save( - namespace_property) - - def add(self, namespace_property): - self.policy.enforce(self.context, 'add_metadef_property', {}) - return super(MetadefPropertyRepoProxy, self).add( - namespace_property) - - -class MetadefPropertyFactoryProxy(glance.domain.proxy.MetadefPropertyFactory): - - def __init__(self, 
namespace_property_factory, context, policy): - self.namespace_property_factory = namespace_property_factory - self.context = context - self.policy = policy - proxy_kwargs = {'context': self.context, 'policy': self.policy} - super(MetadefPropertyFactoryProxy, self).__init__( - namespace_property_factory, - property_proxy_class=MetadefPropertyProxy, - property_proxy_kwargs=proxy_kwargs) - - -# Metadef Tag classes -class MetadefTagProxy(glance.domain.proxy.MetadefTag): - - def __init__(self, meta_tag, context, policy): - self.context = context - self.policy = policy - super(MetadefTagProxy, self).__init__(meta_tag) - - -class MetadefTagRepoProxy(glance.domain.proxy.MetadefTagRepo): - - def __init__(self, tag_repo, context, tag_policy): - self.context = context - self.policy = tag_policy - self.tag_repo = tag_repo - proxy_kwargs = {'context': self.context, 'policy': self.policy} - super(MetadefTagRepoProxy, - self).__init__(tag_repo, - tag_proxy_class=MetadefTagProxy, - tag_proxy_kwargs=proxy_kwargs) - - def get(self, namespace, tag_name): - self.policy.enforce(self.context, 'get_metadef_tag', {}) - return super(MetadefTagRepoProxy, self).get(namespace, tag_name) - - def list(self, *args, **kwargs): - self.policy.enforce(self.context, 'get_metadef_tags', {}) - return super(MetadefTagRepoProxy, self).list(*args, **kwargs) - - def save(self, meta_tag): - self.policy.enforce(self.context, 'modify_metadef_tag', {}) - return super(MetadefTagRepoProxy, self).save(meta_tag) - - def add(self, meta_tag): - self.policy.enforce(self.context, 'add_metadef_tag', {}) - return super(MetadefTagRepoProxy, self).add(meta_tag) - - def add_tags(self, meta_tags): - self.policy.enforce(self.context, 'add_metadef_tags', {}) - return super(MetadefTagRepoProxy, self).add_tags(meta_tags) - - -class MetadefTagFactoryProxy(glance.domain.proxy.MetadefTagFactory): - - def __init__(self, meta_tag_factory, context, policy): - self.meta_tag_factory = meta_tag_factory - self.context = context - 
self.policy = policy - proxy_kwargs = {'context': self.context, 'policy': self.policy} - super(MetadefTagFactoryProxy, self).__init__( - meta_tag_factory, - meta_tag_proxy_class=MetadefTagProxy, - meta_tag_proxy_kwargs=proxy_kwargs) diff --git a/glance/api/property_protections.py b/glance/api/property_protections.py deleted file mode 100644 index 309a39ba..00000000 --- a/glance/api/property_protections.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2013 Rackspace -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from glance.common import exception -import glance.domain.proxy - - -class ProtectedImageFactoryProxy(glance.domain.proxy.ImageFactory): - - def __init__(self, image_factory, context, property_rules): - self.image_factory = image_factory - self.context = context - self.property_rules = property_rules - kwargs = {'context': self.context, - 'property_rules': self.property_rules} - super(ProtectedImageFactoryProxy, self).__init__( - image_factory, - proxy_class=ProtectedImageProxy, - proxy_kwargs=kwargs) - - def new_image(self, **kwargs): - extra_props = kwargs.pop('extra_properties', {}) - - extra_properties = {} - for key in extra_props.keys(): - if self.property_rules.check_property_rules(key, 'create', - self.context): - extra_properties[key] = extra_props[key] - else: - raise exception.ReservedProperty(property=key) - return super(ProtectedImageFactoryProxy, self).new_image( - extra_properties=extra_properties, **kwargs) - - -class ProtectedImageRepoProxy(glance.domain.proxy.Repo): - - def __init__(self, image_repo, context, property_rules): - self.context = context - self.image_repo = image_repo - self.property_rules = property_rules - proxy_kwargs = {'context': self.context} - super(ProtectedImageRepoProxy, self).__init__( - image_repo, item_proxy_class=ProtectedImageProxy, - item_proxy_kwargs=proxy_kwargs) - - def get(self, image_id): - return ProtectedImageProxy(self.image_repo.get(image_id), - self.context, self.property_rules) - - def list(self, *args, **kwargs): - images = self.image_repo.list(*args, **kwargs) - return [ProtectedImageProxy(image, self.context, self.property_rules) - for image in images] - - -class ProtectedImageProxy(glance.domain.proxy.Image): - - def __init__(self, image, context, property_rules): - self.image = image - self.context = context - self.property_rules = property_rules - - self.image.extra_properties = ExtraPropertiesProxy( - self.context, - self.image.extra_properties, - self.property_rules) - super(ProtectedImageProxy, 
self).__init__(self.image) - - -class ExtraPropertiesProxy(glance.domain.ExtraProperties): - - def __init__(self, context, extra_props, property_rules): - self.context = context - self.property_rules = property_rules - extra_properties = {} - for key in extra_props.keys(): - if self.property_rules.check_property_rules(key, 'read', - self.context): - extra_properties[key] = extra_props[key] - super(ExtraPropertiesProxy, self).__init__(extra_properties) - - def __getitem__(self, key): - if self.property_rules.check_property_rules(key, 'read', self.context): - return dict.__getitem__(self, key) - else: - raise KeyError - - def __setitem__(self, key, value): - # NOTE(isethi): Exceptions are raised only for actions update, delete - # and create, where the user proactively interacts with the properties. - # A user cannot request to read a specific property, hence reads do - # raise an exception - try: - if self.__getitem__(key) is not None: - if self.property_rules.check_property_rules(key, 'update', - self.context): - return dict.__setitem__(self, key, value) - else: - raise exception.ReservedProperty(property=key) - except KeyError: - if self.property_rules.check_property_rules(key, 'create', - self.context): - return dict.__setitem__(self, key, value) - else: - raise exception.ReservedProperty(property=key) - - def __delitem__(self, key): - if key not in super(ExtraPropertiesProxy, self).keys(): - raise KeyError - - if self.property_rules.check_property_rules(key, 'delete', - self.context): - return dict.__delitem__(self, key) - else: - raise exception.ReservedProperty(property=key) diff --git a/glance/api/v1/__init__.py b/glance/api/v1/__init__.py deleted file mode 100644 index aa3d8718..00000000 --- a/glance/api/v1/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -SUPPORTED_FILTERS = ['name', 'status', 'container_format', 'disk_format', - 'min_ram', 'min_disk', 'size_min', 'size_max', - 'is_public', 'changes-since', 'protected'] - -SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir') - -# Metadata which only an admin can change once the image is active -ACTIVE_IMMUTABLE = ('size', 'checksum') - -# Metadata which cannot be changed (irrespective of the current image state) -IMMUTABLE = ('status', 'id') diff --git a/glance/api/v1/controller.py b/glance/api/v1/controller.py deleted file mode 100644 index 945da001..00000000 --- a/glance/api/v1/controller.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import glance_store as store -from oslo_log import log as logging -import webob.exc - -from glance.common import exception -from glance.i18n import _ -import glance.registry.client.v1.api as registry - - -LOG = logging.getLogger(__name__) - - -class BaseController(object): - def get_image_meta_or_404(self, request, image_id): - """ - Grabs the image metadata for an image with a supplied - identifier or raises an HTTPNotFound (404) response - - :param request: The WSGI/Webob Request object - :param image_id: The opaque image identifier - - :raises HTTPNotFound: if image does not exist - """ - context = request.context - try: - return registry.get_image_metadata(context, image_id) - except exception.NotFound: - LOG.debug("Image with identifier %s not found", image_id) - msg = _("Image with identifier %s not found") % image_id - raise webob.exc.HTTPNotFound( - msg, request=request, content_type='text/plain') - except exception.Forbidden: - LOG.debug("Forbidden image access") - raise webob.exc.HTTPForbidden(_("Forbidden image access"), - request=request, - content_type='text/plain') - - def get_active_image_meta_or_error(self, request, image_id): - """ - Same as get_image_meta_or_404 except that it will raise a 403 if the - image is deactivated or 404 if the image is otherwise not 'active'. 
- """ - image = self.get_image_meta_or_404(request, image_id) - if image['status'] == 'deactivated': - LOG.debug("Image %s is deactivated", image_id) - msg = _("Image %s is deactivated") % image_id - raise webob.exc.HTTPForbidden( - msg, request=request, content_type='text/plain') - if image['status'] != 'active': - LOG.debug("Image %s is not active", image_id) - msg = _("Image %s is not active") % image_id - raise webob.exc.HTTPNotFound( - msg, request=request, content_type='text/plain') - return image - - def update_store_acls(self, req, image_id, location_uri, public=False): - if location_uri: - try: - read_tenants = [] - write_tenants = [] - members = registry.get_image_members(req.context, image_id) - if members: - for member in members: - if member['can_share']: - write_tenants.append(member['member_id']) - else: - read_tenants.append(member['member_id']) - store.set_acls(location_uri, public=public, - read_tenants=read_tenants, - write_tenants=write_tenants, - context=req.context) - except store.UnknownScheme: - msg = _("Store for image_id not found: %s") % image_id - raise webob.exc.HTTPBadRequest(explanation=msg, - request=req, - content_type='text/plain') - except store.NotFound: - msg = _("Data for image_id not found: %s") % image_id - raise webob.exc.HTTPNotFound(explanation=msg, - request=req, - content_type='text/plain') diff --git a/glance/api/v1/filters.py b/glance/api/v1/filters.py deleted file mode 100644 index a71b13cc..00000000 --- a/glance/api/v1/filters.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2012, Piston Cloud Computing, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def validate(filter, value): - return FILTER_FUNCTIONS.get(filter, lambda v: True)(value) - - -def validate_int_in_range(min=0, max=None): - def _validator(v): - try: - if max is None: - return min <= int(v) - return min <= int(v) <= max - except ValueError: - return False - return _validator - - -def validate_boolean(v): - return v.lower() in ('none', 'true', 'false', '1', '0') - - -FILTER_FUNCTIONS = {'size_max': validate_int_in_range(), # build validator - 'size_min': validate_int_in_range(), # build validator - 'min_ram': validate_int_in_range(), # build validator - 'protected': validate_boolean, - 'is_public': validate_boolean, } diff --git a/glance/api/v1/images.py b/glance/api/v1/images.py deleted file mode 100644 index 0b74be80..00000000 --- a/glance/api/v1/images.py +++ /dev/null @@ -1,1351 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -/images endpoint for Glance v1 API -""" - -import copy - -import glance_store as store -import glance_store.location -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils -from oslo_utils import strutils -import six -from webob.exc import HTTPBadRequest -from webob.exc import HTTPConflict -from webob.exc import HTTPForbidden -from webob.exc import HTTPMethodNotAllowed -from webob.exc import HTTPNotFound -from webob.exc import HTTPRequestEntityTooLarge -from webob.exc import HTTPServiceUnavailable -from webob.exc import HTTPUnauthorized -from webob import Response - -from glance.api import common -from glance.api import policy -import glance.api.v1 -from glance.api.v1 import controller -from glance.api.v1 import filters -from glance.api.v1 import upload_utils -from glance.common import exception -from glance.common import property_utils -from glance.common import store_utils -from glance.common import timeutils -from glance.common import utils -from glance.common import wsgi -from glance.i18n import _, _LE, _LI, _LW -from glance import notifier -import glance.registry.client.v1.api as registry - -LOG = logging.getLogger(__name__) -SUPPORTED_PARAMS = glance.api.v1.SUPPORTED_PARAMS -SUPPORTED_FILTERS = glance.api.v1.SUPPORTED_FILTERS -ACTIVE_IMMUTABLE = glance.api.v1.ACTIVE_IMMUTABLE -IMMUTABLE = glance.api.v1.IMMUTABLE - -CONF = cfg.CONF -CONF.import_opt('disk_formats', 'glance.common.config', group='image_format') -CONF.import_opt('container_formats', 'glance.common.config', - group='image_format') -CONF.import_opt('image_property_quota', 'glance.common.config') - - -def _validate_time(req, values): - """Validates time formats for updated_at, created_at and deleted_at. - 'strftime' only allows values after 1900 in glance v1 so this is enforced - here. This was introduced to keep modularity. 
- """ - for time_field in ['created_at', 'updated_at', 'deleted_at']: - if time_field in values and values[time_field]: - try: - time = timeutils.parse_isotime(values[time_field]) - # On Python 2, datetime.datetime.strftime() raises a ValueError - # for years older than 1900. On Python 3, years older than 1900 - # are accepted. But we explicitly want to reject timestamps - # older than January 1st, 1900 for Glance API v1. - if time.year < 1900: - raise ValueError - values[time_field] = time.strftime( - timeutils.PERFECT_TIME_FORMAT) - except ValueError: - msg = (_("Invalid time format for %s.") % time_field) - raise HTTPBadRequest(explanation=msg, request=req) - - -def _validate_format(req, values): - """Validates disk_format and container_format fields - - Introduced to split too complex validate_image_meta method. - """ - amazon_formats = ('aki', 'ari', 'ami') - disk_format = values.get('disk_format') - container_format = values.get('container_format') - - if 'disk_format' in values: - if disk_format not in CONF.image_format.disk_formats: - msg = _("Invalid disk format '%s' for image.") % disk_format - raise HTTPBadRequest(explanation=msg, request=req) - - if 'container_format' in values: - if container_format not in CONF.image_format.container_formats: - msg = _("Invalid container format '%s' " - "for image.") % container_format - raise HTTPBadRequest(explanation=msg, request=req) - - if any(f in amazon_formats for f in [disk_format, container_format]): - if disk_format is None: - values['disk_format'] = container_format - elif container_format is None: - values['container_format'] = disk_format - elif container_format != disk_format: - msg = (_("Invalid mix of disk and container formats. 
" - "When setting a disk or container format to " - "one of 'aki', 'ari', or 'ami', the container " - "and disk formats must match.")) - raise HTTPBadRequest(explanation=msg, request=req) - - -def validate_image_meta(req, values): - _validate_format(req, values) - _validate_time(req, values) - - name = values.get('name') - checksum = values.get('checksum') - - if name and len(name) > 255: - msg = _('Image name too long: %d') % len(name) - raise HTTPBadRequest(explanation=msg, request=req) - - # check that checksum retrieved is exactly 32 characters - # as long as we expect md5 checksum - # https://bugs.launchpad.net/glance/+bug/1454730 - if checksum and len(checksum) > 32: - msg = (_("Invalid checksum '%s': can't exceed 32 characters") % - checksum) - raise HTTPBadRequest(explanation=msg, request=req) - - return values - - -def redact_loc(image_meta, copy_dict=True): - """ - Create a shallow copy of image meta with 'location' removed - for security (as it can contain credentials). - """ - if copy_dict: - new_image_meta = copy.copy(image_meta) - else: - new_image_meta = image_meta - new_image_meta.pop('location', None) - new_image_meta.pop('location_data', None) - return new_image_meta - - -class Controller(controller.BaseController): - """ - WSGI controller for images resource in Glance v1 API - - The images resource API is a RESTful web service for image data. 
The API - is as follows:: - - GET /images -- Returns a set of brief metadata about images - GET /images/detail -- Returns a set of detailed metadata about - images - HEAD /images/ -- Return metadata about an image with id - GET /images/ -- Return image data for image with id - POST /images -- Store image data and return metadata about the - newly-stored image - PUT /images/ -- Update image metadata and/or upload image - data for a previously-reserved image - DELETE /images/ -- Delete the image with id - """ - - def __init__(self): - self.notifier = notifier.Notifier() - registry.configure_registry_client() - self.policy = policy.Enforcer() - if property_utils.is_property_protection_enabled(): - self.prop_enforcer = property_utils.PropertyRules(self.policy) - else: - self.prop_enforcer = None - - def _enforce(self, req, action, target=None): - """Authorize an action against our policies""" - if target is None: - target = {} - try: - self.policy.enforce(req.context, action, target) - except exception.Forbidden: - LOG.debug("User not permitted to perform '%s' action", action) - raise HTTPForbidden() - - def _enforce_image_property_quota(self, - image_meta, - orig_image_meta=None, - purge_props=False, - req=None): - if CONF.image_property_quota < 0: - # If value is negative, allow unlimited number of properties - return - - props = list(image_meta['properties'].keys()) - - # NOTE(ameade): If we are not removing existing properties, - # take them in to account - if (not purge_props) and orig_image_meta: - original_props = orig_image_meta['properties'].keys() - props.extend(original_props) - props = set(props) - - if len(props) > CONF.image_property_quota: - msg = (_("The limit has been exceeded on the number of allowed " - "image properties. 
Attempted: %(num)s, Maximum: " - "%(quota)s") % {'num': len(props), - 'quota': CONF.image_property_quota}) - LOG.warn(msg) - raise HTTPRequestEntityTooLarge(explanation=msg, - request=req, - content_type="text/plain") - - def _enforce_create_protected_props(self, create_props, req): - """ - Check request is permitted to create certain properties - - :param create_props: List of properties to check - :param req: The WSGI/Webob Request object - - :raises HTTPForbidden: if request forbidden to create a property - """ - if property_utils.is_property_protection_enabled(): - for key in create_props: - if (self.prop_enforcer.check_property_rules( - key, 'create', req.context) is False): - msg = _("Property '%s' is protected") % key - LOG.warn(msg) - raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") - - def _enforce_read_protected_props(self, image_meta, req): - """ - Remove entries from metadata properties if they are read protected - - :param image_meta: Mapping of metadata about image - :param req: The WSGI/Webob Request object - """ - if property_utils.is_property_protection_enabled(): - for key in list(image_meta['properties'].keys()): - if (self.prop_enforcer.check_property_rules( - key, 'read', req.context) is False): - image_meta['properties'].pop(key) - - def _enforce_update_protected_props(self, update_props, image_meta, - orig_meta, req): - """ - Check request is permitted to update certain properties. Read - permission is required to delete a property. - - If the property value is unchanged, i.e. a noop, it is permitted, - however, it is important to ensure read access first. Otherwise the - value could be discovered using brute force. 
- - :param update_props: List of properties to check - :param image_meta: Mapping of proposed new metadata about image - :param orig_meta: Mapping of existing metadata about image - :param req: The WSGI/Webob Request object - - :raises HTTPForbidden: if request forbidden to create a property - """ - if property_utils.is_property_protection_enabled(): - for key in update_props: - has_read = self.prop_enforcer.check_property_rules( - key, 'read', req.context) - if ((self.prop_enforcer.check_property_rules( - key, 'update', req.context) is False and - image_meta['properties'][key] != - orig_meta['properties'][key]) or not has_read): - msg = _("Property '%s' is protected") % key - LOG.warn(msg) - raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") - - def _enforce_delete_protected_props(self, delete_props, image_meta, - orig_meta, req): - """ - Check request is permitted to delete certain properties. Read - permission is required to delete a property. - - Note, the absence of a property in a request does not necessarily - indicate a delete. The requester may not have read access, and so can - not know the property exists. Hence, read access is a requirement for - delete, otherwise the delete is ignored transparently. 
- - :param delete_props: List of properties to check - :param image_meta: Mapping of proposed new metadata about image - :param orig_meta: Mapping of existing metadata about image - :param req: The WSGI/Webob Request object - - :raises HTTPForbidden: if request forbidden to create a property - """ - if property_utils.is_property_protection_enabled(): - for key in delete_props: - if (self.prop_enforcer.check_property_rules( - key, 'read', req.context) is False): - # NOTE(bourke): if read protected, re-add to image_meta to - # prevent deletion - image_meta['properties'][key] = orig_meta[ - 'properties'][key] - elif (self.prop_enforcer.check_property_rules( - key, 'delete', req.context) is False): - msg = _("Property '%s' is protected") % key - LOG.warn(msg) - raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") - - def index(self, req): - """ - Returns the following information for all public, available images: - - * id -- The opaque image identifier - * name -- The name of the image - * disk_format -- The disk image format - * container_format -- The "container" format of the image - * checksum -- MD5 checksum of the image data - * size -- Size of image data in bytes - - :param req: The WSGI/Webob Request object - :returns: The response body is a mapping of the following form - - :: - - {'images': [ - {'id': , - 'name': , - 'disk_format': , - 'container_format': , - 'checksum': , - 'size': }, {...}] - } - - """ - self._enforce(req, 'get_images') - params = self._get_query_params(req) - try: - images = registry.get_images_list(req.context, **params) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - - return dict(images=images) - - def detail(self, req): - """ - Returns detailed information for all available images - - :param req: The WSGI/Webob Request object - :returns: The response body is a mapping of the following form - - :: - - {'images': - [{ - 'id': , - 'name': , - 'size': , - 'disk_format': 
, - 'container_format': , - 'checksum': , - 'min_disk': , - 'min_ram': , - 'store': , - 'status': , - 'created_at': , - 'updated_at': , - 'deleted_at': |, - 'properties': {'distro': 'Ubuntu 10.04 LTS', {...}} - }, {...}] - } - - """ - if req.method == 'HEAD': - msg = (_("This operation is currently not permitted on " - "Glance images details.")) - raise HTTPMethodNotAllowed(explanation=msg, - headers={'Allow': 'GET'}, - body_template='${explanation}') - self._enforce(req, 'get_images') - params = self._get_query_params(req) - try: - images = registry.get_images_detail(req.context, **params) - # Strip out the Location attribute. Temporary fix for - # LP Bug #755916. This information is still coming back - # from the registry, since the API server still needs access - # to it, however we do not return this potential security - # information to the API end user... - for image in images: - redact_loc(image, copy_dict=False) - self._enforce_read_protected_props(image, req) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - except exception.NotAuthenticated as e: - raise HTTPUnauthorized(explanation=e.msg, request=req) - return dict(images=images) - - def _get_query_params(self, req): - """ - Extracts necessary query params from request. 
- - :param req: the WSGI Request object - :returns: dict of parameters that can be used by registry client - """ - params = {'filters': self._get_filters(req)} - - for PARAM in SUPPORTED_PARAMS: - if PARAM in req.params: - params[PARAM] = req.params.get(PARAM) - - # Fix for LP Bug #1132294 - # Ensure all shared images are returned in v1 - params['member_status'] = 'all' - return params - - def _get_filters(self, req): - """ - Return a dictionary of query param filters from the request - - :param req: the Request object coming from the wsgi layer - :returns: a dict of key/value filters - """ - query_filters = {} - for param in req.params: - if param in SUPPORTED_FILTERS or param.startswith('property-'): - query_filters[param] = req.params.get(param) - if not filters.validate(param, query_filters[param]): - raise HTTPBadRequest(_('Bad value passed to filter ' - '%(filter)s got %(val)s') - % {'filter': param, - 'val': query_filters[param]}) - return query_filters - - def meta(self, req, id): - """ - Returns metadata about an image in the HTTP headers of the - response object - - :param req: The WSGI/Webob Request object - :param id: The opaque image identifier - :returns: similar to 'show' method but without image_data - - :raises HTTPNotFound: if image metadata is not available to user - """ - self._enforce(req, 'get_image') - image_meta = self.get_image_meta_or_404(req, id) - image_meta = redact_loc(image_meta) - self._enforce_read_protected_props(image_meta, req) - return { - 'image_meta': image_meta - } - - @staticmethod - def _validate_source(source, req): - """ - Validate if external sources (as specified via the location - or copy-from headers) are supported. Otherwise we reject - with 400 "Bad Request". 
- """ - if store_utils.validate_external_location(source): - return source - else: - if source: - msg = _("External sources are not supported: '%s'") % source - else: - msg = _("External source should not be empty") - LOG.warn(msg) - raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - - @staticmethod - def _copy_from(req): - return req.headers.get('x-glance-api-copy-from') - - def _external_source(self, image_meta, req): - if 'location' in image_meta: - self._enforce(req, 'set_image_location') - source = image_meta['location'] - elif 'x-glance-api-copy-from' in req.headers: - source = Controller._copy_from(req) - else: - # we have an empty external source value - # so we are creating "draft" of the image and no need validation - return None - return Controller._validate_source(source, req) - - @staticmethod - def _get_from_store(context, where, dest=None): - try: - loc = glance_store.location.get_location_from_uri(where) - src_store = store.get_store_from_uri(where) - - if dest is not None: - src_store.READ_CHUNKSIZE = dest.WRITE_CHUNKSIZE - - image_data, image_size = src_store.get(loc, context=context) - - except store.RemoteServiceUnavailable as e: - raise HTTPServiceUnavailable(explanation=e.msg) - except store.NotFound as e: - raise HTTPNotFound(explanation=e.msg) - except (store.StoreGetNotSupported, - store.StoreRandomGetNotSupported, - store.UnknownScheme) as e: - raise HTTPBadRequest(explanation=e.msg) - image_size = int(image_size) if image_size else None - return image_data, image_size - - def show(self, req, id): - """ - Returns an iterator that can be used to retrieve an image's - data along with the image metadata. 
- - :param req: The WSGI/Webob Request object - :param id: The opaque image identifier - - :raises HTTPNotFound: if image is not available to user - """ - - self._enforce(req, 'get_image') - - try: - image_meta = self.get_active_image_meta_or_error(req, id) - except HTTPNotFound: - # provision for backward-compatibility breaking issue - # catch the 404 exception and raise it after enforcing - # the policy - with excutils.save_and_reraise_exception(): - self._enforce(req, 'download_image') - else: - target = utils.create_mashup_dict(image_meta) - self._enforce(req, 'download_image', target=target) - - self._enforce_read_protected_props(image_meta, req) - - if image_meta.get('size') == 0: - image_iterator = iter([]) - else: - image_iterator, size = self._get_from_store(req.context, - image_meta['location']) - image_iterator = utils.cooperative_iter(image_iterator) - image_meta['size'] = size or image_meta['size'] - image_meta = redact_loc(image_meta) - return { - 'image_iterator': image_iterator, - 'image_meta': image_meta, - } - - def _reserve(self, req, image_meta): - """ - Adds the image metadata to the registry and assigns - an image identifier if one is not supplied in the request - headers. Sets the image's status to `queued`. 
- - :param req: The WSGI/Webob Request object - :param id: The opaque image identifier - :param image_meta: The image metadata - - :raises HTTPConflict: if image already exists - :raises HTTPBadRequest: if image metadata is not valid - """ - location = self._external_source(image_meta, req) - scheme = image_meta.get('store') - if scheme and scheme not in store.get_known_schemes(): - msg = _("Required store %s is invalid") % scheme - LOG.warn(msg) - raise HTTPBadRequest(explanation=msg, - content_type='text/plain') - - image_meta['status'] = ('active' if image_meta.get('size') == 0 - else 'queued') - - if location: - try: - backend = store.get_store_from_location(location) - except (store.UnknownScheme, store.BadStoreUri): - LOG.debug("Invalid location %s", location) - msg = _("Invalid location %s") % location - raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - # check the store exists before we hit the registry, but we - # don't actually care what it is at this point - self.get_store_or_400(req, backend) - - # retrieve the image size from remote store (if not provided) - image_meta['size'] = self._get_size(req.context, image_meta, - location) - else: - # Ensure that the size attribute is set to zero for directly - # uploadable images (if not provided). The size will be set - # to a non-zero value during upload - image_meta['size'] = image_meta.get('size', 0) - - try: - image_meta = registry.add_image_metadata(req.context, image_meta) - self.notifier.info("image.create", redact_loc(image_meta)) - return image_meta - except exception.Duplicate: - msg = (_("An image with identifier %s already exists") % - image_meta['id']) - LOG.warn(msg) - raise HTTPConflict(explanation=msg, - request=req, - content_type="text/plain") - except exception.Invalid as e: - msg = (_("Failed to reserve image. 
Got error: %s") % - encodeutils.exception_to_unicode(e)) - LOG.exception(msg) - raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - except exception.Forbidden: - msg = _("Forbidden to reserve image.") - LOG.warn(msg) - raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") - - def _upload(self, req, image_meta): - """ - Uploads the payload of the request to a backend store in - Glance. If the `x-image-meta-store` header is set, Glance - will attempt to use that scheme; if not, Glance will use the - scheme set by the flag `default_store` to find the backing store. - - :param req: The WSGI/Webob Request object - :param image_meta: Mapping of metadata about image - - :raises HTTPConflict: if image already exists - :returns: The location where the image was stored - """ - - scheme = req.headers.get('x-image-meta-store', - CONF.glance_store.default_store) - - store = self.get_store_or_400(req, scheme) - - copy_from = self._copy_from(req) - if copy_from: - try: - image_data, image_size = self._get_from_store(req.context, - copy_from, - dest=store) - except Exception: - upload_utils.safe_kill(req, image_meta['id'], 'queued') - msg = (_LE("Copy from external source '%(scheme)s' failed for " - "image: %(image)s") % - {'scheme': scheme, 'image': image_meta['id']}) - LOG.exception(msg) - return - image_meta['size'] = image_size or image_meta['size'] - else: - try: - req.get_content_type(('application/octet-stream',)) - except exception.InvalidContentType: - upload_utils.safe_kill(req, image_meta['id'], 'queued') - msg = _("Content-Type must be application/octet-stream") - LOG.warn(msg) - raise HTTPBadRequest(explanation=msg) - - image_data = req.body_file - - image_id = image_meta['id'] - LOG.debug("Setting image %s to status 'saving'", image_id) - registry.update_image_metadata(req.context, image_id, - {'status': 'saving'}) - - LOG.debug("Uploading image data for image %(image_id)s " - "to %(scheme)s store", 
{'image_id': image_id, - 'scheme': scheme}) - - self.notifier.info("image.prepare", redact_loc(image_meta)) - - image_meta, location_data = upload_utils.upload_data_to_store( - req, image_meta, image_data, store, self.notifier) - - self.notifier.info('image.upload', redact_loc(image_meta)) - - return location_data - - def _activate(self, req, image_id, location_data, from_state=None): - """ - Sets the image status to `active` and the image's location - attribute. - - :param req: The WSGI/Webob Request object - :param image_id: Opaque image identifier - :param location_data: Location of where Glance stored this image - """ - image_meta = { - 'location': location_data['url'], - 'status': 'active', - 'location_data': [location_data] - } - - try: - s = from_state - image_meta_data = registry.update_image_metadata(req.context, - image_id, - image_meta, - from_state=s) - self.notifier.info("image.activate", redact_loc(image_meta_data)) - self.notifier.info("image.update", redact_loc(image_meta_data)) - return image_meta_data - except exception.Duplicate: - with excutils.save_and_reraise_exception(): - # Delete image data since it has been superseded by another - # upload and re-raise. - LOG.debug("duplicate operation - deleting image data for " - " %(id)s (location:%(location)s)", - {'id': image_id, 'location': image_meta['location']}) - upload_utils.initiate_deletion(req, location_data, image_id) - except exception.Invalid as e: - msg = (_("Failed to activate image. Got error: %s") % - encodeutils.exception_to_unicode(e)) - LOG.warn(msg) - raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - - def _upload_and_activate(self, req, image_meta): - """ - Safely uploads the image data in the request payload - and activates the image in the registry after a successful - upload. 
- - :param req: The WSGI/Webob Request object - :param image_meta: Mapping of metadata about image - - :returns: Mapping of updated image data - """ - location_data = self._upload(req, image_meta) - image_id = image_meta['id'] - LOG.info(_LI("Uploaded data of image %s from request " - "payload successfully."), image_id) - - if location_data: - try: - image_meta = self._activate(req, - image_id, - location_data, - from_state='saving') - except exception.Duplicate: - raise - except Exception: - with excutils.save_and_reraise_exception(): - # NOTE(zhiyan): Delete image data since it has already - # been added to store by above _upload() call. - LOG.warn(_LW("Failed to activate image %s in " - "registry. About to delete image " - "bits from store and update status " - "to 'killed'.") % image_id) - upload_utils.initiate_deletion(req, location_data, - image_id) - upload_utils.safe_kill(req, image_id, 'saving') - else: - image_meta = None - - return image_meta - - def _get_size(self, context, image_meta, location): - # retrieve the image size from remote store (if not provided) - try: - return (image_meta.get('size', 0) or - store.get_size_from_backend(location, context=context)) - except store.NotFound as e: - # NOTE(rajesht): The exception is logged as debug message because - # the image is located at third-party server and it has nothing to - # do with glance. If log.exception is used here, in that case the - # log file might be flooded with exception log messages if - # malicious user keeps on trying image-create using non-existent - # location url. Used log.debug because administrator can - # disable debug logs. 
- LOG.debug(encodeutils.exception_to_unicode(e)) - raise HTTPNotFound(explanation=e.msg, content_type="text/plain") - except (store.UnknownScheme, store.BadStoreUri) as e: - # NOTE(rajesht): See above note of store.NotFound - LOG.debug(encodeutils.exception_to_unicode(e)) - raise HTTPBadRequest(explanation=e.msg, content_type="text/plain") - - def _handle_source(self, req, image_id, image_meta, image_data): - copy_from = self._copy_from(req) - location = image_meta.get('location') - sources = [obj for obj in (copy_from, location, image_data) if obj] - if len(sources) >= 2: - msg = _("It's invalid to provide multiple image sources.") - LOG.warn(msg) - raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - if len(sources) == 0: - return image_meta - if image_data: - image_meta = self._validate_image_for_activation(req, - image_id, - image_meta) - image_meta = self._upload_and_activate(req, image_meta) - elif copy_from: - msg = _LI('Triggering asynchronous copy from external source') - LOG.info(msg) - pool = common.get_thread_pool("copy_from_eventlet_pool") - pool.spawn_n(self._upload_and_activate, req, image_meta) - else: - if location: - self._validate_image_for_activation(req, image_id, image_meta) - image_size_meta = image_meta.get('size') - if image_size_meta: - try: - image_size_store = store.get_size_from_backend( - location, req.context) - except (store.BadStoreUri, store.UnknownScheme) as e: - LOG.debug(encodeutils.exception_to_unicode(e)) - raise HTTPBadRequest(explanation=e.msg, - request=req, - content_type="text/plain") - # NOTE(zhiyan): A returned size of zero usually means - # the driver encountered an error. In this case the - # size provided by the client will be used as-is. - if (image_size_store and - image_size_store != image_size_meta): - msg = (_("Provided image size must match the stored" - " image size. 
(provided size: %(ps)d, " - "stored size: %(ss)d)") % - {"ps": image_size_meta, - "ss": image_size_store}) - LOG.warn(msg) - raise HTTPConflict(explanation=msg, - request=req, - content_type="text/plain") - location_data = {'url': location, 'metadata': {}, - 'status': 'active'} - image_meta = self._activate(req, image_id, location_data) - return image_meta - - def _validate_image_for_activation(self, req, id, values): - """Ensures that all required image metadata values are valid.""" - image = self.get_image_meta_or_404(req, id) - if values['disk_format'] is None: - if not image['disk_format']: - msg = _("Disk format is not specified.") - raise HTTPBadRequest(explanation=msg, request=req) - values['disk_format'] = image['disk_format'] - if values['container_format'] is None: - if not image['container_format']: - msg = _("Container format is not specified.") - raise HTTPBadRequest(explanation=msg, request=req) - values['container_format'] = image['container_format'] - if 'name' not in values: - values['name'] = image['name'] - - values = validate_image_meta(req, values) - return values - - @utils.mutating - def create(self, req, image_meta, image_data): - """ - Adds a new image to Glance. Four scenarios exist when creating an - image: - - 1. If the image data is available directly for upload, create can be - passed the image data as the request body and the metadata as the - request headers. The image will initially be 'queued', during - upload it will be in the 'saving' status, and then 'killed' or - 'active' depending on whether the upload completed successfully. - - 2. If the image data exists somewhere else, you can upload indirectly - from the external source using the x-glance-api-copy-from header. - Once the image is uploaded, the external store is not subsequently - consulted, i.e. the image content is served out from the configured - glance image store. State transitions are as for option #1. - - 3. 
If the image data exists somewhere else, you can reference the - source using the x-image-meta-location header. The image content - will be served out from the external store, i.e. is never uploaded - to the configured glance image store. - - 4. If the image data is not available yet, but you'd like reserve a - spot for it, you can omit the data and a record will be created in - the 'queued' state. This exists primarily to maintain backwards - compatibility with OpenStack/Rackspace API semantics. - - The request body *must* be encoded as application/octet-stream, - otherwise an HTTPBadRequest is returned. - - Upon a successful save of the image data and metadata, a response - containing metadata about the image is returned, including its - opaque identifier. - - :param req: The WSGI/Webob Request object - :param image_meta: Mapping of metadata about image - :param image_data: Actual image data that is to be stored - - :raises HTTPBadRequest: if x-image-meta-location is missing - and the request body is not application/octet-stream - image data. 
- """ - self._enforce(req, 'add_image') - is_public = image_meta.get('is_public') - if is_public: - self._enforce(req, 'publicize_image') - if Controller._copy_from(req): - self._enforce(req, 'copy_from') - if image_data or Controller._copy_from(req): - self._enforce(req, 'upload_image') - - self._enforce_create_protected_props(image_meta['properties'].keys(), - req) - - self._enforce_image_property_quota(image_meta, req=req) - - image_meta = self._reserve(req, image_meta) - id = image_meta['id'] - - image_meta = self._handle_source(req, id, image_meta, image_data) - - location_uri = image_meta.get('location') - if location_uri: - self.update_store_acls(req, id, location_uri, public=is_public) - - # Prevent client from learning the location, as it - # could contain security credentials - image_meta = redact_loc(image_meta) - - return {'image_meta': image_meta} - - @utils.mutating - def update(self, req, id, image_meta, image_data): - """ - Updates an existing image with the registry. - - :param request: The WSGI/Webob Request object - :param id: The opaque image identifier - - :returns: Returns the updated image information as a mapping - """ - self._enforce(req, 'modify_image') - is_public = image_meta.get('is_public') - if is_public: - self._enforce(req, 'publicize_image') - if Controller._copy_from(req): - self._enforce(req, 'copy_from') - if image_data or Controller._copy_from(req): - self._enforce(req, 'upload_image') - - orig_image_meta = self.get_image_meta_or_404(req, id) - orig_status = orig_image_meta['status'] - - # Do not allow any updates on a deleted image. 
- # Fix for LP Bug #1060930 - if orig_status == 'deleted': - msg = _("Forbidden to update deleted image.") - raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") - - if req.context.is_admin is False: - # Once an image is 'active' only an admin can - # modify certain core metadata keys - for key in ACTIVE_IMMUTABLE: - if ((orig_status == 'active' or orig_status == 'deactivated') - and key in image_meta - and image_meta.get(key) != orig_image_meta.get(key)): - msg = _("Forbidden to modify '%(key)s' of %(status)s " - "image.") % {'key': key, 'status': orig_status} - raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") - - for key in IMMUTABLE: - if (key in image_meta and - image_meta.get(key) != orig_image_meta.get(key)): - msg = _("Forbidden to modify '%s' of image.") % key - raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") - - # The default behaviour for a PUT /images/ is to - # override any properties that were previously set. This, however, - # leads to a number of issues for the common use case where a caller - # registers an image with some properties and then almost immediately - # uploads an image file along with some more properties. Here, we - # check for a special header value to be false in order to force - # properties NOT to be purged. However we also disable purging of - # properties if an image file is being uploaded... 
- purge_props = req.headers.get('x-glance-registry-purge-props', True) - purge_props = (strutils.bool_from_string(purge_props) and - image_data is None) - - if image_data is not None and orig_status != 'queued': - raise HTTPConflict(_("Cannot upload to an unqueued image")) - - # Only allow the Location|Copy-From fields to be modified if the - # image is in queued status, which indicates that the user called - # POST /images but originally supply neither a Location|Copy-From - # field NOR image data - location = self._external_source(image_meta, req) - reactivating = orig_status != 'queued' and location - activating = orig_status == 'queued' and (location or image_data) - - # Make image public in the backend store (if implemented) - orig_or_updated_loc = location or orig_image_meta.get('location') - if orig_or_updated_loc: - try: - if is_public is not None or location is not None: - self.update_store_acls(req, id, orig_or_updated_loc, - public=is_public) - except store.BadStoreUri: - msg = _("Invalid location: %s") % location - LOG.warn(msg) - raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - - if reactivating: - msg = _("Attempted to update Location field for an image " - "not in queued status.") - raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - - # ensure requester has permissions to create/update/delete properties - # according to property-protections.conf - orig_keys = set(orig_image_meta['properties']) - new_keys = set(image_meta['properties']) - self._enforce_update_protected_props( - orig_keys.intersection(new_keys), image_meta, - orig_image_meta, req) - self._enforce_create_protected_props( - new_keys.difference(orig_keys), req) - if purge_props: - self._enforce_delete_protected_props( - orig_keys.difference(new_keys), image_meta, - orig_image_meta, req) - - self._enforce_image_property_quota(image_meta, - orig_image_meta=orig_image_meta, - purge_props=purge_props, - req=req) - - try: - if 
location: - image_meta['size'] = self._get_size(req.context, image_meta, - location) - - image_meta = registry.update_image_metadata(req.context, - id, - image_meta, - purge_props) - - if activating: - image_meta = self._handle_source(req, id, image_meta, - image_data) - - except exception.Invalid as e: - msg = (_("Failed to update image metadata. Got error: %s") % - encodeutils.exception_to_unicode(e)) - LOG.warn(msg) - raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - except exception.ImageNotFound as e: - msg = (_("Failed to find image to update: %s") % - encodeutils.exception_to_unicode(e)) - LOG.warn(msg) - raise HTTPNotFound(explanation=msg, - request=req, - content_type="text/plain") - except exception.Forbidden as e: - msg = (_("Forbidden to update image: %s") % - encodeutils.exception_to_unicode(e)) - LOG.warn(msg) - raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") - except (exception.Conflict, exception.Duplicate) as e: - LOG.warn(encodeutils.exception_to_unicode(e)) - raise HTTPConflict(body=_('Image operation conflicts'), - request=req, - content_type='text/plain') - else: - self.notifier.info('image.update', redact_loc(image_meta)) - - # Prevent client from learning the location, as it - # could contain security credentials - image_meta = redact_loc(image_meta) - - self._enforce_read_protected_props(image_meta, req) - - return {'image_meta': image_meta} - - @utils.mutating - def delete(self, req, id): - """ - Deletes the image and all its chunks from the Glance - - :param req: The WSGI/Webob Request object - :param id: The opaque image identifier - - :raises HttpBadRequest: if image registry is invalid - :raises HttpNotFound: if image or any chunk is not available - :raises HttpUnauthorized: if image or any chunk is not - deleteable by the requesting user - """ - self._enforce(req, 'delete_image') - - image = self.get_image_meta_or_404(req, id) - if image['protected']: - msg = _("Image 
is protected") - LOG.warn(msg) - raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") - - if image['status'] == 'pending_delete': - msg = (_("Forbidden to delete a %s image.") % - image['status']) - LOG.warn(msg) - raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") - elif image['status'] == 'deleted': - msg = _("Image %s not found.") % id - LOG.warn(msg) - raise HTTPNotFound(explanation=msg, request=req, - content_type="text/plain") - - if image['location'] and CONF.delayed_delete: - status = 'pending_delete' - else: - status = 'deleted' - - ori_status = image['status'] - - try: - # Update the image from the registry first, since we rely on it - # for authorization checks. - # See https://bugs.launchpad.net/glance/+bug/1065187 - image = registry.update_image_metadata(req.context, id, - {'status': status}) - - try: - # The image's location field may be None in the case - # of a saving or queued image, therefore don't ask a backend - # to delete the image if the backend doesn't yet store it. 
- # See https://bugs.launchpad.net/glance/+bug/747799 - if image['location']: - for loc_data in image['location_data']: - if loc_data['status'] == 'active': - upload_utils.initiate_deletion(req, loc_data, id) - except Exception: - with excutils.save_and_reraise_exception(): - registry.update_image_metadata(req.context, id, - {'status': ori_status}) - - registry.delete_image_metadata(req.context, id) - except exception.ImageNotFound as e: - msg = (_("Failed to find image to delete: %s") % - encodeutils.exception_to_unicode(e)) - LOG.warn(msg) - raise HTTPNotFound(explanation=msg, - request=req, - content_type="text/plain") - except exception.Forbidden as e: - msg = (_("Forbidden to delete image: %s") % - encodeutils.exception_to_unicode(e)) - LOG.warn(msg) - raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") - except store.InUseByStore as e: - msg = (_("Image %(id)s could not be deleted because it is in use: " - "%(exc)s") - % {"id": id, "exc": encodeutils.exception_to_unicode(e)}) - LOG.warn(msg) - raise HTTPConflict(explanation=msg, - request=req, - content_type="text/plain") - else: - self.notifier.info('image.delete', redact_loc(image)) - return Response(body='', status=200) - - def get_store_or_400(self, request, scheme): - """ - Grabs the storage backend for the supplied store name - or raises an HTTPBadRequest (400) response - - :param request: The WSGI/Webob Request object - :param scheme: The backend store scheme - - :raises HTTPBadRequest: if store does not exist - """ - try: - return store.get_store_from_scheme(scheme) - except store.UnknownScheme: - msg = _("Store for scheme %s not found") % scheme - LOG.warn(msg) - raise HTTPBadRequest(explanation=msg, - request=request, - content_type='text/plain') - - -class ImageDeserializer(wsgi.JSONRequestDeserializer): - """Handles deserialization of specific controller method requests.""" - - def _deserialize(self, request): - result = {} - try: - result['image_meta'] = 
utils.get_image_meta_from_headers(request) - except exception.InvalidParameterValue as e: - msg = encodeutils.exception_to_unicode(e) - LOG.warn(msg, exc_info=True) - raise HTTPBadRequest(explanation=e.msg, request=request) - - image_meta = result['image_meta'] - image_meta = validate_image_meta(request, image_meta) - if request.content_length: - image_size = request.content_length - elif 'size' in image_meta: - image_size = image_meta['size'] - else: - image_size = None - - data = request.body_file if self.has_body(request) else None - - if image_size is None and data is not None: - data = utils.LimitingReader(data, CONF.image_size_cap) - - # NOTE(bcwaldon): this is a hack to make sure the downstream code - # gets the correct image data - request.body_file = data - - elif image_size is not None and image_size > CONF.image_size_cap: - max_image_size = CONF.image_size_cap - msg = (_("Denying attempt to upload image larger than %d" - " bytes.") % max_image_size) - LOG.warn(msg) - raise HTTPBadRequest(explanation=msg, request=request) - - result['image_data'] = data - return result - - def create(self, request): - return self._deserialize(request) - - def update(self, request): - return self._deserialize(request) - - -class ImageSerializer(wsgi.JSONResponseSerializer): - """Handles serialization of specific controller method responses.""" - - def __init__(self): - self.notifier = notifier.Notifier() - - def _inject_location_header(self, response, image_meta): - location = self._get_image_location(image_meta) - if six.PY2: - location = location.encode('utf-8') - response.headers['Location'] = location - - def _inject_checksum_header(self, response, image_meta): - if image_meta['checksum'] is not None: - checksum = image_meta['checksum'] - if six.PY2: - checksum = checksum.encode('utf-8') - response.headers['ETag'] = checksum - - def _inject_image_meta_headers(self, response, image_meta): - """ - Given a response and mapping of image metadata, injects - the Response 
with a set of HTTP headers for the image - metadata. Each main image metadata field is injected - as a HTTP header with key 'x-image-meta-' except - for the properties field, which is further broken out - into a set of 'x-image-meta-property-' headers - - :param response: The Webob Response object - :param image_meta: Mapping of image metadata - """ - headers = utils.image_meta_to_http_headers(image_meta) - - for k, v in headers.items(): - if six.PY3: - response.headers[str(k)] = str(v) - else: - response.headers[k.encode('utf-8')] = v.encode('utf-8') - - def _get_image_location(self, image_meta): - """Build a relative url to reach the image defined by image_meta.""" - return "/v1/images/%s" % image_meta['id'] - - def meta(self, response, result): - image_meta = result['image_meta'] - self._inject_image_meta_headers(response, image_meta) - self._inject_checksum_header(response, image_meta) - return response - - def show(self, response, result): - image_meta = result['image_meta'] - - image_iter = result['image_iterator'] - # image_meta['size'] should be an int, but could possibly be a str - expected_size = int(image_meta['size']) - response.app_iter = common.size_checked_iter( - response, image_meta, expected_size, image_iter, self.notifier) - # Using app_iter blanks content-length, so we set it here... 
- response.headers['Content-Length'] = str(image_meta['size']) - response.headers['Content-Type'] = 'application/octet-stream' - - self._inject_image_meta_headers(response, image_meta) - self._inject_checksum_header(response, image_meta) - - return response - - def update(self, response, result): - image_meta = result['image_meta'] - response.body = self.to_json(dict(image=image_meta)) - response.headers['Content-Type'] = 'application/json' - self._inject_checksum_header(response, image_meta) - return response - - def create(self, response, result): - image_meta = result['image_meta'] - response.status = 201 - response.headers['Content-Type'] = 'application/json' - response.body = self.to_json(dict(image=image_meta)) - self._inject_location_header(response, image_meta) - self._inject_checksum_header(response, image_meta) - return response - - -def create_resource(): - """Images resource factory method""" - deserializer = ImageDeserializer() - serializer = ImageSerializer() - return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/glance/api/v1/members.py b/glance/api/v1/members.py deleted file mode 100644 index 4233ea32..00000000 --- a/glance/api/v1/members.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# Copyright 2013 NTT corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -import webob.exc - -from glance.api import policy -from glance.api.v1 import controller -from glance.common import exception -from glance.common import utils -from glance.common import wsgi -from glance.i18n import _ -import glance.registry.client.v1.api as registry - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -CONF.import_opt('image_member_quota', 'glance.common.config') - - -class Controller(controller.BaseController): - - def __init__(self): - self.policy = policy.Enforcer() - - def _check_can_access_image_members(self, context): - if context.owner is None and not context.is_admin: - raise webob.exc.HTTPUnauthorized(_("No authenticated user")) - - def _enforce(self, req, action): - """Authorize an action against our policies""" - try: - self.policy.enforce(req.context, action, {}) - except exception.Forbidden: - LOG.debug("User not permitted to perform '%s' action", action) - raise webob.exc.HTTPForbidden() - - def _raise_404_if_image_deleted(self, req, image_id): - image = self.get_image_meta_or_404(req, image_id) - if image['status'] == 'deleted': - msg = _("Image with identifier %s has been deleted.") % image_id - raise webob.exc.HTTPNotFound(msg) - - def index(self, req, image_id): - """ - Return a list of dictionaries indicating the members of the - image, i.e., those tenants the image is shared with. - - :param req: the Request object coming from the wsgi layer - :param image_id: The opaque image identifier - :returns: The response body is a mapping of the following form - - :: - - {'members': [ - {'member_id': , - 'can_share': , ...}, ... 
- ]} - - """ - self._enforce(req, 'get_members') - self._raise_404_if_image_deleted(req, image_id) - - try: - members = registry.get_image_members(req.context, image_id) - except exception.NotFound: - msg = _("Image with identifier %s not found") % image_id - LOG.warn(msg) - raise webob.exc.HTTPNotFound(msg) - except exception.Forbidden: - msg = _("Unauthorized image access") - LOG.warn(msg) - raise webob.exc.HTTPForbidden(msg) - return dict(members=members) - - @utils.mutating - def delete(self, req, image_id, id): - """ - Removes a membership from the image. - """ - self._check_can_access_image_members(req.context) - self._enforce(req, 'delete_member') - self._raise_404_if_image_deleted(req, image_id) - - try: - registry.delete_member(req.context, image_id, id) - self._update_store_acls(req, image_id) - except exception.NotFound as e: - LOG.debug(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Forbidden as e: - LOG.debug("User not permitted to remove membership from image " - "'%s'", image_id) - raise webob.exc.HTTPNotFound(explanation=e.msg) - - return webob.exc.HTTPNoContent() - - def default(self, req, image_id, id, body=None): - """This will cover the missing 'show' and 'create' actions""" - raise webob.exc.HTTPMethodNotAllowed() - - def _enforce_image_member_quota(self, req, attempted): - if CONF.image_member_quota < 0: - # If value is negative, allow unlimited number of members - return - - maximum = CONF.image_member_quota - if attempted > maximum: - msg = _("The limit has been exceeded on the number of allowed " - "image members for this image. Attempted: %(attempted)s, " - "Maximum: %(maximum)s") % {'attempted': attempted, - 'maximum': maximum} - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, - request=req) - - @utils.mutating - def update(self, req, image_id, id, body=None): - """ - Adds a membership to the image, or updates an existing one. 
- If a body is present, it is a dict with the following format - - :: - - {'member': { - 'can_share': [True|False] - }} - - If `can_share` is provided, the member's ability to share is - set accordingly. If it is not provided, existing memberships - remain unchanged and new memberships default to False. - """ - self._check_can_access_image_members(req.context) - self._enforce(req, 'modify_member') - self._raise_404_if_image_deleted(req, image_id) - - new_number_of_members = len(registry.get_image_members(req.context, - image_id)) + 1 - self._enforce_image_member_quota(req, new_number_of_members) - - # Figure out can_share - can_share = None - if body and 'member' in body and 'can_share' in body['member']: - can_share = bool(body['member']['can_share']) - try: - registry.add_member(req.context, image_id, id, can_share) - self._update_store_acls(req, image_id) - except exception.Invalid as e: - LOG.debug(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except exception.NotFound as e: - LOG.debug(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Forbidden as e: - LOG.debug(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPNotFound(explanation=e.msg) - - return webob.exc.HTTPNoContent() - - @utils.mutating - def update_all(self, req, image_id, body): - """ - Replaces the members of the image with those specified in the - body. The body is a dict with the following format - - :: - - {'memberships': [ - {'member_id': , - ['can_share': [True|False]]}, ... 
- ]} - - """ - self._check_can_access_image_members(req.context) - self._enforce(req, 'modify_member') - self._raise_404_if_image_deleted(req, image_id) - - memberships = body.get('memberships') - if memberships: - new_number_of_members = len(body['memberships']) - self._enforce_image_member_quota(req, new_number_of_members) - - try: - registry.replace_members(req.context, image_id, body) - self._update_store_acls(req, image_id) - except exception.Invalid as e: - LOG.debug(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except exception.NotFound as e: - LOG.debug(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Forbidden as e: - LOG.debug(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPNotFound(explanation=e.msg) - - return webob.exc.HTTPNoContent() - - def index_shared_images(self, req, id): - """ - Retrieves list of image memberships for the given member. - - :param req: the Request object coming from the wsgi layer - :param id: the opaque member identifier - :returns: The response body is a mapping of the following form - - :: - - {'shared_images': [ - {'image_id': , - 'can_share': , ...}, ... 
- ]} - - """ - try: - members = registry.get_member_images(req.context, id) - except exception.NotFound as e: - LOG.debug(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Forbidden as e: - LOG.debug(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPForbidden(explanation=e.msg) - return dict(shared_images=members) - - def _update_store_acls(self, req, image_id): - image_meta = self.get_image_meta_or_404(req, image_id) - location_uri = image_meta.get('location') - public = image_meta.get('is_public') - self.update_store_acls(req, image_id, location_uri, public) - - -def create_resource(): - """Image members resource factory method""" - deserializer = wsgi.JSONRequestDeserializer() - serializer = wsgi.JSONResponseSerializer() - return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/glance/api/v1/router.py b/glance/api/v1/router.py deleted file mode 100644 index 1b3d0755..00000000 --- a/glance/api/v1/router.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from glance.api.v1 import images -from glance.api.v1 import members -from glance.common import wsgi - - -class API(wsgi.Router): - - """WSGI router for Glance v1 API requests.""" - - def __init__(self, mapper): - reject_method_resource = wsgi.Resource(wsgi.RejectMethodController()) - - images_resource = images.create_resource() - - mapper.connect("/", - controller=images_resource, - action="index") - mapper.connect("/images", - controller=images_resource, - action='index', - conditions={'method': ['GET']}) - mapper.connect("/images", - controller=images_resource, - action='create', - conditions={'method': ['POST']}) - mapper.connect("/images", - controller=reject_method_resource, - action='reject', - allowed_methods='GET, POST') - mapper.connect("/images/detail", - controller=images_resource, - action='detail', - conditions={'method': ['GET', 'HEAD']}) - mapper.connect("/images/detail", - controller=reject_method_resource, - action='reject', - allowed_methods='GET, HEAD') - mapper.connect("/images/{id}", - controller=images_resource, - action="meta", - conditions=dict(method=["HEAD"])) - mapper.connect("/images/{id}", - controller=images_resource, - action="show", - conditions=dict(method=["GET"])) - mapper.connect("/images/{id}", - controller=images_resource, - action="update", - conditions=dict(method=["PUT"])) - mapper.connect("/images/{id}", - controller=images_resource, - action="delete", - conditions=dict(method=["DELETE"])) - mapper.connect("/images/{id}", - controller=reject_method_resource, - action='reject', - allowed_methods='GET, HEAD, PUT, DELETE') - - members_resource = members.create_resource() - - mapper.connect("/images/{image_id}/members", - controller=members_resource, - action="index", - conditions={'method': ['GET']}) - mapper.connect("/images/{image_id}/members", - controller=members_resource, - action="update_all", - conditions=dict(method=["PUT"])) - mapper.connect("/images/{image_id}/members", - controller=reject_method_resource, - 
action='reject', - allowed_methods='GET, PUT') - mapper.connect("/images/{image_id}/members/{id}", - controller=members_resource, - action="show", - conditions={'method': ['GET']}) - mapper.connect("/images/{image_id}/members/{id}", - controller=members_resource, - action="update", - conditions={'method': ['PUT']}) - mapper.connect("/images/{image_id}/members/{id}", - controller=members_resource, - action="delete", - conditions={'method': ['DELETE']}) - mapper.connect("/images/{image_id}/members/{id}", - controller=reject_method_resource, - action='reject', - allowed_methods='GET, PUT, DELETE') - mapper.connect("/shared-images/{id}", - controller=members_resource, - action="index_shared_images") - - super(API, self).__init__(mapper) diff --git a/glance/api/v1/upload_utils.py b/glance/api/v1/upload_utils.py deleted file mode 100644 index bfdd264f..00000000 --- a/glance/api/v1/upload_utils.py +++ /dev/null @@ -1,293 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import glance_store as store_api -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils -import webob.exc - -from glance.common import exception -from glance.common import store_utils -from glance.common import utils -import glance.db -from glance.i18n import _, _LE, _LI -import glance.registry.client.v1.api as registry - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def initiate_deletion(req, location_data, id): - """ - Deletes image data from the location of backend store. - - :param req: The WSGI/Webob Request object - :param location_data: Location to the image data in a data store - :param id: Opaque image identifier - """ - store_utils.delete_image_location_from_backend(req.context, - id, location_data) - - -def _kill(req, image_id, from_state): - """ - Marks the image status to `killed`. - - :param req: The WSGI/Webob Request object - :param image_id: Opaque image identifier - :param from_state: Permitted current status for transition to 'killed' - """ - # TODO(dosaboy): http://docs.openstack.org/developer/glance/statuses.html - # needs updating to reflect the fact that queued->killed and saving->killed - # are both allowed. - registry.update_image_metadata(req.context, image_id, - {'status': 'killed'}, - from_state=from_state) - - -def safe_kill(req, image_id, from_state): - """ - Mark image killed without raising exceptions if it fails. - - Since _kill is meant to be called from exceptions handlers, it should - not raise itself, rather it should just log its error. 
- - :param req: The WSGI/Webob Request object - :param image_id: Opaque image identifier - :param from_state: Permitted current status for transition to 'killed' - """ - try: - _kill(req, image_id, from_state) - except Exception: - LOG.exception(_LE("Unable to kill image %(id)s: ") % {'id': image_id}) - - -def upload_data_to_store(req, image_meta, image_data, store, notifier): - """ - Upload image data to specified store. - - Upload image data to the store and cleans up on error. - """ - image_id = image_meta['id'] - - db_api = glance.db.get_api() - image_size = image_meta.get('size') - - try: - # By default image_data will be passed as CooperativeReader object. - # But if 'user_storage_quota' is enabled and 'remaining' is not None - # then it will be passed as object of LimitingReader to - # 'store_add_to_backend' method. - image_data = utils.CooperativeReader(image_data) - - remaining = glance.api.common.check_quota( - req.context, image_size, db_api, image_id=image_id) - if remaining is not None: - image_data = utils.LimitingReader(image_data, remaining) - - (uri, - size, - checksum, - location_metadata) = store_api.store_add_to_backend( - image_meta['id'], - image_data, - image_meta['size'], - store, - context=req.context) - - location_data = {'url': uri, - 'metadata': location_metadata, - 'status': 'active'} - - try: - # recheck the quota in case there were simultaneous uploads that - # did not provide the size - glance.api.common.check_quota( - req.context, size, db_api, image_id=image_id) - except exception.StorageQuotaFull: - with excutils.save_and_reraise_exception(): - LOG.info(_LI('Cleaning up %s after exceeding ' - 'the quota'), image_id) - store_utils.safe_delete_from_backend( - req.context, image_meta['id'], location_data) - - def _kill_mismatched(image_meta, attr, actual): - supplied = image_meta.get(attr) - if supplied and supplied != actual: - msg = (_("Supplied %(attr)s (%(supplied)s) and " - "%(attr)s generated from uploaded image " - 
"(%(actual)s) did not match. Setting image " - "status to 'killed'.") % {'attr': attr, - 'supplied': supplied, - 'actual': actual}) - LOG.error(msg) - safe_kill(req, image_id, 'saving') - initiate_deletion(req, location_data, image_id) - raise webob.exc.HTTPBadRequest(explanation=msg, - content_type="text/plain", - request=req) - - # Verify any supplied size/checksum value matches size/checksum - # returned from store when adding image - _kill_mismatched(image_meta, 'size', size) - _kill_mismatched(image_meta, 'checksum', checksum) - - # Update the database with the checksum returned - # from the backend store - LOG.debug("Updating image %(image_id)s data. " - "Checksum set to %(checksum)s, size set " - "to %(size)d", {'image_id': image_id, - 'checksum': checksum, - 'size': size}) - update_data = {'checksum': checksum, - 'size': size} - try: - try: - state = 'saving' - image_meta = registry.update_image_metadata(req.context, - image_id, - update_data, - from_state=state) - except exception.Duplicate: - image = registry.get_image_metadata(req.context, image_id) - if image['status'] == 'deleted': - raise exception.ImageNotFound() - else: - raise - except exception.NotAuthenticated as e: - # Delete image data due to possible token expiration. - LOG.debug("Authentication error - the token may have " - "expired during file upload. Deleting image data for " - " %s " % image_id) - initiate_deletion(req, location_data, image_id) - raise webob.exc.HTTPUnauthorized(explanation=e.msg, request=req) - except exception.ImageNotFound: - msg = _("Image %s could not be found after upload. 
The image may" - " have been deleted during the upload.") % image_id - LOG.info(msg) - - # NOTE(jculp): we need to clean up the datastore if an image - # resource is deleted while the image data is being uploaded - # - # We get "location_data" from above call to store.add(), any - # exceptions that occur there handle this same issue internally, - # Since this is store-agnostic, should apply to all stores. - initiate_deletion(req, location_data, image_id) - raise webob.exc.HTTPPreconditionFailed(explanation=msg, - request=req, - content_type='text/plain') - - except store_api.StoreAddDisabled: - msg = _("Error in store configuration. Adding images to store " - "is disabled.") - LOG.exception(msg) - safe_kill(req, image_id, 'saving') - notifier.error('image.upload', msg) - raise webob.exc.HTTPGone(explanation=msg, request=req, - content_type='text/plain') - - except (store_api.Duplicate, exception.Duplicate) as e: - msg = (_("Attempt to upload duplicate image: %s") % - encodeutils.exception_to_unicode(e)) - LOG.warn(msg) - # NOTE(dosaboy): do not delete the image since it is likely that this - # conflict is a result of another concurrent upload that will be - # successful. 
- notifier.error('image.upload', msg) - raise webob.exc.HTTPConflict(explanation=msg, - request=req, - content_type="text/plain") - - except exception.Forbidden as e: - msg = (_("Forbidden upload attempt: %s") % - encodeutils.exception_to_unicode(e)) - LOG.warn(msg) - safe_kill(req, image_id, 'saving') - notifier.error('image.upload', msg) - raise webob.exc.HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") - - except store_api.StorageFull as e: - msg = (_("Image storage media is full: %s") % - encodeutils.exception_to_unicode(e)) - LOG.error(msg) - safe_kill(req, image_id, 'saving') - notifier.error('image.upload', msg) - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, - request=req, - content_type='text/plain') - - except store_api.StorageWriteDenied as e: - msg = (_("Insufficient permissions on image storage media: %s") % - encodeutils.exception_to_unicode(e)) - LOG.error(msg) - safe_kill(req, image_id, 'saving') - notifier.error('image.upload', msg) - raise webob.exc.HTTPServiceUnavailable(explanation=msg, - request=req, - content_type='text/plain') - - except exception.ImageSizeLimitExceeded as e: - msg = (_("Denying attempt to upload image larger than %d bytes.") - % CONF.image_size_cap) - LOG.warn(msg) - safe_kill(req, image_id, 'saving') - notifier.error('image.upload', msg) - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, - request=req, - content_type='text/plain') - - except exception.StorageQuotaFull as e: - msg = (_("Denying attempt to upload image because it exceeds the " - "quota: %s") % encodeutils.exception_to_unicode(e)) - LOG.warn(msg) - safe_kill(req, image_id, 'saving') - notifier.error('image.upload', msg) - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, - request=req, - content_type='text/plain') - - except webob.exc.HTTPError: - # NOTE(bcwaldon): Ideally, we would just call 'raise' here, - # but something in the above function calls is affecting the - # exception context and we must 
explicitly re-raise the - # caught exception. - msg = _LE("Received HTTP error while uploading image %s") % image_id - notifier.error('image.upload', msg) - with excutils.save_and_reraise_exception(): - LOG.exception(msg) - safe_kill(req, image_id, 'saving') - - except (ValueError, IOError) as e: - msg = _("Client disconnected before sending all data to backend") - LOG.warn(msg) - safe_kill(req, image_id, 'saving') - raise webob.exc.HTTPBadRequest(explanation=msg, - content_type="text/plain", - request=req) - - except Exception as e: - msg = _("Failed to upload image %s") % image_id - LOG.exception(msg) - safe_kill(req, image_id, 'saving') - notifier.error('image.upload', msg) - raise webob.exc.HTTPInternalServerError(explanation=msg, - request=req, - content_type='text/plain') - - return image_meta, location_data diff --git a/glance/api/v2/__init__.py b/glance/api/v2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/api/v2/discovery.py b/glance/api/v2/discovery.py deleted file mode 100644 index 0166ed9d..00000000 --- a/glance/api/v2/discovery.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2017 RedHat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from glance.common import wsgi - -CONF = cfg.CONF - - -class InfoController(object): - def get_image_import(self, req): - # TODO(jokke): All the rest of the boundaries should be implemented. 
- # TODO(jokke): Once we have the rest of the methods implemented - # the value should be inherited from the CONF rather than hard- - # coded. - import_methods = { - 'description': 'Import methods available.', - 'type': 'array', - 'value': ['glance-direct'] - } - - # TODO(jokke): Will be removed after the config option - # is removed. (deprecated) - if not CONF.enable_image_import: - import_methods['value'] = [] - - return { - 'import-methods': import_methods - } - - -def create_resource(): - return wsgi.Resource(InfoController()) diff --git a/glance/api/v2/image_actions.py b/glance/api/v2/image_actions.py deleted file mode 100644 index 9f76ec99..00000000 --- a/glance/api/v2/image_actions.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2015 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import glance_store -from oslo_log import log as logging -from six.moves import http_client as http -import webob.exc - -from glance.api import policy -from glance.common import exception -from glance.common import utils -from glance.common import wsgi -import glance.db -import glance.gateway -from glance.i18n import _LI -import glance.notifier - - -LOG = logging.getLogger(__name__) - - -class ImageActionsController(object): - def __init__(self, db_api=None, policy_enforcer=None, notifier=None, - store_api=None): - self.db_api = db_api or glance.db.get_api() - self.policy = policy_enforcer or policy.Enforcer() - self.notifier = notifier or glance.notifier.Notifier() - self.store_api = store_api or glance_store - self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, - self.notifier, self.policy) - - @utils.mutating - def deactivate(self, req, image_id): - image_repo = self.gateway.get_repo(req.context) - try: - image = image_repo.get(image_id) - status = image.status - image.deactivate() - # not necessary to change the status if it's already 'deactivated' - if status == 'active': - image_repo.save(image, from_state='active') - LOG.info(_LI("Image %s is deactivated"), image_id) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Forbidden as e: - LOG.debug("User not permitted to deactivate image '%s'", image_id) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.InvalidImageStatusTransition as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - - @utils.mutating - def reactivate(self, req, image_id): - image_repo = self.gateway.get_repo(req.context) - try: - image = image_repo.get(image_id) - status = image.status - image.reactivate() - # not necessary to change the status if it's already 'active' - if status == 'deactivated': - image_repo.save(image, from_state='deactivated') - LOG.info(_LI("Image %s is reactivated"), image_id) - except exception.NotFound as e: - raise 
webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Forbidden as e: - LOG.debug("User not permitted to reactivate image '%s'", image_id) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.InvalidImageStatusTransition as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - - -class ResponseSerializer(wsgi.JSONResponseSerializer): - - def deactivate(self, response, result): - response.status_int = http.NO_CONTENT - - def reactivate(self, response, result): - response.status_int = http.NO_CONTENT - - -def create_resource(): - """Image data resource factory method""" - deserializer = None - serializer = ResponseSerializer() - controller = ImageActionsController() - return wsgi.Resource(controller, deserializer, serializer) diff --git a/glance/api/v2/image_data.py b/glance/api/v2/image_data.py deleted file mode 100644 index 541a7e59..00000000 --- a/glance/api/v2/image_data.py +++ /dev/null @@ -1,469 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from cursive import exception as cursive_exception -import glance_store -from glance_store import backend -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils -import six -import webob.exc - -import glance.api.policy -from glance.common import exception -from glance.common import trust_auth -from glance.common import utils -from glance.common import wsgi -import glance.db -import glance.gateway -from glance.i18n import _, _LE, _LI -import glance.notifier - - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - - -class ImageDataController(object): - def __init__(self, db_api=None, store_api=None, - policy_enforcer=None, notifier=None, - gateway=None): - if gateway is None: - db_api = db_api or glance.db.get_api() - store_api = store_api or glance_store - policy = policy_enforcer or glance.api.policy.Enforcer() - notifier = notifier or glance.notifier.Notifier() - gateway = glance.gateway.Gateway(db_api, store_api, - notifier, policy) - self.gateway = gateway - - def _restore(self, image_repo, image): - """ - Restore the image to queued status. - - :param image_repo: The instance of ImageRepo - :param image: The image will be restored - """ - try: - if image_repo and image: - image.status = 'queued' - image_repo.save(image) - except Exception as e: - msg = (_LE("Unable to restore image %(image_id)s: %(e)s") % - {'image_id': image.image_id, - 'e': encodeutils.exception_to_unicode(e)}) - LOG.exception(msg) - - def _unstage(self, image_repo, image, staging_store): - """ - Restore the image to queued status and remove data from staging. 
- - :param image_repo: The instance of ImageRepo - :param image: The image will be restored - :param staging_store: The store used for staging - """ - loc = glance_store.location.get_location_from_uri(str( - CONF.node_staging_uri + '/' + image.image_id)) - try: - staging_store.delete(loc) - except glance_store.exceptions.NotFound: - pass - finally: - self._restore(image_repo, image) - - def _delete(self, image_repo, image): - """Delete the image. - - :param image_repo: The instance of ImageRepo - :param image: The image that will be deleted - """ - try: - if image_repo and image: - image.status = 'killed' - image_repo.save(image) - except Exception as e: - msg = (_LE("Unable to delete image %(image_id)s: %(e)s") % - {'image_id': image.image_id, - 'e': encodeutils.exception_to_unicode(e)}) - LOG.exception(msg) - - @utils.mutating - def upload(self, req, image_id, data, size): - image_repo = self.gateway.get_repo(req.context) - image = None - refresher = None - cxt = req.context - try: - image = image_repo.get(image_id) - image.status = 'saving' - try: - if CONF.data_api == 'glance.db.registry.api': - # create a trust if backend is registry - try: - # request user plugin for current token - user_plugin = req.environ.get('keystone.token_auth') - roles = [] - # use roles from request environment because they - # are not transformed to lower-case unlike cxt.roles - for role_info in req.environ.get( - 'keystone.token_info')['token']['roles']: - roles.append(role_info['name']) - refresher = trust_auth.TokenRefresher(user_plugin, - cxt.tenant, - roles) - except Exception as e: - LOG.info(_LI("Unable to create trust: %s " - "Use the existing user token."), - encodeutils.exception_to_unicode(e)) - - image_repo.save(image, from_state='queued') - image.set_data(data, size) - - try: - image_repo.save(image, from_state='saving') - except exception.NotAuthenticated: - if refresher is not None: - # request a new token to update an image in database - cxt.auth_token = 
refresher.refresh_token() - image_repo = self.gateway.get_repo(req.context) - image_repo.save(image, from_state='saving') - else: - raise - - try: - # release resources required for re-auth - if refresher is not None: - refresher.release_resources() - except Exception as e: - LOG.info(_LI("Unable to delete trust %(trust)s: %(msg)s"), - {"trust": refresher.trust_id, - "msg": encodeutils.exception_to_unicode(e)}) - - except (glance_store.NotFound, - exception.ImageNotFound, - exception.Conflict): - msg = (_("Image %s could not be found after upload. " - "The image may have been deleted during the " - "upload, cleaning up the chunks uploaded.") % - image_id) - LOG.warn(msg) - # NOTE(sridevi): Cleaning up the uploaded chunks. - try: - image.delete() - except exception.ImageNotFound: - # NOTE(sridevi): Ignore this exception - pass - raise webob.exc.HTTPGone(explanation=msg, - request=req, - content_type='text/plain') - except exception.NotAuthenticated: - msg = (_("Authentication error - the token may have " - "expired during file upload. Deleting image data for " - "%s.") % image_id) - LOG.debug(msg) - try: - image.delete() - except exception.NotAuthenticated: - # NOTE: Ignore this exception - pass - raise webob.exc.HTTPUnauthorized(explanation=msg, - request=req, - content_type='text/plain') - except ValueError as e: - LOG.debug("Cannot save data for image %(id)s: %(e)s", - {'id': image_id, - 'e': encodeutils.exception_to_unicode(e)}) - self._restore(image_repo, image) - raise webob.exc.HTTPBadRequest( - explanation=encodeutils.exception_to_unicode(e)) - - except glance_store.StoreAddDisabled: - msg = _("Error in store configuration. 
Adding images to store " - "is disabled.") - LOG.exception(msg) - self._restore(image_repo, image) - raise webob.exc.HTTPGone(explanation=msg, request=req, - content_type='text/plain') - - except exception.InvalidImageStatusTransition as e: - msg = encodeutils.exception_to_unicode(e) - LOG.exception(msg) - raise webob.exc.HTTPConflict(explanation=e.msg, request=req) - - except exception.Forbidden as e: - msg = ("Not allowed to upload image data for image %s" % - image_id) - LOG.debug(msg) - raise webob.exc.HTTPForbidden(explanation=msg, request=req) - - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - - except glance_store.StorageFull as e: - msg = _("Image storage media " - "is full: %s") % encodeutils.exception_to_unicode(e) - LOG.error(msg) - self._restore(image_repo, image) - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, - request=req) - - except exception.StorageQuotaFull as e: - msg = _("Image exceeds the storage " - "quota: %s") % encodeutils.exception_to_unicode(e) - LOG.error(msg) - self._restore(image_repo, image) - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, - request=req) - - except exception.ImageSizeLimitExceeded as e: - msg = _("The incoming image is " - "too large: %s") % encodeutils.exception_to_unicode(e) - LOG.error(msg) - self._restore(image_repo, image) - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, - request=req) - - except glance_store.StorageWriteDenied as e: - msg = _("Insufficient permissions on image " - "storage media: %s") % encodeutils.exception_to_unicode(e) - LOG.error(msg) - self._restore(image_repo, image) - raise webob.exc.HTTPServiceUnavailable(explanation=msg, - request=req) - - except cursive_exception.SignatureVerificationError as e: - msg = (_LE("Signature verification failed for image %(id)s: %(e)s") - % {'id': image_id, - 'e': encodeutils.exception_to_unicode(e)}) - LOG.error(msg) - self._delete(image_repo, image) - raise 
webob.exc.HTTPBadRequest(explanation=msg) - - except webob.exc.HTTPGone as e: - with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed to upload image data due to HTTP error")) - - except webob.exc.HTTPError as e: - with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed to upload image data due to HTTP error")) - self._restore(image_repo, image) - - except Exception as e: - with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed to upload image data due to " - "internal error")) - self._restore(image_repo, image) - - @utils.mutating - def stage(self, req, image_id, data, size): - image_repo = self.gateway.get_repo(req.context) - image = None - - # NOTE(jokke): this is horrible way to do it but as long as - # glance_store is in a shape it is, the only way. Don't hold me - # accountable for it. - def _build_staging_store(self): - conf = cfg.ConfigOpts() - backend.register_opts(conf) - conf.set_override('filesystem_store_datadir', - CONF.node_staging_uri[7:], - group='glance_store') - staging_store = backend._load_store(conf, 'file') - - try: - staging_store.configure() - except AttributeError: - msg = _("'node_staging_uri' is not set correctly. 
Could not " - "load staging store.") - raise exception.BadStoreUri(message=msg) - return staging_store - - staging_store = _build_staging_store() - - try: - image = image_repo.get(image_id) - image.status = 'uploading' - image_repo.save(image, from_state='queued') - try: - staging_store.add(image_id, data, 0) - except glance_store.Duplicate as e: - msg = _("The image %s has data on staging") % image_id - raise webob.exc.HTTPConflict(explanation=msg) - self._restore(image_repo, image) - - except glance_store.StorageFull as e: - msg = _("Image storage media " - "is full: %s") % encodeutils.exception_to_unicode(e) - LOG.error(msg) - self._unstage(image_repo, image, staging_store) - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, - request=req) - - except exception.StorageQuotaFull as e: - msg = _("Image exceeds the storage " - "quota: %s") % encodeutils.exception_to_unicode(e) - LOG.debug(msg) - self._unstage(image_repo, image, staging_store) - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, - request=req) - - except exception.ImageSizeLimitExceeded as e: - msg = _("The incoming image is " - "too large: %s") % encodeutils.exception_to_unicode(e) - LOG.debug(msg) - self._unstage(image_repo, image, staging_store) - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, - request=req) - - except glance_store.StorageWriteDenied as e: - msg = _("Insufficient permissions on image " - "storage media: %s") % encodeutils.exception_to_unicode(e) - LOG.error(msg) - self._unstage(image_repo, image) - raise webob.exc.HTTPServiceUnavailable(explanation=msg, - request=req) - - except Exception as e: - with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Failed to stage image data due to " - "internal error")) - self._restore(image_repo, image) - - def download(self, req, image_id): - image_repo = self.gateway.get_repo(req.context) - try: - image = image_repo.get(image_id) - if image.status == 'deactivated' and not req.context.is_admin: - msg = 
_('The requested image has been deactivated. ' - 'Image data download is forbidden.') - raise exception.Forbidden(message=msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Forbidden as e: - LOG.debug("User not permitted to download image '%s'", image_id) - raise webob.exc.HTTPForbidden(explanation=e.msg) - - return image - - -class RequestDeserializer(wsgi.JSONRequestDeserializer): - - def upload(self, request): - try: - request.get_content_type(('application/octet-stream',)) - except exception.InvalidContentType as e: - raise webob.exc.HTTPUnsupportedMediaType(explanation=e.msg) - - if self.is_valid_encoding(request) and self.is_valid_method(request): - request.is_body_readable = True - - image_size = request.content_length or None - return {'size': image_size, 'data': request.body_file} - - -class ResponseSerializer(wsgi.JSONResponseSerializer): - - def download(self, response, image): - - offset, chunk_size = 0, None - # NOTE(dharinic): In case of a malformed range header, - # glance/common/wsgi.py will raise HTTPRequestRangeNotSatisfiable - # (setting status_code to 416) - range_val = response.request.get_range_from_request(image.size) - - if range_val: - if isinstance(range_val, webob.byterange.Range): - response_end = image.size - 1 - # NOTE(dharinic): webob parsing is zero-indexed. - # i.e.,to download first 5 bytes of a 10 byte image, - # request should be "bytes=0-4" and the response would be - # "bytes 0-4/10". - # Range if validated, will never have 'start' object as None. - if range_val.start >= 0: - offset = range_val.start - else: - # NOTE(dharinic): Negative start values needs to be - # processed to allow suffix-length for Range request - # like "bytes=-2" as per rfc7233. 
- if abs(range_val.start) < image.size: - offset = image.size + range_val.start - - if range_val.end is not None and range_val.end < image.size: - chunk_size = range_val.end - offset - response_end = range_val.end - 1 - else: - chunk_size = image.size - offset - - # NOTE(dharinic): For backward compatibility reasons, we maintain - # support for 'Content-Range' in requests even though it's not - # correct to use it in requests. - elif isinstance(range_val, webob.byterange.ContentRange): - response_end = range_val.stop - 1 - # NOTE(flaper87): if not present, both, start - # and stop, will be None. - offset = range_val.start - chunk_size = range_val.stop - offset - - response.status_int = 206 - - response.headers['Content-Type'] = 'application/octet-stream' - - try: - # NOTE(markwash): filesystem store (and maybe others?) cause a - # problem with the caching middleware if they are not wrapped in - # an iterator very strange - response.app_iter = iter(image.get_data(offset=offset, - chunk_size=chunk_size)) - # NOTE(dharinic): In case of a full image download, when - # chunk_size was none, reset it to image.size to set the - # response header's Content-Length. - if chunk_size is not None: - response.headers['Content-Range'] = 'bytes %s-%s/%s'\ - % (offset, - response_end, - image.size) - else: - chunk_size = image.size - except glance_store.NotFound as e: - raise webob.exc.HTTPNoContent(explanation=e.msg) - except glance_store.RemoteServiceUnavailable as e: - raise webob.exc.HTTPServiceUnavailable(explanation=e.msg) - except (glance_store.StoreGetNotSupported, - glance_store.StoreRandomGetNotSupported) as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except exception.Forbidden as e: - LOG.debug("User not permitted to download image '%s'", image) - raise webob.exc.HTTPForbidden(explanation=e.msg) - # NOTE(saschpe): "response.app_iter = ..." 
currently resets Content-MD5 - # (https://github.com/Pylons/webob/issues/86), so it should be set - # afterwards for the time being. - if image.checksum: - response.headers['Content-MD5'] = image.checksum - # NOTE(markwash): "response.app_iter = ..." also erroneously resets the - # content-length - response.headers['Content-Length'] = six.text_type(chunk_size) - - def upload(self, response, result): - response.status_int = 204 - - -def create_resource(): - """Image data resource factory method""" - deserializer = RequestDeserializer() - serializer = ResponseSerializer() - controller = ImageDataController() - return wsgi.Resource(controller, deserializer, serializer) diff --git a/glance/api/v2/image_members.py b/glance/api/v2/image_members.py deleted file mode 100644 index 474fb072..00000000 --- a/glance/api/v2/image_members.py +++ /dev/null @@ -1,391 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -import glance_store -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -import six -from six.moves import http_client as http -import webob - -from glance.api import policy -from glance.common import exception -from glance.common import timeutils -from glance.common import utils -from glance.common import wsgi -import glance.db -import glance.gateway -from glance.i18n import _ -import glance.notifier -import glance.schema - - -LOG = logging.getLogger(__name__) - - -class ImageMembersController(object): - def __init__(self, db_api=None, policy_enforcer=None, notifier=None, - store_api=None): - self.db_api = db_api or glance.db.get_api() - self.policy = policy_enforcer or policy.Enforcer() - self.notifier = notifier or glance.notifier.Notifier() - self.store_api = store_api or glance_store - self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, - self.notifier, self.policy) - - def _get_member_repo(self, req, image): - try: - # For public, private, and community images, a forbidden exception - # with message "Only shared images have members." is thrown. 
- return self.gateway.get_member_repo(image, req.context) - except exception.Forbidden as e: - msg = (_("Error fetching members of image %(image_id)s: " - "%(inner_msg)s") % {"image_id": image.image_id, - "inner_msg": e.msg}) - LOG.warning(msg) - raise webob.exc.HTTPForbidden(explanation=msg) - - def _lookup_image(self, req, image_id): - image_repo = self.gateway.get_repo(req.context) - try: - return image_repo.get(image_id) - except (exception.NotFound): - msg = _("Image %s not found.") % image_id - LOG.warning(msg) - raise webob.exc.HTTPNotFound(explanation=msg) - except exception.Forbidden: - msg = _("You are not authorized to lookup image %s.") % image_id - LOG.warning(msg) - raise webob.exc.HTTPForbidden(explanation=msg) - - def _lookup_member(self, req, image, member_id): - member_repo = self._get_member_repo(req, image) - try: - return member_repo.get(member_id) - except (exception.NotFound): - msg = (_("%(m_id)s not found in the member list of the image " - "%(i_id)s.") % {"m_id": member_id, - "i_id": image.image_id}) - LOG.warning(msg) - raise webob.exc.HTTPNotFound(explanation=msg) - except exception.Forbidden: - msg = (_("You are not authorized to lookup the members of the " - "image %s.") % image.image_id) - LOG.warning(msg) - raise webob.exc.HTTPForbidden(explanation=msg) - - @utils.mutating - def create(self, req, image_id, member_id): - """ - Adds a membership to the image. 
- :param req: the Request object coming from the wsgi layer - :param image_id: the image identifier - :param member_id: the member identifier - :returns: The response body is a mapping of the following form - - :: - - {'member_id': , - 'image_id': , - 'status': - 'created_at': .., - 'updated_at': ..} - - """ - image = self._lookup_image(req, image_id) - member_repo = self._get_member_repo(req, image) - image_member_factory = self.gateway.get_image_member_factory( - req.context) - try: - new_member = image_member_factory.new_image_member(image, - member_id) - member_repo.add(new_member) - return new_member - except exception.Forbidden: - msg = _("Not allowed to create members for image %s.") % image_id - LOG.warning(msg) - raise webob.exc.HTTPForbidden(explanation=msg) - except exception.Duplicate: - msg = _("Member %(member_id)s is duplicated for image " - "%(image_id)s") % {"member_id": member_id, - "image_id": image_id} - LOG.warning(msg) - raise webob.exc.HTTPConflict(explanation=msg) - except exception.ImageMemberLimitExceeded as e: - msg = (_("Image member limit exceeded for image %(id)s: %(e)s:") - % {"id": image_id, - "e": encodeutils.exception_to_unicode(e)}) - LOG.warning(msg) - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) - - @utils.mutating - def update(self, req, image_id, member_id, status): - """ - Adds a membership to the image. 
- :param req: the Request object coming from the wsgi layer - :param image_id: the image identifier - :param member_id: the member identifier - :returns: The response body is a mapping of the following form - - :: - - {'member_id': , - 'image_id': , - 'status': , - 'created_at': .., - 'updated_at': ..} - - """ - image = self._lookup_image(req, image_id) - member_repo = self._get_member_repo(req, image) - member = self._lookup_member(req, image, member_id) - try: - member.status = status - member_repo.save(member) - return member - except exception.Forbidden: - msg = _("Not allowed to update members for image %s.") % image_id - LOG.warning(msg) - raise webob.exc.HTTPForbidden(explanation=msg) - except ValueError as e: - msg = (_("Incorrect request: %s") - % encodeutils.exception_to_unicode(e)) - LOG.warning(msg) - raise webob.exc.HTTPBadRequest(explanation=msg) - - def index(self, req, image_id): - """ - Return a list of dictionaries indicating the members of the - image, i.e., those tenants the image is shared with. - - :param req: the Request object coming from the wsgi layer - :param image_id: The image identifier - :returns: The response body is a mapping of the following form - - :: - - {'members': [ - {'member_id': , - 'image_id': , - 'status': , - 'created_at': .., - 'updated_at': ..}, .. - ]} - - """ - image = self._lookup_image(req, image_id) - member_repo = self._get_member_repo(req, image) - members = [] - try: - for member in member_repo.list(): - members.append(member) - except exception.Forbidden: - msg = _("Not allowed to list members for image %s.") % image_id - LOG.warning(msg) - raise webob.exc.HTTPForbidden(explanation=msg) - return dict(members=members) - - def show(self, req, image_id, member_id): - """ - Returns the membership of the tenant wrt to the image_id specified. 
- - :param req: the Request object coming from the wsgi layer - :param image_id: The image identifier - :returns: The response body is a mapping of the following form - - :: - - {'member_id': , - 'image_id': , - 'status': - 'created_at': .., - 'updated_at': ..} - - """ - try: - image = self._lookup_image(req, image_id) - return self._lookup_member(req, image, member_id) - except webob.exc.HTTPForbidden as e: - # Convert Forbidden to NotFound to prevent information - # leakage. - raise webob.exc.HTTPNotFound(explanation=e.explanation) - - @utils.mutating - def delete(self, req, image_id, member_id): - """ - Removes a membership from the image. - """ - image = self._lookup_image(req, image_id) - member_repo = self._get_member_repo(req, image) - member = self._lookup_member(req, image, member_id) - try: - member_repo.remove(member) - return webob.Response(body='', status=http.NO_CONTENT) - except exception.Forbidden: - msg = _("Not allowed to delete members for image %s.") % image_id - LOG.warning(msg) - raise webob.exc.HTTPForbidden(explanation=msg) - - -class RequestDeserializer(wsgi.JSONRequestDeserializer): - - def __init__(self): - super(RequestDeserializer, self).__init__() - - def _get_request_body(self, request): - output = super(RequestDeserializer, self).default(request) - if 'body' not in output: - msg = _('Body expected in request.') - raise webob.exc.HTTPBadRequest(explanation=msg) - return output['body'] - - def create(self, request): - body = self._get_request_body(request) - try: - member_id = body['member'] - if not member_id: - raise ValueError() - except KeyError: - msg = _("Member to be added not specified") - raise webob.exc.HTTPBadRequest(explanation=msg) - except ValueError: - msg = _("Member can't be empty") - raise webob.exc.HTTPBadRequest(explanation=msg) - except TypeError: - msg = _('Expected a member in the form: ' - '{"member": "image_id"}') - raise webob.exc.HTTPBadRequest(explanation=msg) - return dict(member_id=member_id) - - def 
update(self, request): - body = self._get_request_body(request) - try: - status = body['status'] - except KeyError: - msg = _("Status not specified") - raise webob.exc.HTTPBadRequest(explanation=msg) - except TypeError: - msg = _('Expected a status in the form: ' - '{"status": "status"}') - raise webob.exc.HTTPBadRequest(explanation=msg) - return dict(status=status) - - -class ResponseSerializer(wsgi.JSONResponseSerializer): - def __init__(self, schema=None): - super(ResponseSerializer, self).__init__() - self.schema = schema or get_schema() - - def _format_image_member(self, member): - member_view = {} - attributes = ['member_id', 'image_id', 'status'] - for key in attributes: - member_view[key] = getattr(member, key) - member_view['created_at'] = timeutils.isotime(member.created_at) - member_view['updated_at'] = timeutils.isotime(member.updated_at) - member_view['schema'] = '/v2/schemas/member' - member_view = self.schema.filter(member_view) - return member_view - - def create(self, response, image_member): - image_member_view = self._format_image_member(image_member) - body = jsonutils.dumps(image_member_view, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def update(self, response, image_member): - image_member_view = self._format_image_member(image_member) - body = jsonutils.dumps(image_member_view, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def index(self, response, image_members): - image_members = image_members['members'] - image_members_view = [] - for image_member in image_members: - image_member_view = self._format_image_member(image_member) - image_members_view.append(image_member_view) - totalview = dict(members=image_members_view) - totalview['schema'] = '/v2/schemas/members' - body = jsonutils.dumps(totalview, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 
'application/json' - - def show(self, response, image_member): - image_member_view = self._format_image_member(image_member) - body = jsonutils.dumps(image_member_view, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - -_MEMBER_SCHEMA = { - 'member_id': { - 'type': 'string', - 'description': _('An identifier for the image member (tenantId)') - }, - 'image_id': { - 'type': 'string', - 'description': _('An identifier for the image'), - 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' - '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), - }, - 'created_at': { - 'type': 'string', - 'description': _('Date and time of image member creation'), - # TODO(brian-rosmaita): our jsonschema library doesn't seem to like the - # format attribute, figure out why (and also fix in images.py) - # 'format': 'date-time', - }, - 'updated_at': { - 'type': 'string', - 'description': _('Date and time of last modification of image member'), - # 'format': 'date-time', - }, - 'status': { - 'type': 'string', - 'description': _('The status of this image member'), - 'enum': [ - 'pending', - 'accepted', - 'rejected' - ] - }, - 'schema': { - 'readOnly': True, - 'type': 'string' - } -} - - -def get_schema(): - properties = copy.deepcopy(_MEMBER_SCHEMA) - schema = glance.schema.Schema('member', properties) - return schema - - -def get_collection_schema(): - member_schema = get_schema() - return glance.schema.CollectionSchema('members', member_schema) - - -def create_resource(): - """Image Members resource factory method""" - deserializer = RequestDeserializer() - serializer = ResponseSerializer() - controller = ImageMembersController() - return wsgi.Resource(controller, deserializer, serializer) diff --git a/glance/api/v2/image_tags.py b/glance/api/v2/image_tags.py deleted file mode 100644 index 5d37f224..00000000 --- a/glance/api/v2/image_tags.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All 
Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import glance_store -from oslo_log import log as logging -from oslo_utils import encodeutils -from six.moves import http_client as http -import webob.exc - -from glance.api import policy -from glance.api.v2 import images as v2_api -from glance.common import exception -from glance.common import utils -from glance.common import wsgi -import glance.db -import glance.gateway -from glance.i18n import _ -import glance.notifier - - -LOG = logging.getLogger(__name__) - - -class Controller(object): - def __init__(self, db_api=None, policy_enforcer=None, notifier=None, - store_api=None): - self.db_api = db_api or glance.db.get_api() - self.policy = policy_enforcer or policy.Enforcer() - self.notifier = notifier or glance.notifier.Notifier() - self.store_api = store_api or glance_store - self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, - self.notifier, self.policy) - - @utils.mutating - def update(self, req, image_id, tag_value): - image_repo = self.gateway.get_repo(req.context) - try: - image = image_repo.get(image_id) - image.tags.add(tag_value) - image_repo.save(image) - except exception.NotFound: - msg = _("Image %s not found.") % image_id - LOG.warning(msg) - raise webob.exc.HTTPNotFound(explanation=msg) - except exception.Forbidden: - msg = _("Not allowed to update tags for image %s.") % image_id - LOG.warning(msg) - raise webob.exc.HTTPForbidden(explanation=msg) - except exception.Invalid 
as e: - msg = (_("Could not update image: %s") - % encodeutils.exception_to_unicode(e)) - LOG.warning(msg) - raise webob.exc.HTTPBadRequest(explanation=msg) - except exception.ImageTagLimitExceeded as e: - msg = (_("Image tag limit exceeded for image %(id)s: %(e)s:") - % {"id": image_id, - "e": encodeutils.exception_to_unicode(e)}) - LOG.warning(msg) - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) - - @utils.mutating - def delete(self, req, image_id, tag_value): - image_repo = self.gateway.get_repo(req.context) - try: - image = image_repo.get(image_id) - if tag_value not in image.tags: - raise webob.exc.HTTPNotFound() - image.tags.remove(tag_value) - image_repo.save(image) - except exception.NotFound: - msg = _("Image %s not found.") % image_id - LOG.warning(msg) - raise webob.exc.HTTPNotFound(explanation=msg) - except exception.Forbidden: - msg = _("Not allowed to delete tags for image %s.") % image_id - LOG.warning(msg) - raise webob.exc.HTTPForbidden(explanation=msg) - - -class ResponseSerializer(wsgi.JSONResponseSerializer): - def update(self, response, result): - response.status_int = http.NO_CONTENT - - def delete(self, response, result): - response.status_int = http.NO_CONTENT - - -class RequestDeserializer(wsgi.JSONRequestDeserializer): - def update(self, request): - try: - schema = v2_api.get_schema() - schema_format = {"tags": [request.urlvars.get('tag_value')]} - schema.validate(schema_format) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - return super(RequestDeserializer, self).default(request) - - -def create_resource(): - """Images resource factory method""" - serializer = ResponseSerializer() - deserializer = RequestDeserializer() - controller = Controller() - return wsgi.Resource(controller, deserializer, serializer) diff --git a/glance/api/v2/images.py b/glance/api/v2/images.py deleted file mode 100644 index a0077c32..00000000 --- a/glance/api/v2/images.py +++ /dev/null @@ -1,1067 +0,0 @@ 
-# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - -import glance_store -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils as json -from oslo_utils import encodeutils -import six -from six.moves import http_client as http -import six.moves.urllib.parse as urlparse -import webob.exc - -from glance.api import common -from glance.api import policy -from glance.common import exception -from glance.common import location_strategy -from glance.common import timeutils -from glance.common import utils -from glance.common import wsgi -import glance.db -import glance.gateway -from glance.i18n import _, _LW -import glance.notifier -import glance.schema - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF -CONF.import_opt('disk_formats', 'glance.common.config', group='image_format') -CONF.import_opt('container_formats', 'glance.common.config', - group='image_format') -CONF.import_opt('show_multiple_locations', 'glance.common.config') - - -class ImagesController(object): - def __init__(self, db_api=None, policy_enforcer=None, notifier=None, - store_api=None): - self.db_api = db_api or glance.db.get_api() - self.policy = policy_enforcer or policy.Enforcer() - self.notifier = notifier or glance.notifier.Notifier() - self.store_api = store_api or glance_store - self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, - self.notifier, 
self.policy) - - @utils.mutating - def create(self, req, image, extra_properties, tags): - image_factory = self.gateway.get_image_factory(req.context) - image_repo = self.gateway.get_repo(req.context) - try: - image = image_factory.new_image(extra_properties=extra_properties, - tags=tags, **image) - image_repo.add(image) - except (exception.DuplicateLocation, - exception.Invalid) as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except (exception.ReservedProperty, - exception.ReadonlyProperty) as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.Forbidden as e: - LOG.debug("User not permitted to create image") - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.LimitExceeded as e: - LOG.warn(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPRequestEntityTooLarge( - explanation=e.msg, request=req, content_type='text/plain') - except exception.Duplicate as e: - raise webob.exc.HTTPConflict(explanation=e.msg) - except exception.NotAuthenticated as e: - raise webob.exc.HTTPUnauthorized(explanation=e.msg) - except TypeError as e: - LOG.debug(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPBadRequest(explanation=e) - - return image - - @utils.mutating - def import_image(self, req, image): - task_factory = self.gateway.get_task_factory(req.context) - executor_factory = self.gateway.get_task_executor_factory(req.context) - task_repo = self.gateway.get_task_repo(req.context) - - task_input = {} - - try: - import_task = task_factory.new_task(task_type='api_image_import', - owner=req.context.owner, - task_input=task_input) - task_repo.add(import_task) - task_executor = executor_factory.new_task_executor(req.context) - pool = common.get_thread_pool("tasks_eventlet_pool") - pool.spawn_n(import_task.run, task_executor) - except exception.Forbidden as e: - LOG.debug("User not permitted to create image import task.") - raise webob.exc.HTTPForbidden(explanation=e.msg) - - return image - - def index(self, req, 
marker=None, limit=None, sort_key=None, - sort_dir=None, filters=None, member_status='accepted'): - sort_key = ['created_at'] if not sort_key else sort_key - - sort_dir = ['desc'] if not sort_dir else sort_dir - - result = {} - if filters is None: - filters = {} - filters['deleted'] = False - - protected = filters.get('protected') - if protected is not None: - if protected not in ['true', 'false']: - message = _("Invalid value '%s' for 'protected' filter." - " Valid values are 'true' or 'false'.") % protected - raise webob.exc.HTTPBadRequest(explanation=message) - # ensure the type of protected is boolean - filters['protected'] = protected == 'true' - - if limit is None: - limit = CONF.limit_param_default - limit = min(CONF.api_limit_max, limit) - - image_repo = self.gateway.get_repo(req.context) - try: - images = image_repo.list(marker=marker, limit=limit, - sort_key=sort_key, - sort_dir=sort_dir, - filters=filters, - member_status=member_status) - if len(images) != 0 and len(images) == limit: - result['next_marker'] = images[-1].image_id - except (exception.NotFound, exception.InvalidSortKey, - exception.InvalidFilterRangeValue, - exception.InvalidParameterValue, - exception.InvalidFilterOperatorValue) as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except exception.Forbidden as e: - LOG.debug("User not permitted to retrieve images index") - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotAuthenticated as e: - raise webob.exc.HTTPUnauthorized(explanation=e.msg) - result['images'] = images - return result - - def show(self, req, image_id): - image_repo = self.gateway.get_repo(req.context) - try: - return image_repo.get(image_id) - except exception.Forbidden as e: - LOG.debug("User not permitted to show image '%s'", image_id) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.NotAuthenticated as e: - raise 
webob.exc.HTTPUnauthorized(explanation=e.msg) - - @utils.mutating - def update(self, req, image_id, changes): - image_repo = self.gateway.get_repo(req.context) - try: - image = image_repo.get(image_id) - - for change in changes: - change_method_name = '_do_%s' % change['op'] - change_method = getattr(self, change_method_name) - change_method(req, image, change) - - if changes: - image_repo.save(image) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except (exception.Invalid, exception.BadStoreUri) as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except exception.Forbidden as e: - LOG.debug("User not permitted to update image '%s'", image_id) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.StorageQuotaFull as e: - msg = (_("Denying attempt to upload image because it exceeds the" - " quota: %s") % encodeutils.exception_to_unicode(e)) - LOG.warn(msg) - raise webob.exc.HTTPRequestEntityTooLarge( - explanation=msg, request=req, content_type='text/plain') - except exception.LimitExceeded as e: - LOG.exception(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPRequestEntityTooLarge( - explanation=e.msg, request=req, content_type='text/plain') - except exception.NotAuthenticated as e: - raise webob.exc.HTTPUnauthorized(explanation=e.msg) - - return image - - def _do_replace(self, req, image, change): - path = change['path'] - path_root = path[0] - value = change['value'] - if path_root == 'locations' and value == []: - msg = _("Cannot set locations to empty list.") - raise webob.exc.HTTPForbidden(msg) - elif path_root == 'locations' and value != []: - self._do_replace_locations(image, value) - elif path_root == 'owner' and req.context.is_admin == False: - msg = _("Owner can't be updated by non admin.") - raise webob.exc.HTTPForbidden(msg) - else: - if hasattr(image, path_root): - setattr(image, path_root, value) - elif path_root in image.extra_properties: - image.extra_properties[path_root] = 
value - else: - msg = _("Property %s does not exist.") - raise webob.exc.HTTPConflict(msg % path_root) - - def _do_add(self, req, image, change): - path = change['path'] - path_root = path[0] - value = change['value'] - json_schema_version = change.get('json_schema_version', 10) - if path_root == 'locations': - self._do_add_locations(image, path[1], value) - else: - if ((hasattr(image, path_root) or - path_root in image.extra_properties) - and json_schema_version == 4): - msg = _("Property %s already present.") - raise webob.exc.HTTPConflict(msg % path_root) - if hasattr(image, path_root): - setattr(image, path_root, value) - else: - image.extra_properties[path_root] = value - - def _do_remove(self, req, image, change): - path = change['path'] - path_root = path[0] - if path_root == 'locations': - try: - self._do_remove_locations(image, path[1]) - except exception.Forbidden as e: - raise webob.exc.HTTPForbidden(e.msg) - else: - if hasattr(image, path_root): - msg = _("Property %s may not be removed.") - raise webob.exc.HTTPForbidden(msg % path_root) - elif path_root in image.extra_properties: - del image.extra_properties[path_root] - else: - msg = _("Property %s does not exist.") - raise webob.exc.HTTPConflict(msg % path_root) - - @utils.mutating - def delete(self, req, image_id): - image_repo = self.gateway.get_repo(req.context) - try: - image = image_repo.get(image_id) - image.delete() - image_repo.remove(image) - except (glance_store.Forbidden, exception.Forbidden) as e: - LOG.debug("User not permitted to delete image '%s'", image_id) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except (glance_store.NotFound, exception.NotFound) as e: - msg = (_("Failed to find image %(image_id)s to delete") % - {'image_id': image_id}) - LOG.warn(msg) - raise webob.exc.HTTPNotFound(explanation=msg) - except glance_store.exceptions.InUseByStore as e: - msg = (_("Image %(id)s could not be deleted " - "because it is in use: %(exc)s") % - {"id": image_id, - "exc": e.msg}) - 
LOG.warn(msg) - raise webob.exc.HTTPConflict(explanation=msg) - except glance_store.exceptions.HasSnapshot as e: - raise webob.exc.HTTPConflict(explanation=e.msg) - except exception.InvalidImageStatusTransition as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except exception.NotAuthenticated as e: - raise webob.exc.HTTPUnauthorized(explanation=e.msg) - - def _get_locations_op_pos(self, path_pos, max_pos, allow_max): - if path_pos is None or max_pos is None: - return None - pos = max_pos if allow_max else max_pos - 1 - if path_pos.isdigit(): - pos = int(path_pos) - elif path_pos != '-': - return None - if not (allow_max or 0 <= pos < max_pos): - return None - return pos - - def _do_replace_locations(self, image, value): - if CONF.show_multiple_locations == False: - msg = _("It's not allowed to update locations if locations are " - "invisible.") - raise webob.exc.HTTPForbidden(explanation=msg) - - if image.status not in ('active', 'queued'): - msg = _("It's not allowed to replace locations if image status is " - "%s.") % image.status - raise webob.exc.HTTPConflict(explanation=msg) - - try: - # NOTE(flwang): _locations_proxy's setattr method will check if - # the update is acceptable. - image.locations = value - except (exception.BadStoreUri, exception.DuplicateLocation) as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except ValueError as ve: # update image status failed. 
- raise webob.exc.HTTPBadRequest( - explanation=encodeutils.exception_to_unicode(ve)) - - def _do_add_locations(self, image, path_pos, value): - if CONF.show_multiple_locations == False: - msg = _("It's not allowed to add locations if locations are " - "invisible.") - raise webob.exc.HTTPForbidden(explanation=msg) - - if image.status not in ('active', 'queued'): - msg = _("It's not allowed to add locations if image status is " - "%s.") % image.status - raise webob.exc.HTTPConflict(explanation=msg) - - pos = self._get_locations_op_pos(path_pos, - len(image.locations), True) - if pos is None: - msg = _("Invalid position for adding a location.") - raise webob.exc.HTTPBadRequest(explanation=msg) - try: - image.locations.insert(pos, value) - if image.status == 'queued': - image.status = 'active' - except (exception.BadStoreUri, exception.DuplicateLocation) as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except ValueError as e: # update image status failed. - raise webob.exc.HTTPBadRequest( - explanation=encodeutils.exception_to_unicode(e)) - - def _do_remove_locations(self, image, path_pos): - if CONF.show_multiple_locations == False: - msg = _("It's not allowed to remove locations if locations are " - "invisible.") - raise webob.exc.HTTPForbidden(explanation=msg) - - if image.status not in ('active'): - msg = _("It's not allowed to remove locations if image status is " - "%s.") % image.status - raise webob.exc.HTTPConflict(explanation=msg) - - if len(image.locations) == 1: - LOG.debug("User forbidden to remove last location of image %s", - image.image_id) - msg = _("Cannot remove last location in the image.") - raise exception.Forbidden(msg) - pos = self._get_locations_op_pos(path_pos, - len(image.locations), False) - if pos is None: - msg = _("Invalid position for removing a location.") - raise webob.exc.HTTPBadRequest(explanation=msg) - try: - # NOTE(zhiyan): this actually deletes the location - # from the backend store. 
- image.locations.pop(pos) - # TODO(jokke): Fix this, we should catch what store throws and - # provide definitely something else than IternalServerError to user. - except Exception as e: - raise webob.exc.HTTPInternalServerError( - explanation=encodeutils.exception_to_unicode(e)) - - -class RequestDeserializer(wsgi.JSONRequestDeserializer): - - _disallowed_properties = ('direct_url', 'self', 'file', 'schema') - _readonly_properties = ('created_at', 'updated_at', 'status', 'checksum', - 'size', 'virtual_size', 'direct_url', 'self', - 'file', 'schema', 'id') - _reserved_properties = ('location', 'deleted', 'deleted_at') - _base_properties = ('checksum', 'created_at', 'container_format', - 'disk_format', 'id', 'min_disk', 'min_ram', 'name', - 'size', 'virtual_size', 'status', 'tags', 'owner', - 'updated_at', 'visibility', 'protected') - _available_sort_keys = ('name', 'status', 'container_format', - 'disk_format', 'size', 'id', 'created_at', - 'updated_at') - - _default_sort_key = 'created_at' - - _default_sort_dir = 'desc' - - _path_depth_limits = {'locations': {'add': 2, 'remove': 2, 'replace': 1}} - - _supported_operations = ('add', 'remove', 'replace') - - def __init__(self, schema=None): - super(RequestDeserializer, self).__init__() - self.schema = schema or get_schema() - - def _get_request_body(self, request): - output = super(RequestDeserializer, self).default(request) - if 'body' not in output: - msg = _('Body expected in request.') - raise webob.exc.HTTPBadRequest(explanation=msg) - return output['body'] - - @classmethod - def _check_allowed(cls, image): - for key in cls._disallowed_properties: - if key in image: - msg = _("Attribute '%s' is read-only.") % key - raise webob.exc.HTTPForbidden( - explanation=six.text_type(msg)) - - def create(self, request): - body = self._get_request_body(request) - self._check_allowed(body) - try: - self.schema.validate(body) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - image 
= {} - properties = body - tags = properties.pop('tags', []) - for key in self._base_properties: - try: - # NOTE(flwang): Instead of changing the _check_unexpected - # of ImageFactory. It would be better to do the mapping - # at here. - if key == 'id': - image['image_id'] = properties.pop(key) - else: - image[key] = properties.pop(key) - except KeyError: - pass - return dict(image=image, extra_properties=properties, tags=tags) - - def _get_change_operation_d10(self, raw_change): - op = raw_change.get('op') - if op is None: - msg = (_('Unable to find `op` in JSON Schema change. ' - 'It must be one of the following: %(available)s.') % - {'available': ', '.join(self._supported_operations)}) - raise webob.exc.HTTPBadRequest(explanation=msg) - if op not in self._supported_operations: - msg = (_('Invalid operation: `%(op)s`. ' - 'It must be one of the following: %(available)s.') % - {'op': op, - 'available': ', '.join(self._supported_operations)}) - raise webob.exc.HTTPBadRequest(explanation=msg) - return op - - def _get_change_operation_d4(self, raw_change): - op = None - for key in self._supported_operations: - if key in raw_change: - if op is not None: - msg = _('Operation objects must contain only one member' - ' named "add", "remove", or "replace".') - raise webob.exc.HTTPBadRequest(explanation=msg) - op = key - if op is None: - msg = _('Operation objects must contain exactly one member' - ' named "add", "remove", or "replace".') - raise webob.exc.HTTPBadRequest(explanation=msg) - return op - - def _get_change_path_d10(self, raw_change): - try: - return raw_change['path'] - except KeyError: - msg = _("Unable to find '%s' in JSON Schema change") % 'path' - raise webob.exc.HTTPBadRequest(explanation=msg) - - def _get_change_path_d4(self, raw_change, op): - return raw_change[op] - - def _decode_json_pointer(self, pointer): - """Parse a json pointer. - - Json Pointers are defined in - http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer . 
- The pointers use '/' for separation between object attributes, such - that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character - in an attribute name is encoded as "~1" and a '~' character is encoded - as "~0". - """ - self._validate_json_pointer(pointer) - ret = [] - for part in pointer.lstrip('/').split('/'): - ret.append(part.replace('~1', '/').replace('~0', '~').strip()) - return ret - - def _validate_json_pointer(self, pointer): - """Validate a json pointer. - - We only accept a limited form of json pointers. - """ - if not pointer.startswith('/'): - msg = _('Pointer `%s` does not start with "/".') % pointer - raise webob.exc.HTTPBadRequest(explanation=msg) - if re.search('/\s*?/', pointer[1:]): - msg = _('Pointer `%s` contains adjacent "/".') % pointer - raise webob.exc.HTTPBadRequest(explanation=msg) - if len(pointer) > 1 and pointer.endswith('/'): - msg = _('Pointer `%s` end with "/".') % pointer - raise webob.exc.HTTPBadRequest(explanation=msg) - if pointer[1:].strip() == '/': - msg = _('Pointer `%s` does not contains valid token.') % pointer - raise webob.exc.HTTPBadRequest(explanation=msg) - if re.search('~[^01]', pointer) or pointer.endswith('~'): - msg = _('Pointer `%s` contains "~" not part of' - ' a recognized escape sequence.') % pointer - raise webob.exc.HTTPBadRequest(explanation=msg) - - def _get_change_value(self, raw_change, op): - if 'value' not in raw_change: - msg = _('Operation "%s" requires a member named "value".') - raise webob.exc.HTTPBadRequest(explanation=msg % op) - return raw_change['value'] - - def _validate_change(self, change): - path_root = change['path'][0] - if path_root in self._readonly_properties: - msg = _("Attribute '%s' is read-only.") % path_root - raise webob.exc.HTTPForbidden(explanation=six.text_type(msg)) - if path_root in self._reserved_properties: - msg = _("Attribute '%s' is reserved.") % path_root - raise webob.exc.HTTPForbidden(explanation=six.text_type(msg)) - - if change['op'] == 'remove': - 
return - - partial_image = None - if len(change['path']) == 1: - partial_image = {path_root: change['value']} - elif ((path_root in get_base_properties().keys()) and - (get_base_properties()[path_root].get('type', '') == 'array')): - # NOTE(zhiyan): client can use the PATCH API to add an element - # directly to an existing property - # Such as: 1. using '/locations/N' path to add a location - # to the image's 'locations' list at position N. - # (implemented) - # 2. using '/tags/-' path to append a tag to the - # image's 'tags' list at the end. (Not implemented) - partial_image = {path_root: [change['value']]} - - if partial_image: - try: - self.schema.validate(partial_image) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - - def _validate_path(self, op, path): - path_root = path[0] - limits = self._path_depth_limits.get(path_root, {}) - if len(path) != limits.get(op, 1): - msg = _("Invalid JSON pointer for this resource: " - "'/%s'") % '/'.join(path) - raise webob.exc.HTTPBadRequest(explanation=six.text_type(msg)) - - def _parse_json_schema_change(self, raw_change, draft_version): - if draft_version == 10: - op = self._get_change_operation_d10(raw_change) - path = self._get_change_path_d10(raw_change) - elif draft_version == 4: - op = self._get_change_operation_d4(raw_change) - path = self._get_change_path_d4(raw_change, op) - else: - msg = _('Unrecognized JSON Schema draft version') - raise webob.exc.HTTPBadRequest(explanation=msg) - - path_list = self._decode_json_pointer(path) - return op, path_list - - def update(self, request): - changes = [] - content_types = { - 'application/openstack-images-v2.0-json-patch': 4, - 'application/openstack-images-v2.1-json-patch': 10, - } - if request.content_type not in content_types: - headers = {'Accept-Patch': - ', '.join(sorted(content_types.keys()))} - raise webob.exc.HTTPUnsupportedMediaType(headers=headers) - - json_schema_version = content_types[request.content_type] - - body 
= self._get_request_body(request) - - if not isinstance(body, list): - msg = _('Request body must be a JSON array of operation objects.') - raise webob.exc.HTTPBadRequest(explanation=msg) - - for raw_change in body: - if not isinstance(raw_change, dict): - msg = _('Operations must be JSON objects.') - raise webob.exc.HTTPBadRequest(explanation=msg) - - (op, path) = self._parse_json_schema_change(raw_change, - json_schema_version) - - # NOTE(zhiyan): the 'path' is a list. - self._validate_path(op, path) - change = {'op': op, 'path': path, - 'json_schema_version': json_schema_version} - - if not op == 'remove': - change['value'] = self._get_change_value(raw_change, op) - - self._validate_change(change) - - changes.append(change) - - return {'changes': changes} - - def _validate_limit(self, limit): - try: - limit = int(limit) - except ValueError: - msg = _("limit param must be an integer") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if limit < 0: - msg = _("limit param must be positive") - raise webob.exc.HTTPBadRequest(explanation=msg) - - return limit - - def _validate_sort_key(self, sort_key): - if sort_key not in self._available_sort_keys: - msg = _('Invalid sort key: %(sort_key)s. 
' - 'It must be one of the following: %(available)s.') % ( - {'sort_key': sort_key, - 'available': ', '.join(self._available_sort_keys)}) - raise webob.exc.HTTPBadRequest(explanation=msg) - - return sort_key - - def _validate_sort_dir(self, sort_dir): - if sort_dir not in ['asc', 'desc']: - msg = _('Invalid sort direction: %s') % sort_dir - raise webob.exc.HTTPBadRequest(explanation=msg) - - return sort_dir - - def _validate_member_status(self, member_status): - if member_status not in ['pending', 'accepted', 'rejected', 'all']: - msg = _('Invalid status: %s') % member_status - raise webob.exc.HTTPBadRequest(explanation=msg) - - return member_status - - def _get_filters(self, filters): - visibility = filters.get('visibility') - if visibility: - if visibility not in ['community', 'public', 'private', 'shared']: - msg = _('Invalid visibility value: %s') % visibility - raise webob.exc.HTTPBadRequest(explanation=msg) - changes_since = filters.get('changes-since') - if changes_since: - msg = _('The "changes-since" filter is no longer available on v2.') - raise webob.exc.HTTPBadRequest(explanation=msg) - - return filters - - def _get_sorting_params(self, params): - """ - Process sorting params. - Currently glance supports two sorting syntax: classic and new one, - that is uniform for all OpenStack projects. 
- Classic syntax: sort_key=name&sort_dir=asc&sort_key=size&sort_dir=desc - New syntax: sort=name:asc,size:desc - """ - sort_keys = [] - sort_dirs = [] - - if 'sort' in params: - # use new sorting syntax here - if 'sort_key' in params or 'sort_dir' in params: - msg = _('Old and new sorting syntax cannot be combined') - raise webob.exc.HTTPBadRequest(explanation=msg) - for sort_param in params.pop('sort').strip().split(','): - key, _sep, dir = sort_param.partition(':') - if not dir: - dir = self._default_sort_dir - sort_keys.append(self._validate_sort_key(key.strip())) - sort_dirs.append(self._validate_sort_dir(dir.strip())) - else: - # continue with classic syntax - # NOTE(mfedosin): we have 3 options here: - # 1. sort_dir wasn't passed: we use default one - 'desc'. - # 2. Only one sort_dir was passed: use it for every sort_key - # in the list. - # 3. Multiple sort_dirs were passed: consistently apply each one to - # the corresponding sort_key. - # If number of sort_dirs and sort_keys doesn't match then raise an - # exception. 
- while 'sort_key' in params: - sort_keys.append(self._validate_sort_key( - params.pop('sort_key').strip())) - - while 'sort_dir' in params: - sort_dirs.append(self._validate_sort_dir( - params.pop('sort_dir').strip())) - - if sort_dirs: - dir_len = len(sort_dirs) - key_len = len(sort_keys) - - if dir_len > 1 and dir_len != key_len: - msg = _('Number of sort dirs does not match the number ' - 'of sort keys') - raise webob.exc.HTTPBadRequest(explanation=msg) - - if not sort_keys: - sort_keys = [self._default_sort_key] - - if not sort_dirs: - sort_dirs = [self._default_sort_dir] - - return sort_keys, sort_dirs - - def index(self, request): - params = request.params.copy() - limit = params.pop('limit', None) - marker = params.pop('marker', None) - member_status = params.pop('member_status', 'accepted') - - # NOTE (flwang) To avoid using comma or any predefined chars to split - # multiple tags, now we allow user specify multiple 'tag' parameters - # in URL, such as v2/images?tag=x86&tag=64bit. - tags = [] - while 'tag' in params: - tags.append(params.pop('tag').strip()) - - query_params = { - 'filters': self._get_filters(params), - 'member_status': self._validate_member_status(member_status), - } - - if marker is not None: - query_params['marker'] = marker - - if limit is not None: - query_params['limit'] = self._validate_limit(limit) - - if tags: - query_params['filters']['tags'] = tags - - # NOTE(mfedosin): param is still called sort_key and sort_dir, - # instead of sort_keys and sort_dirs respectively. - # It's done because in v1 it's still a single value. 
- - query_params['sort_key'], query_params['sort_dir'] = ( - self._get_sorting_params(params)) - - return query_params - - -class ResponseSerializer(wsgi.JSONResponseSerializer): - def __init__(self, schema=None): - super(ResponseSerializer, self).__init__() - self.schema = schema or get_schema() - - def _get_image_href(self, image, subcollection=''): - base_href = '/v2/images/%s' % image.image_id - if subcollection: - base_href = '%s/%s' % (base_href, subcollection) - return base_href - - def _format_image(self, image): - - def _get_image_locations(image): - try: - return list(image.locations) - except exception.Forbidden: - return [] - - try: - image_view = dict(image.extra_properties) - attributes = ['name', 'disk_format', 'container_format', - 'visibility', 'size', 'virtual_size', 'status', - 'checksum', 'protected', 'min_ram', 'min_disk', - 'owner'] - for key in attributes: - image_view[key] = getattr(image, key) - image_view['id'] = image.image_id - image_view['created_at'] = timeutils.isotime(image.created_at) - image_view['updated_at'] = timeutils.isotime(image.updated_at) - - if CONF.show_multiple_locations: - locations = _get_image_locations(image) - if locations: - image_view['locations'] = [] - for loc in locations: - tmp = dict(loc) - tmp.pop('id', None) - tmp.pop('status', None) - image_view['locations'].append(tmp) - else: - # NOTE (flwang): We will still show "locations": [] if - # image.locations is None to indicate it's allowed to show - # locations but it's just non-existent. 
- image_view['locations'] = [] - LOG.debug("There is not available location " - "for image %s", image.image_id) - - if CONF.show_image_direct_url: - locations = _get_image_locations(image) - if locations: - # Choose best location configured strategy - l = location_strategy.choose_best_location(locations) - image_view['direct_url'] = l['url'] - else: - LOG.debug("There is not available location " - "for image %s", image.image_id) - - image_view['tags'] = list(image.tags) - image_view['self'] = self._get_image_href(image) - image_view['file'] = self._get_image_href(image, 'file') - image_view['schema'] = '/v2/schemas/image' - image_view = self.schema.filter(image_view) # domain - return image_view - except exception.Forbidden as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - - def create(self, response, image): - response.status_int = http.CREATED - self.show(response, image) - response.location = self._get_image_href(image) - # TODO(jokke): make this configurable when swift-local is implemented - # and remove the if statement with the config option. 
- if CONF.enable_image_import: - import_methods = "OpenStack-image-import-methods: glance-direct" - response.headerlist.extend([import_methods]) - - def show(self, response, image): - image_view = self._format_image(image) - body = json.dumps(image_view, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def update(self, response, image): - image_view = self._format_image(image) - body = json.dumps(image_view, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def index(self, response, result): - params = dict(response.request.params) - params.pop('marker', None) - query = urlparse.urlencode(params) - body = { - 'images': [self._format_image(i) for i in result['images']], - 'first': '/v2/images', - 'schema': '/v2/schemas/images', - } - if query: - body['first'] = '%s?%s' % (body['first'], query) - if 'next_marker' in result: - params['marker'] = result['next_marker'] - next_query = urlparse.urlencode(params) - body['next'] = '/v2/images?%s' % next_query - response.unicode_body = six.text_type(json.dumps(body, - ensure_ascii=False)) - response.content_type = 'application/json' - - def delete(self, response, result): - response.status_int = http.NO_CONTENT - - -def get_base_properties(): - return { - 'id': { - 'type': 'string', - 'description': _('An identifier for the image'), - 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' - '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), - }, - 'name': { - 'type': ['null', 'string'], - 'description': _('Descriptive name for the image'), - 'maxLength': 255, - }, - 'status': { - 'type': 'string', - 'readOnly': True, - 'description': _('Status of the image'), - 'enum': ['queued', 'saving', 'active', 'killed', - 'deleted', 'pending_delete', 'deactivated'], - }, - 'visibility': { - 'type': 'string', - 'description': _('Scope of image accessibility'), - 'enum': ['community', 'public', 'private', 
'shared'], - }, - 'protected': { - 'type': 'boolean', - 'description': _('If true, image will not be deletable.'), - }, - 'checksum': { - 'type': ['null', 'string'], - 'readOnly': True, - 'description': _('md5 hash of image contents.'), - 'maxLength': 32, - }, - 'owner': { - 'type': ['null', 'string'], - 'description': _('Owner of the image'), - 'maxLength': 255, - }, - 'size': { - 'type': ['null', 'integer'], - 'readOnly': True, - 'description': _('Size of image file in bytes'), - }, - 'virtual_size': { - 'type': ['null', 'integer'], - 'readOnly': True, - 'description': _('Virtual size of image in bytes'), - }, - 'container_format': { - 'type': ['null', 'string'], - 'description': _('Format of the container'), - 'enum': [None] + CONF.image_format.container_formats, - }, - 'disk_format': { - 'type': ['null', 'string'], - 'description': _('Format of the disk'), - 'enum': [None] + CONF.image_format.disk_formats, - }, - 'created_at': { - 'type': 'string', - 'readOnly': True, - 'description': _('Date and time of image registration' - ), - # TODO(bcwaldon): our jsonschema library doesn't seem to like the - # format attribute, figure out why! 
- # 'format': 'date-time', - }, - 'updated_at': { - 'type': 'string', - 'readOnly': True, - 'description': _('Date and time of the last image modification' - ), - # 'format': 'date-time', - }, - 'tags': { - 'type': 'array', - 'description': _('List of strings related to the image'), - 'items': { - 'type': 'string', - 'maxLength': 255, - }, - }, - 'direct_url': { - 'type': 'string', - 'readOnly': True, - 'description': _('URL to access the image file kept in external ' - 'store'), - }, - 'min_ram': { - 'type': 'integer', - 'description': _('Amount of ram (in MB) required to boot image.'), - }, - 'min_disk': { - 'type': 'integer', - 'description': _('Amount of disk space (in GB) required to boot ' - 'image.'), - }, - 'self': { - 'type': 'string', - 'readOnly': True, - 'description': _('An image self url'), - }, - 'file': { - 'type': 'string', - 'readOnly': True, - 'description': _('An image file url'), - }, - 'schema': { - 'type': 'string', - 'readOnly': True, - 'description': _('An image schema url'), - }, - 'locations': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'url': { - 'type': 'string', - 'maxLength': 255, - }, - 'metadata': { - 'type': 'object', - }, - }, - 'required': ['url', 'metadata'], - }, - 'description': _('A set of URLs to access the image file kept in ' - 'external store'), - }, - } - - -def _get_base_links(): - return [ - {'rel': 'self', 'href': '{self}'}, - {'rel': 'enclosure', 'href': '{file}'}, - {'rel': 'describedby', 'href': '{schema}'}, - ] - - -def get_schema(custom_properties=None): - properties = get_base_properties() - links = _get_base_links() - if CONF.allow_additional_image_properties: - schema = glance.schema.PermissiveSchema('image', properties, links) - else: - schema = glance.schema.Schema('image', properties) - - if custom_properties: - for property_value in custom_properties.values(): - property_value['is_base'] = False - schema.merge_properties(custom_properties) - return schema - - -def 
get_collection_schema(custom_properties=None): - image_schema = get_schema(custom_properties) - return glance.schema.CollectionSchema('images', image_schema) - - -def load_custom_properties(): - """Find the schema properties files and load them into a dict.""" - filename = 'schema-image.json' - match = CONF.find_file(filename) - if match: - with open(match, 'r') as schema_file: - schema_data = schema_file.read() - return json.loads(schema_data) - else: - msg = (_LW('Could not find schema properties file %s. Continuing ' - 'without custom properties') % filename) - LOG.warn(msg) - return {} - - -def create_resource(custom_properties=None): - """Images resource factory method""" - schema = get_schema(custom_properties) - deserializer = RequestDeserializer(schema) - serializer = ResponseSerializer(schema) - controller = ImagesController() - return wsgi.Resource(controller, deserializer, serializer) diff --git a/glance/api/v2/metadef_namespaces.py b/glance/api/v2/metadef_namespaces.py deleted file mode 100644 index 3bd44907..00000000 --- a/glance/api/v2/metadef_namespaces.py +++ /dev/null @@ -1,834 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -import six -from six.moves import http_client as http -import six.moves.urllib.parse as urlparse -import webob.exc -from wsme.rest import json - -from glance.api import policy -from glance.api.v2.model.metadef_namespace import Namespace -from glance.api.v2.model.metadef_namespace import Namespaces -from glance.api.v2.model.metadef_object import MetadefObject -from glance.api.v2.model.metadef_property_type import PropertyType -from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation -from glance.api.v2.model.metadef_tag import MetadefTag -from glance.common import exception -from glance.common import utils -from glance.common import wsgi -from glance.common import wsme_utils -import glance.db -import glance.gateway -from glance.i18n import _, _LE -import glance.notifier -import glance.schema - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - - -class NamespaceController(object): - def __init__(self, db_api=None, policy_enforcer=None, notifier=None): - self.db_api = db_api or glance.db.get_api() - self.policy = policy_enforcer or policy.Enforcer() - self.notifier = notifier or glance.notifier.Notifier() - self.gateway = glance.gateway.Gateway(db_api=self.db_api, - notifier=self.notifier, - policy_enforcer=self.policy) - self.ns_schema_link = '/v2/schemas/metadefs/namespace' - self.obj_schema_link = '/v2/schemas/metadefs/object' - self.tag_schema_link = '/v2/schemas/metadefs/tag' - - def index(self, req, marker=None, limit=None, sort_key='created_at', - sort_dir='desc', filters=None): - try: - ns_repo = self.gateway.get_metadef_namespace_repo(req.context) - - # Get namespace id - if marker: - namespace_obj = ns_repo.get(marker) - marker = namespace_obj.namespace_id - - database_ns_list = ns_repo.list( - marker=marker, limit=limit, sort_key=sort_key, - sort_dir=sort_dir, filters=filters) - for 
db_namespace in database_ns_list: - # Get resource type associations - filters = dict() - filters['namespace'] = db_namespace.namespace - rs_repo = ( - self.gateway.get_metadef_resource_type_repo(req.context)) - repo_rs_type_list = rs_repo.list(filters=filters) - resource_type_list = [ResourceTypeAssociation.to_wsme_model( - resource_type) for resource_type in repo_rs_type_list] - if resource_type_list: - db_namespace.resource_type_associations = ( - resource_type_list) - - namespace_list = [Namespace.to_wsme_model( - db_namespace, - get_namespace_href(db_namespace), - self.ns_schema_link) for db_namespace in database_ns_list] - namespaces = Namespaces() - namespaces.namespaces = namespace_list - if len(namespace_list) != 0 and len(namespace_list) == limit: - namespaces.next = namespace_list[-1].namespace - - except exception.Forbidden as e: - LOG.debug("User not permitted to retrieve metadata namespaces " - "index") - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - return namespaces - - @utils.mutating - def create(self, req, namespace): - try: - namespace_created = False - # Create Namespace - ns_factory = self.gateway.get_metadef_namespace_factory( - req.context) - ns_repo = self.gateway.get_metadef_namespace_repo(req.context) - new_namespace = ns_factory.new_namespace(**namespace.to_dict()) - ns_repo.add(new_namespace) - namespace_created = True - - # Create Resource Types - if namespace.resource_type_associations: - rs_factory = (self.gateway.get_metadef_resource_type_factory( - req.context)) - rs_repo = self.gateway.get_metadef_resource_type_repo( - req.context) - for resource_type in namespace.resource_type_associations: - new_resource = rs_factory.new_resource_type( - namespace=namespace.namespace, - **resource_type.to_dict()) - rs_repo.add(new_resource) - 
- # Create Objects - if namespace.objects: - object_factory = self.gateway.get_metadef_object_factory( - req.context) - object_repo = self.gateway.get_metadef_object_repo( - req.context) - for metadata_object in namespace.objects: - new_meta_object = object_factory.new_object( - namespace=namespace.namespace, - **metadata_object.to_dict()) - object_repo.add(new_meta_object) - - # Create Tags - if namespace.tags: - tag_factory = self.gateway.get_metadef_tag_factory( - req.context) - tag_repo = self.gateway.get_metadef_tag_repo(req.context) - for metadata_tag in namespace.tags: - new_meta_tag = tag_factory.new_tag( - namespace=namespace.namespace, - **metadata_tag.to_dict()) - tag_repo.add(new_meta_tag) - - # Create Namespace Properties - if namespace.properties: - prop_factory = (self.gateway.get_metadef_property_factory( - req.context)) - prop_repo = self.gateway.get_metadef_property_repo( - req.context) - for (name, value) in namespace.properties.items(): - new_property_type = ( - prop_factory.new_namespace_property( - namespace=namespace.namespace, - **self._to_property_dict(name, value) - )) - prop_repo.add(new_property_type) - except exception.Invalid as e: - msg = (_("Couldn't create metadata namespace: %s") - % encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPBadRequest(explanation=msg) - except exception.Forbidden as e: - self._cleanup_namespace(ns_repo, namespace, namespace_created) - LOG.debug("User not permitted to create metadata namespace") - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - self._cleanup_namespace(ns_repo, namespace, namespace_created) - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Duplicate as e: - self._cleanup_namespace(ns_repo, namespace, namespace_created) - raise webob.exc.HTTPConflict(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - - # Return the user namespace as we don't 
expose the id to user - new_namespace.properties = namespace.properties - new_namespace.objects = namespace.objects - new_namespace.resource_type_associations = ( - namespace.resource_type_associations) - new_namespace.tags = namespace.tags - return Namespace.to_wsme_model(new_namespace, - get_namespace_href(new_namespace), - self.ns_schema_link) - - def _to_property_dict(self, name, value): - # Convert the model PropertyTypes dict to a JSON string - db_property_type_dict = dict() - db_property_type_dict['schema'] = json.tojson(PropertyType, value) - db_property_type_dict['name'] = name - return db_property_type_dict - - def _cleanup_namespace(self, namespace_repo, namespace, namespace_created): - if namespace_created: - try: - namespace_obj = namespace_repo.get(namespace.namespace) - namespace_obj.delete() - namespace_repo.remove(namespace_obj) - LOG.debug("Cleaned up namespace %(namespace)s ", - {'namespace': namespace.namespace}) - except Exception as e: - msg = (_LE("Failed to delete namespace %(namespace)s." 
- "Exception: %(exception)s"), - {'namespace': namespace.namespace, - 'exception': encodeutils.exception_to_unicode(e)}) - LOG.error(msg) - - def show(self, req, namespace, filters=None): - try: - # Get namespace - ns_repo = self.gateway.get_metadef_namespace_repo(req.context) - namespace_obj = ns_repo.get(namespace) - namespace_detail = Namespace.to_wsme_model( - namespace_obj, - get_namespace_href(namespace_obj), - self.ns_schema_link) - ns_filters = dict() - ns_filters['namespace'] = namespace - - # Get objects - object_repo = self.gateway.get_metadef_object_repo(req.context) - db_metaobject_list = object_repo.list(filters=ns_filters) - object_list = [MetadefObject.to_wsme_model( - db_metaobject, - get_object_href(namespace, db_metaobject), - self.obj_schema_link) for db_metaobject in db_metaobject_list] - if object_list: - namespace_detail.objects = object_list - - # Get resource type associations - rs_repo = self.gateway.get_metadef_resource_type_repo(req.context) - db_resource_type_list = rs_repo.list(filters=ns_filters) - resource_type_list = [ResourceTypeAssociation.to_wsme_model( - resource_type) for resource_type in db_resource_type_list] - if resource_type_list: - namespace_detail.resource_type_associations = ( - resource_type_list) - - # Get properties - prop_repo = self.gateway.get_metadef_property_repo(req.context) - db_properties = prop_repo.list(filters=ns_filters) - property_list = Namespace.to_model_properties(db_properties) - if property_list: - namespace_detail.properties = property_list - - if filters and filters['resource_type']: - namespace_detail = self._prefix_property_name( - namespace_detail, filters['resource_type']) - - # Get tags - tag_repo = self.gateway.get_metadef_tag_repo(req.context) - db_metatag_list = tag_repo.list(filters=ns_filters) - tag_list = [MetadefTag(**{'name': db_metatag.name}) - for db_metatag in db_metatag_list] - if tag_list: - namespace_detail.tags = tag_list - - except exception.Forbidden as e: - LOG.debug("User 
not permitted to show metadata namespace " - "'%s'", namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - return namespace_detail - - def update(self, req, user_ns, namespace): - namespace_repo = self.gateway.get_metadef_namespace_repo(req.context) - try: - ns_obj = namespace_repo.get(namespace) - ns_obj._old_namespace = ns_obj.namespace - ns_obj.namespace = wsme_utils._get_value(user_ns.namespace) - ns_obj.display_name = wsme_utils._get_value(user_ns.display_name) - ns_obj.description = wsme_utils._get_value(user_ns.description) - # Following optional fields will default to same values as in - # create namespace if not specified - ns_obj.visibility = ( - wsme_utils._get_value(user_ns.visibility) or 'private') - ns_obj.protected = ( - wsme_utils._get_value(user_ns.protected) or False) - ns_obj.owner = ( - wsme_utils._get_value(user_ns.owner) or req.context.owner) - updated_namespace = namespace_repo.save(ns_obj) - except exception.Invalid as e: - msg = (_("Couldn't update metadata namespace: %s") - % encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPBadRequest(explanation=msg) - except exception.Forbidden as e: - LOG.debug("User not permitted to update metadata namespace " - "'%s'", namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Duplicate as e: - raise webob.exc.HTTPConflict(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - - return Namespace.to_wsme_model(updated_namespace, - get_namespace_href(updated_namespace), - self.ns_schema_link) - - def delete(self, req, namespace): - namespace_repo = 
self.gateway.get_metadef_namespace_repo(req.context) - try: - namespace_obj = namespace_repo.get(namespace) - namespace_obj.delete() - namespace_repo.remove(namespace_obj) - except exception.Forbidden as e: - LOG.debug("User not permitted to delete metadata namespace " - "'%s'", namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - - def delete_objects(self, req, namespace): - ns_repo = self.gateway.get_metadef_namespace_repo(req.context) - try: - namespace_obj = ns_repo.get(namespace) - namespace_obj.delete() - ns_repo.remove_objects(namespace_obj) - except exception.Forbidden as e: - LOG.debug("User not permitted to delete metadata objects " - "within '%s' namespace", namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - - def delete_tags(self, req, namespace): - ns_repo = self.gateway.get_metadef_namespace_repo(req.context) - try: - namespace_obj = ns_repo.get(namespace) - namespace_obj.delete() - ns_repo.remove_tags(namespace_obj) - except exception.Forbidden as e: - LOG.debug("User not permitted to delete metadata tags " - "within '%s' namespace", namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - - def delete_properties(self, req, namespace): - ns_repo = self.gateway.get_metadef_namespace_repo(req.context) - try: - namespace_obj = ns_repo.get(namespace) - namespace_obj.delete() - ns_repo.remove_properties(namespace_obj) - except 
exception.Forbidden as e: - LOG.debug("User not permitted to delete metadata properties " - "within '%s' namespace", namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - - def _prefix_property_name(self, namespace_detail, user_resource_type): - prefix = None - if user_resource_type and namespace_detail.resource_type_associations: - for resource_type in namespace_detail.resource_type_associations: - if resource_type.name == user_resource_type: - prefix = resource_type.prefix - break - - if prefix: - if namespace_detail.properties: - new_property_dict = dict() - for (key, value) in namespace_detail.properties.items(): - new_property_dict[prefix + key] = value - namespace_detail.properties = new_property_dict - - if namespace_detail.objects: - for object in namespace_detail.objects: - new_object_property_dict = dict() - for (key, value) in object.properties.items(): - new_object_property_dict[prefix + key] = value - object.properties = new_object_property_dict - - if object.required and len(object.required) > 0: - required = [prefix + name for name in object.required] - object.required = required - - return namespace_detail - - -class RequestDeserializer(wsgi.JSONRequestDeserializer): - _disallowed_properties = ['self', 'schema', 'created_at', 'updated_at'] - - def __init__(self, schema=None): - super(RequestDeserializer, self).__init__() - self.schema = schema or get_schema() - - def _get_request_body(self, request): - output = super(RequestDeserializer, self).default(request) - if 'body' not in output: - msg = _('Body expected in request.') - raise webob.exc.HTTPBadRequest(explanation=msg) - return output['body'] - - @classmethod - def _check_allowed(cls, image): - for key in cls._disallowed_properties: - if key in image: - msg = _("Attribute '%s' is 
read-only.") % key - raise webob.exc.HTTPForbidden(explanation=msg) - - def index(self, request): - params = request.params.copy() - limit = params.pop('limit', None) - marker = params.pop('marker', None) - sort_dir = params.pop('sort_dir', 'desc') - - if limit is None: - limit = CONF.limit_param_default - limit = min(CONF.api_limit_max, int(limit)) - - query_params = { - 'sort_key': params.pop('sort_key', 'created_at'), - 'sort_dir': self._validate_sort_dir(sort_dir), - 'filters': self._get_filters(params) - } - - if marker is not None: - query_params['marker'] = marker - - if limit is not None: - query_params['limit'] = self._validate_limit(limit) - - return query_params - - def _validate_sort_dir(self, sort_dir): - if sort_dir not in ['asc', 'desc']: - msg = _('Invalid sort direction: %s') % sort_dir - raise webob.exc.HTTPBadRequest(explanation=msg) - - return sort_dir - - def _get_filters(self, filters): - visibility = filters.get('visibility') - if visibility: - if visibility not in ['public', 'private']: - msg = _('Invalid visibility value: %s') % visibility - raise webob.exc.HTTPBadRequest(explanation=msg) - - return filters - - def _validate_limit(self, limit): - try: - limit = int(limit) - except ValueError: - msg = _("limit param must be an integer") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if limit < 0: - msg = _("limit param must be positive") - raise webob.exc.HTTPBadRequest(explanation=msg) - - return limit - - def show(self, request): - params = request.params.copy() - query_params = { - 'filters': self._get_filters(params) - } - return query_params - - def create(self, request): - body = self._get_request_body(request) - self._check_allowed(body) - try: - self.schema.validate(body) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - namespace = json.fromjson(Namespace, body) - return dict(namespace=namespace) - - def update(self, request): - body = self._get_request_body(request) - 
self._check_allowed(body) - try: - self.schema.validate(body) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - namespace = json.fromjson(Namespace, body) - return dict(user_ns=namespace) - - -class ResponseSerializer(wsgi.JSONResponseSerializer): - def __init__(self, schema=None): - super(ResponseSerializer, self).__init__() - self.schema = schema - - def create(self, response, namespace): - ns_json = json.tojson(Namespace, namespace) - response = self.__render(ns_json, response, http.CREATED) - response.location = get_namespace_href(namespace) - - def show(self, response, namespace): - ns_json = json.tojson(Namespace, namespace) - response = self.__render(ns_json, response) - - def index(self, response, result): - params = dict(response.request.params) - params.pop('marker', None) - query = urlparse.urlencode(params) - result.first = "/v2/metadefs/namespaces" - result.schema = "/v2/schemas/metadefs/namespaces" - if query: - result.first = '%s?%s' % (result.first, query) - if result.next: - params['marker'] = result.next - next_query = urlparse.urlencode(params) - result.next = '/v2/metadefs/namespaces?%s' % next_query - - ns_json = json.tojson(Namespaces, result) - response = self.__render(ns_json, response) - - def update(self, response, namespace): - ns_json = json.tojson(Namespace, namespace) - response = self.__render(ns_json, response, http.OK) - - def delete(self, response, result): - response.status_int = http.NO_CONTENT - - def delete_objects(self, response, result): - response.status_int = http.NO_CONTENT - - def delete_properties(self, response, result): - response.status_int = http.NO_CONTENT - - def delete_tags(self, response, result): - response.status_int = http.NO_CONTENT - - def __render(self, json_data, response, response_status=None): - body = jsonutils.dumps(json_data, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - if response_status: - 
response.status_int = response_status - return response - - -def _get_base_definitions(): - return get_schema_definitions() - - -def get_schema_definitions(): - return { - "positiveInteger": { - "type": "integer", - "minimum": 0 - }, - "positiveIntegerDefault0": { - "allOf": [ - {"$ref": "#/definitions/positiveInteger"}, - {"default": 0} - ] - }, - "stringArray": { - "type": "array", - "items": {"type": "string"}, - # "minItems": 1, - "uniqueItems": True - }, - "property": { - "type": "object", - "additionalProperties": { - "type": "object", - "required": ["title", "type"], - "properties": { - "name": { - "type": "string", - "maxLength": 255 - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "operators": { - "type": "array", - "items": { - "type": "string" - } - }, - "type": { - "type": "string", - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - None - ] - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "minimum": { - "type": "number" - }, - "maximum": { - "type": "number" - }, - "maxLength": { - "$ref": "#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "type": "string", - "format": "regex" - }, - "enum": { - "type": "array" - }, - "readonly": { - "type": "boolean" - }, - "default": {}, - "items": { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - None - ] - }, - "enum": { - "type": "array" - } - } - }, - "maxItems": { - "$ref": "#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "type": "boolean", - "default": False - }, - "additionalItems": { - "type": "boolean" - }, - } - } - } - } - - -def _get_base_properties(): - return { - "namespace": { - "type": "string", - "description": _("The unique namespace text."), - 
"maxLength": 80, - }, - "display_name": { - "type": "string", - "description": _("The user friendly name for the namespace. Used " - "by UI if available."), - "maxLength": 80, - }, - "description": { - "type": "string", - "description": _("Provides a user friendly description of the " - "namespace."), - "maxLength": 500, - }, - "visibility": { - "type": "string", - "description": _("Scope of namespace accessibility."), - "enum": ["public", "private"], - }, - "protected": { - "type": "boolean", - "description": _("If true, namespace will not be deletable."), - }, - "owner": { - "type": "string", - "description": _("Owner of the namespace."), - "maxLength": 255, - }, - "created_at": { - "type": "string", - "readOnly": True, - "description": _("Date and time of namespace creation"), - "format": "date-time" - }, - "updated_at": { - "type": "string", - "readOnly": True, - "description": _("Date and time of the last namespace" - " modification"), - "format": "date-time" - }, - "schema": { - 'readOnly': True, - "type": "string" - }, - "self": { - 'readOnly': True, - "type": "string" - }, - "resource_type_associations": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "prefix": { - "type": "string" - }, - "properties_target": { - "type": "string" - } - } - } - }, - "properties": { - "$ref": "#/definitions/property" - }, - "objects": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "properties": { - "$ref": "#/definitions/property" - }, - } - } - }, - "tags": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - } - } - }, - } - - -def get_schema(): - properties = _get_base_properties() - definitions = _get_base_definitions() - mandatory_attrs = Namespace.get_mandatory_attrs() - schema = 
glance.schema.Schema( - 'namespace', - properties, - required=mandatory_attrs, - definitions=definitions - ) - return schema - - -def get_collection_schema(): - namespace_schema = get_schema() - return glance.schema.CollectionSchema('namespaces', namespace_schema) - - -def get_namespace_href(namespace): - base_href = '/v2/metadefs/namespaces/%s' % namespace.namespace - return base_href - - -def get_object_href(namespace_name, metadef_object): - base_href = ('/v2/metadefs/namespaces/%s/objects/%s' % - (namespace_name, metadef_object.name)) - return base_href - - -def get_tag_href(namespace_name, metadef_tag): - base_href = ('/v2/metadefs/namespaces/%s/tags/%s' % - (namespace_name, metadef_tag.name)) - return base_href - - -def create_resource(): - """Namespaces resource factory method""" - schema = get_schema() - deserializer = RequestDeserializer(schema) - serializer = ResponseSerializer(schema) - controller = NamespaceController() - return wsgi.Resource(controller, deserializer, serializer) diff --git a/glance/api/v2/metadef_objects.py b/glance/api/v2/metadef_objects.py deleted file mode 100644 index c42d69f3..00000000 --- a/glance/api/v2/metadef_objects.py +++ /dev/null @@ -1,367 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -import six -from six.moves import http_client as http -import webob.exc -from wsme.rest import json - -from glance.api import policy -from glance.api.v2 import metadef_namespaces as namespaces -from glance.api.v2.model.metadef_object import MetadefObject -from glance.api.v2.model.metadef_object import MetadefObjects -from glance.common import exception -from glance.common import wsgi -from glance.common import wsme_utils -import glance.db -from glance.i18n import _ -import glance.notifier -import glance.schema - -LOG = logging.getLogger(__name__) - - -class MetadefObjectsController(object): - def __init__(self, db_api=None, policy_enforcer=None, notifier=None): - self.db_api = db_api or glance.db.get_api() - self.policy = policy_enforcer or policy.Enforcer() - self.notifier = notifier or glance.notifier.Notifier() - self.gateway = glance.gateway.Gateway(db_api=self.db_api, - notifier=self.notifier, - policy_enforcer=self.policy) - self.obj_schema_link = '/v2/schemas/metadefs/object' - - def create(self, req, metadata_object, namespace): - object_factory = self.gateway.get_metadef_object_factory(req.context) - object_repo = self.gateway.get_metadef_object_repo(req.context) - try: - new_meta_object = object_factory.new_object( - namespace=namespace, - **metadata_object.to_dict()) - object_repo.add(new_meta_object) - - except exception.Forbidden as e: - LOG.debug("User not permitted to create metadata object within " - "'%s' namespace", namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.Invalid as e: - msg = (_("Couldn't create metadata object: %s") - % encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPBadRequest(explanation=msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Duplicate as e: - raise webob.exc.HTTPConflict(explanation=e.msg) - except Exception 
as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - return MetadefObject.to_wsme_model( - new_meta_object, - get_object_href(namespace, new_meta_object), - self.obj_schema_link) - - def index(self, req, namespace, marker=None, limit=None, - sort_key='created_at', sort_dir='desc', filters=None): - try: - filters = filters or dict() - filters['namespace'] = namespace - object_repo = self.gateway.get_metadef_object_repo(req.context) - db_metaobject_list = object_repo.list( - marker=marker, limit=limit, sort_key=sort_key, - sort_dir=sort_dir, filters=filters) - object_list = [MetadefObject.to_wsme_model( - db_metaobject, - get_object_href(namespace, db_metaobject), - self.obj_schema_link) for db_metaobject in db_metaobject_list] - metadef_objects = MetadefObjects() - metadef_objects.objects = object_list - except exception.Forbidden as e: - LOG.debug("User not permitted to retrieve metadata objects within " - "'%s' namespace", namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - return metadef_objects - - def show(self, req, namespace, object_name): - meta_object_repo = self.gateway.get_metadef_object_repo( - req.context) - try: - metadef_object = meta_object_repo.get(namespace, object_name) - return MetadefObject.to_wsme_model( - metadef_object, - get_object_href(namespace, metadef_object), - self.obj_schema_link) - except exception.Forbidden as e: - LOG.debug("User not permitted to show metadata object '%s' " - "within '%s' namespace", namespace, object_name) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - 
- def update(self, req, metadata_object, namespace, object_name): - meta_repo = self.gateway.get_metadef_object_repo(req.context) - try: - metadef_object = meta_repo.get(namespace, object_name) - metadef_object._old_name = metadef_object.name - metadef_object.name = wsme_utils._get_value( - metadata_object.name) - metadef_object.description = wsme_utils._get_value( - metadata_object.description) - metadef_object.required = wsme_utils._get_value( - metadata_object.required) - metadef_object.properties = wsme_utils._get_value( - metadata_object.properties) - updated_metadata_obj = meta_repo.save(metadef_object) - except exception.Invalid as e: - msg = (_("Couldn't update metadata object: %s") - % encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPBadRequest(explanation=msg) - except exception.Forbidden as e: - LOG.debug("User not permitted to update metadata object '%s' " - "within '%s' namespace ", object_name, namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Duplicate as e: - raise webob.exc.HTTPConflict(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - return MetadefObject.to_wsme_model( - updated_metadata_obj, - get_object_href(namespace, updated_metadata_obj), - self.obj_schema_link) - - def delete(self, req, namespace, object_name): - meta_repo = self.gateway.get_metadef_object_repo(req.context) - try: - metadef_object = meta_repo.get(namespace, object_name) - metadef_object.delete() - meta_repo.remove(metadef_object) - except exception.Forbidden as e: - LOG.debug("User not permitted to delete metadata object '%s' " - "within '%s' namespace", object_name, namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - 
LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - - -def _get_base_definitions(): - return namespaces.get_schema_definitions() - - -def _get_base_properties(): - return { - "name": { - "type": "string", - "maxLength": 255 - }, - "description": { - "type": "string" - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "properties": { - "$ref": "#/definitions/property" - }, - "schema": { - 'readOnly': True, - "type": "string" - }, - "self": { - 'readOnly': True, - "type": "string" - }, - "created_at": { - "type": "string", - "readOnly": True, - "description": _("Date and time of object creation"), - "format": "date-time" - }, - "updated_at": { - "type": "string", - "readOnly": True, - "description": _("Date and time of the last object modification"), - "format": "date-time" - } - } - - -def get_schema(): - definitions = _get_base_definitions() - properties = _get_base_properties() - mandatory_attrs = MetadefObject.get_mandatory_attrs() - schema = glance.schema.Schema( - 'object', - properties, - required=mandatory_attrs, - definitions=definitions, - ) - return schema - - -def get_collection_schema(): - object_schema = get_schema() - return glance.schema.CollectionSchema('objects', object_schema) - - -class RequestDeserializer(wsgi.JSONRequestDeserializer): - _disallowed_properties = ['self', 'schema', 'created_at', 'updated_at'] - - def __init__(self, schema=None): - super(RequestDeserializer, self).__init__() - self.schema = schema or get_schema() - - def _get_request_body(self, request): - output = super(RequestDeserializer, self).default(request) - if 'body' not in output: - msg = _('Body expected in request.') - raise webob.exc.HTTPBadRequest(explanation=msg) - return output['body'] - - def create(self, request): - body = self._get_request_body(request) - self._check_allowed(body) - try: - self.schema.validate(body) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - 
metadata_object = json.fromjson(MetadefObject, body) - return dict(metadata_object=metadata_object) - - def update(self, request): - body = self._get_request_body(request) - self._check_allowed(body) - try: - self.schema.validate(body) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - metadata_object = json.fromjson(MetadefObject, body) - return dict(metadata_object=metadata_object) - - def index(self, request): - params = request.params.copy() - limit = params.pop('limit', None) - marker = params.pop('marker', None) - sort_dir = params.pop('sort_dir', 'desc') - - query_params = { - 'sort_key': params.pop('sort_key', 'created_at'), - 'sort_dir': self._validate_sort_dir(sort_dir), - 'filters': self._get_filters(params) - } - - if marker is not None: - query_params['marker'] = marker - - if limit is not None: - query_params['limit'] = self._validate_limit(limit) - - return query_params - - def _validate_sort_dir(self, sort_dir): - if sort_dir not in ['asc', 'desc']: - msg = _('Invalid sort direction: %s') % sort_dir - raise webob.exc.HTTPBadRequest(explanation=msg) - - return sort_dir - - def _get_filters(self, filters): - visibility = filters.get('visibility') - if visibility: - if visibility not in ['public', 'private', 'shared']: - msg = _('Invalid visibility value: %s') % visibility - raise webob.exc.HTTPBadRequest(explanation=msg) - - return filters - - def _validate_limit(self, limit): - try: - limit = int(limit) - except ValueError: - msg = _("limit param must be an integer") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if limit <= 0: - msg = _("limit param must be positive") - raise webob.exc.HTTPBadRequest(explanation=msg) - - return limit - - @classmethod - def _check_allowed(cls, image): - for key in cls._disallowed_properties: - if key in image: - msg = _("Attribute '%s' is read-only.") % key - raise webob.exc.HTTPForbidden(explanation=msg) - - -class ResponseSerializer(wsgi.JSONResponseSerializer): - 
def __init__(self, schema=None): - super(ResponseSerializer, self).__init__() - self.schema = schema or get_schema() - - def create(self, response, metadata_object): - response.status_int = http.CREATED - self.show(response, metadata_object) - - def show(self, response, metadata_object): - metadata_object_json = json.tojson(MetadefObject, metadata_object) - body = jsonutils.dumps(metadata_object_json, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def update(self, response, metadata_object): - response.status_int = http.OK - self.show(response, metadata_object) - - def index(self, response, result): - result.schema = "v2/schemas/metadefs/objects" - metadata_objects_json = json.tojson(MetadefObjects, result) - body = jsonutils.dumps(metadata_objects_json, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def delete(self, response, result): - response.status_int = http.NO_CONTENT - - -def get_object_href(namespace_name, metadef_object): - base_href = ('/v2/metadefs/namespaces/%s/objects/%s' % - (namespace_name, metadef_object.name)) - return base_href - - -def create_resource(): - """Metadef objects resource factory method""" - schema = get_schema() - deserializer = RequestDeserializer(schema) - serializer = ResponseSerializer(schema) - controller = MetadefObjectsController() - return wsgi.Resource(controller, deserializer, serializer) diff --git a/glance/api/v2/metadef_properties.py b/glance/api/v2/metadef_properties.py deleted file mode 100644 index c8685e2c..00000000 --- a/glance/api/v2/metadef_properties.py +++ /dev/null @@ -1,316 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -import six -from six.moves import http_client as http -import webob.exc -from wsme.rest import json - -from glance.api import policy -from glance.api.v2 import metadef_namespaces as namespaces -from glance.api.v2.model.metadef_namespace import Namespace -from glance.api.v2.model.metadef_property_type import PropertyType -from glance.api.v2.model.metadef_property_type import PropertyTypes -from glance.common import exception -from glance.common import wsgi -import glance.db -import glance.gateway -from glance.i18n import _ -import glance.notifier -import glance.schema - -LOG = logging.getLogger(__name__) - - -class NamespacePropertiesController(object): - def __init__(self, db_api=None, policy_enforcer=None, notifier=None): - self.db_api = db_api or glance.db.get_api() - self.policy = policy_enforcer or policy.Enforcer() - self.notifier = notifier or glance.notifier.Notifier() - self.gateway = glance.gateway.Gateway(db_api=self.db_api, - notifier=self.notifier, - policy_enforcer=self.policy) - - def _to_dict(self, model_property_type): - # Convert the model PropertyTypes dict to a JSON encoding - db_property_type_dict = dict() - db_property_type_dict['schema'] = json.tojson( - PropertyType, model_property_type) - db_property_type_dict['name'] = model_property_type.name - return db_property_type_dict - - def _to_model(self, db_property_type): - # Convert the persisted json schema to a dict of PropertyTypes - property_type = json.fromjson( - 
PropertyType, db_property_type.schema) - property_type.name = db_property_type.name - return property_type - - def index(self, req, namespace): - try: - filters = dict() - filters['namespace'] = namespace - prop_repo = self.gateway.get_metadef_property_repo(req.context) - db_properties = prop_repo.list(filters=filters) - property_list = Namespace.to_model_properties(db_properties) - namespace_properties = PropertyTypes() - namespace_properties.properties = property_list - except exception.Forbidden as e: - LOG.debug("User not permitted to retrieve metadata properties " - "within '%s' namespace", namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - return namespace_properties - - def show(self, req, namespace, property_name, filters=None): - try: - if filters and filters['resource_type']: - rs_repo = self.gateway.get_metadef_resource_type_repo( - req.context) - db_resource_type = rs_repo.get(filters['resource_type'], - namespace) - prefix = db_resource_type.prefix - if prefix and property_name.startswith(prefix): - property_name = property_name[len(prefix):] - else: - msg = (_("Property %(property_name)s does not start " - "with the expected resource type association " - "prefix of '%(prefix)s'.") - % {'property_name': property_name, - 'prefix': prefix}) - raise exception.NotFound(msg) - - prop_repo = self.gateway.get_metadef_property_repo(req.context) - db_property = prop_repo.get(namespace, property_name) - property = self._to_model(db_property) - except exception.Forbidden as e: - LOG.debug("User not permitted to show metadata property '%s' " - "within '%s' namespace", property_name, namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - 
LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - return property - - def create(self, req, namespace, property_type): - prop_factory = self.gateway.get_metadef_property_factory(req.context) - prop_repo = self.gateway.get_metadef_property_repo(req.context) - try: - new_property_type = prop_factory.new_namespace_property( - namespace=namespace, **self._to_dict(property_type)) - prop_repo.add(new_property_type) - except exception.Forbidden as e: - LOG.debug("User not permitted to create metadata property within " - "'%s' namespace", namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.Invalid as e: - msg = (_("Couldn't create metadata property: %s") - % encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPBadRequest(explanation=msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Duplicate as e: - raise webob.exc.HTTPConflict(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - return self._to_model(new_property_type) - - def update(self, req, namespace, property_name, property_type): - prop_repo = self.gateway.get_metadef_property_repo(req.context) - try: - db_property_type = prop_repo.get(namespace, property_name) - db_property_type._old_name = db_property_type.name - db_property_type.name = property_type.name - db_property_type.schema = (self._to_dict(property_type))['schema'] - updated_property_type = prop_repo.save(db_property_type) - except exception.Invalid as e: - msg = (_("Couldn't update metadata property: %s") - % encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPBadRequest(explanation=msg) - except exception.Forbidden as e: - LOG.debug("User not permitted to update metadata property '%s' " - "within '%s' namespace", property_name, namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise 
webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Duplicate as e: - raise webob.exc.HTTPConflict(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - return self._to_model(updated_property_type) - - def delete(self, req, namespace, property_name): - prop_repo = self.gateway.get_metadef_property_repo(req.context) - try: - property_type = prop_repo.get(namespace, property_name) - property_type.delete() - prop_repo.remove(property_type) - except exception.Forbidden as e: - LOG.debug("User not permitted to delete metadata property '%s' " - "within '%s' namespace", property_name, namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - - -class RequestDeserializer(wsgi.JSONRequestDeserializer): - _disallowed_properties = ['created_at', 'updated_at'] - - def __init__(self, schema=None): - super(RequestDeserializer, self).__init__() - self.schema = schema or get_schema() - - def _get_request_body(self, request): - output = super(RequestDeserializer, self).default(request) - if 'body' not in output: - msg = _('Body expected in request.') - raise webob.exc.HTTPBadRequest(explanation=msg) - return output['body'] - - @classmethod - def _check_allowed(cls, image): - for key in cls._disallowed_properties: - if key in image: - msg = _("Attribute '%s' is read-only.") % key - raise webob.exc.HTTPForbidden(explanation=msg) - - def create(self, request): - body = self._get_request_body(request) - self._check_allowed(body) - try: - self.schema.validate(body) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - property_type = json.fromjson(PropertyType, body) - return dict(property_type=property_type) - - def update(self, request): - body = 
self._get_request_body(request) - self._check_allowed(body) - try: - self.schema.validate(body) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - property_type = json.fromjson(PropertyType, body) - return dict(property_type=property_type) - - def show(self, request): - params = request.params.copy() - query_params = { - 'filters': params - } - return query_params - - -class ResponseSerializer(wsgi.JSONResponseSerializer): - def __init__(self, schema=None): - super(ResponseSerializer, self).__init__() - self.schema = schema - - def show(self, response, result): - property_type_json = json.tojson(PropertyType, result) - body = jsonutils.dumps(property_type_json, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def index(self, response, result): - property_type_json = json.tojson(PropertyTypes, result) - body = jsonutils.dumps(property_type_json, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def create(self, response, result): - response.status_int = http.CREATED - self.show(response, result) - - def update(self, response, result): - response.status_int = http.OK - self.show(response, result) - - def delete(self, response, result): - response.status_int = http.NO_CONTENT - - -def _get_base_definitions(): - return { - "positiveInteger": { - "type": "integer", - "minimum": 0 - }, - "positiveIntegerDefault0": { - "allOf": [ - {"$ref": "#/definitions/positiveInteger"}, - {"default": 0} - ] - }, - "stringArray": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1, - "uniqueItems": True - } - } - - -def _get_base_properties(): - base_def = namespaces.get_schema_definitions() - return base_def['property']['additionalProperties']['properties'] - - -def get_schema(): - definitions = _get_base_definitions() - properties = _get_base_properties() - mandatory_attrs = 
PropertyType.get_mandatory_attrs() - # name is required attribute when use as single property type - mandatory_attrs.append('name') - schema = glance.schema.Schema( - 'property', - properties, - required=mandatory_attrs, - definitions=definitions - ) - return schema - - -def get_collection_schema(): - namespace_properties_schema = get_schema() - # Property name is a dict key and not a required attribute in - # individual property schema inside property collections - namespace_properties_schema.required.remove('name') - return glance.schema.DictCollectionSchema('properties', - namespace_properties_schema) - - -def create_resource(): - """NamespaceProperties resource factory method""" - schema = get_schema() - deserializer = RequestDeserializer(schema) - serializer = ResponseSerializer(schema) - controller = NamespacePropertiesController() - return wsgi.Resource(controller, deserializer, serializer) diff --git a/glance/api/v2/metadef_resource_types.py b/glance/api/v2/metadef_resource_types.py deleted file mode 100644 index ef5956fc..00000000 --- a/glance/api/v2/metadef_resource_types.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -import six -from six.moves import http_client as http -import webob.exc -from wsme.rest import json - -from glance.api import policy -from glance.api.v2.model.metadef_resource_type import ResourceType -from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation -from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociations -from glance.api.v2.model.metadef_resource_type import ResourceTypes -from glance.common import exception -from glance.common import wsgi -import glance.db -import glance.gateway -from glance.i18n import _ -import glance.notifier -import glance.schema - -LOG = logging.getLogger(__name__) - - -class ResourceTypeController(object): - def __init__(self, db_api=None, policy_enforcer=None, notifier=None): - self.db_api = db_api or glance.db.get_api() - self.policy = policy_enforcer or policy.Enforcer() - self.notifier = notifier or glance.notifier.Notifier() - self.gateway = glance.gateway.Gateway(db_api=self.db_api, - notifier=self.notifier, - policy_enforcer=self.policy) - - def index(self, req): - try: - filters = {'namespace': None} - rs_type_repo = self.gateway.get_metadef_resource_type_repo( - req.context) - db_resource_type_list = rs_type_repo.list(filters=filters) - resource_type_list = [ResourceType.to_wsme_model( - resource_type) for resource_type in db_resource_type_list] - resource_types = ResourceTypes() - resource_types.resource_types = resource_type_list - except exception.Forbidden as e: - LOG.debug("User not permitted to retrieve metadata resource types " - "index") - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError(e) - return resource_types - - def show(self, req, namespace): - try: - 
filters = {'namespace': namespace} - rs_type_repo = self.gateway.get_metadef_resource_type_repo( - req.context) - db_resource_type_list = rs_type_repo.list(filters=filters) - resource_type_list = [ResourceTypeAssociation.to_wsme_model( - resource_type) for resource_type in db_resource_type_list] - resource_types = ResourceTypeAssociations() - resource_types.resource_type_associations = resource_type_list - except exception.Forbidden as e: - LOG.debug("User not permitted to retrieve metadata resource types " - "within '%s' namespace", namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError(e) - return resource_types - - def create(self, req, resource_type, namespace): - rs_type_factory = self.gateway.get_metadef_resource_type_factory( - req.context) - rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context) - try: - new_resource_type = rs_type_factory.new_resource_type( - namespace=namespace, **resource_type.to_dict()) - rs_type_repo.add(new_resource_type) - - except exception.Forbidden as e: - LOG.debug("User not permitted to create metadata resource type " - "within '%s' namespace", namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Duplicate as e: - raise webob.exc.HTTPConflict(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - return ResourceTypeAssociation.to_wsme_model(new_resource_type) - - def delete(self, req, namespace, resource_type): - rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context) - try: - filters = {} - found = False - filters['namespace'] = namespace - db_resource_type_list = rs_type_repo.list(filters=filters) - 
for db_resource_type in db_resource_type_list: - if db_resource_type.name == resource_type: - db_resource_type.delete() - rs_type_repo.remove(db_resource_type) - found = True - if not found: - raise exception.NotFound() - except exception.Forbidden as e: - LOG.debug("User not permitted to delete metadata resource type " - "'%s' within '%s' namespace", resource_type, namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - msg = (_("Failed to find resource type %(resourcetype)s to " - "delete") % {'resourcetype': resource_type}) - LOG.error(msg) - raise webob.exc.HTTPNotFound(explanation=msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - - -class RequestDeserializer(wsgi.JSONRequestDeserializer): - _disallowed_properties = ['created_at', 'updated_at'] - - def __init__(self, schema=None): - super(RequestDeserializer, self).__init__() - self.schema = schema or get_schema() - - def _get_request_body(self, request): - output = super(RequestDeserializer, self).default(request) - if 'body' not in output: - msg = _('Body expected in request.') - raise webob.exc.HTTPBadRequest(explanation=msg) - return output['body'] - - @classmethod - def _check_allowed(cls, image): - for key in cls._disallowed_properties: - if key in image: - msg = _("Attribute '%s' is read-only.") % key - raise webob.exc.HTTPForbidden(explanation=msg) - - def create(self, request): - body = self._get_request_body(request) - self._check_allowed(body) - try: - self.schema.validate(body) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - resource_type = json.fromjson(ResourceTypeAssociation, body) - return dict(resource_type=resource_type) - - -class ResponseSerializer(wsgi.JSONResponseSerializer): - def __init__(self, schema=None): - super(ResponseSerializer, self).__init__() - self.schema = schema - - def show(self, response, result): - 
resource_type_json = json.tojson(ResourceTypeAssociations, result) - body = jsonutils.dumps(resource_type_json, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def index(self, response, result): - resource_type_json = json.tojson(ResourceTypes, result) - body = jsonutils.dumps(resource_type_json, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def create(self, response, result): - resource_type_json = json.tojson(ResourceTypeAssociation, result) - response.status_int = http.CREATED - body = jsonutils.dumps(resource_type_json, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def delete(self, response, result): - response.status_int = http.NO_CONTENT - - -def _get_base_properties(): - return { - 'name': { - 'type': 'string', - 'description': _('Resource type names should be aligned with Heat ' - 'resource types whenever possible: ' - 'http://docs.openstack.org/developer/heat/' - 'template_guide/openstack.html'), - 'maxLength': 80, - }, - 'prefix': { - 'type': 'string', - 'description': _('Specifies the prefix to use for the given ' - 'resource type. Any properties in the namespace ' - 'should be prefixed with this prefix when being ' - 'applied to the specified resource type. Must ' - 'include prefix separator (e.g. a colon :).'), - 'maxLength': 80, - }, - 'properties_target': { - 'type': 'string', - 'description': _('Some resource types allow more than one key / ' - 'value pair per instance. For example, Cinder ' - 'allows user and image metadata on volumes. Only ' - 'the image properties metadata is evaluated by ' - 'Nova (scheduling or drivers). 
This property ' - 'allows a namespace target to remove the ' - 'ambiguity.'), - 'maxLength': 80, - }, - "created_at": { - "type": "string", - "readOnly": True, - "description": _("Date and time of resource type association"), - "format": "date-time" - }, - "updated_at": { - "type": "string", - "readOnly": True, - "description": _("Date and time of the last resource type " - "association modification"), - "format": "date-time" - } - } - - -def get_schema(): - properties = _get_base_properties() - mandatory_attrs = ResourceTypeAssociation.get_mandatory_attrs() - schema = glance.schema.Schema( - 'resource_type_association', - properties, - required=mandatory_attrs, - ) - return schema - - -def get_collection_schema(): - resource_type_schema = get_schema() - return glance.schema.CollectionSchema('resource_type_associations', - resource_type_schema) - - -def create_resource(): - """ResourceTypeAssociation resource factory method""" - schema = get_schema() - deserializer = RequestDeserializer(schema) - serializer = ResponseSerializer(schema) - controller = ResourceTypeController() - return wsgi.Resource(controller, deserializer, serializer) diff --git a/glance/api/v2/metadef_tags.py b/glance/api/v2/metadef_tags.py deleted file mode 100644 index 1ebb31cc..00000000 --- a/glance/api/v2/metadef_tags.py +++ /dev/null @@ -1,411 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -import six -from six.moves import http_client as http -import webob.exc -from wsme.rest import json - -from glance.api import policy -from glance.api.v2.model.metadef_tag import MetadefTag -from glance.api.v2.model.metadef_tag import MetadefTags -from glance.common import exception -from glance.common import wsgi -from glance.common import wsme_utils -import glance.db -from glance.i18n import _ -import glance.notifier -import glance.schema - -LOG = logging.getLogger(__name__) - - -class TagsController(object): - def __init__(self, db_api=None, policy_enforcer=None, notifier=None, - schema=None): - self.db_api = db_api or glance.db.get_api() - self.policy = policy_enforcer or policy.Enforcer() - self.notifier = notifier or glance.notifier.Notifier() - self.gateway = glance.gateway.Gateway(db_api=self.db_api, - notifier=self.notifier, - policy_enforcer=self.policy) - self.schema = schema or get_schema() - self.tag_schema_link = '/v2/schemas/metadefs/tag' - - def create(self, req, namespace, tag_name): - tag_factory = self.gateway.get_metadef_tag_factory(req.context) - tag_repo = self.gateway.get_metadef_tag_repo(req.context) - tag_name_as_dict = {'name': tag_name} - try: - self.schema.validate(tag_name_as_dict) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - try: - new_meta_tag = tag_factory.new_tag( - namespace=namespace, - **tag_name_as_dict) - tag_repo.add(new_meta_tag) - except exception.Invalid as e: - msg = (_("Couldn't create metadata tag: %s") - % encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPBadRequest(explanation=msg) - except exception.Forbidden as e: - LOG.debug("User not permitted to create metadata tag within " - "'%s' namespace", namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - 
except exception.Duplicate as e: - raise webob.exc.HTTPConflict(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - - return MetadefTag.to_wsme_model(new_meta_tag) - - def create_tags(self, req, metadata_tags, namespace): - tag_factory = self.gateway.get_metadef_tag_factory(req.context) - tag_repo = self.gateway.get_metadef_tag_repo(req.context) - try: - tag_list = [] - for metadata_tag in metadata_tags.tags: - tag_list.append(tag_factory.new_tag( - namespace=namespace, **metadata_tag.to_dict())) - tag_repo.add_tags(tag_list) - tag_list_out = [MetadefTag(**{'name': db_metatag.name}) - for db_metatag in tag_list] - metadef_tags = MetadefTags() - metadef_tags.tags = tag_list_out - except exception.Forbidden as e: - LOG.debug("User not permitted to create metadata tags within " - "'%s' namespace", namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Duplicate as e: - raise webob.exc.HTTPConflict(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - - return metadef_tags - - def index(self, req, namespace, marker=None, limit=None, - sort_key='created_at', sort_dir='desc', filters=None): - try: - filters = filters or dict() - filters['namespace'] = namespace - - tag_repo = self.gateway.get_metadef_tag_repo(req.context) - if marker: - metadef_tag = tag_repo.get(namespace, marker) - marker = metadef_tag.tag_id - - db_metatag_list = tag_repo.list( - marker=marker, limit=limit, sort_key=sort_key, - sort_dir=sort_dir, filters=filters) - - tag_list = [MetadefTag(**{'name': db_metatag.name}) - for db_metatag in db_metatag_list] - - metadef_tags = MetadefTags() - metadef_tags.tags = tag_list - except exception.Forbidden as e: - LOG.debug("User not permitted to retrieve metadata tags " - "within '%s' 
namespace", namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - - return metadef_tags - - def show(self, req, namespace, tag_name): - meta_tag_repo = self.gateway.get_metadef_tag_repo(req.context) - try: - metadef_tag = meta_tag_repo.get(namespace, tag_name) - return MetadefTag.to_wsme_model(metadef_tag) - except exception.Forbidden as e: - LOG.debug("User not permitted to show metadata tag '%s' " - "within '%s' namespace", tag_name, namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - - def update(self, req, metadata_tag, namespace, tag_name): - meta_repo = self.gateway.get_metadef_tag_repo(req.context) - try: - metadef_tag = meta_repo.get(namespace, tag_name) - metadef_tag._old_name = metadef_tag.name - metadef_tag.name = wsme_utils._get_value( - metadata_tag.name) - updated_metadata_tag = meta_repo.save(metadef_tag) - except exception.Invalid as e: - msg = (_("Couldn't update metadata tag: %s") - % encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPBadRequest(explanation=msg) - except exception.Forbidden as e: - LOG.debug("User not permitted to update metadata tag '%s' " - "within '%s' namespace", tag_name, namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Duplicate as e: - raise webob.exc.HTTPConflict(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - - return MetadefTag.to_wsme_model(updated_metadata_tag) - - def delete(self, req, 
namespace, tag_name): - meta_repo = self.gateway.get_metadef_tag_repo(req.context) - try: - metadef_tag = meta_repo.get(namespace, tag_name) - metadef_tag.delete() - meta_repo.remove(metadef_tag) - except exception.Forbidden as e: - LOG.debug("User not permitted to delete metadata tag '%s' " - "within '%s' namespace", tag_name, namespace) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPInternalServerError() - - -def _get_base_definitions(): - return None - - -def _get_base_properties(): - return { - "name": { - "type": "string", - "maxLength": 255 - }, - "created_at": { - "type": "string", - "readOnly": True, - "description": _("Date and time of tag creation"), - "format": "date-time" - }, - "updated_at": { - "type": "string", - "readOnly": True, - "description": _("Date and time of the last tag modification"), - "format": "date-time" - } - } - - -def _get_base_properties_for_list(): - return { - "tags": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - }, - 'required': ['name'], - "additionalProperties": False - } - }, - } - - -def get_schema(): - definitions = _get_base_definitions() - properties = _get_base_properties() - mandatory_attrs = MetadefTag.get_mandatory_attrs() - schema = glance.schema.Schema( - 'tag', - properties, - required=mandatory_attrs, - definitions=definitions, - ) - return schema - - -def get_schema_for_list(): - definitions = _get_base_definitions() - properties = _get_base_properties_for_list() - schema = glance.schema.Schema( - 'tags', - properties, - required=None, - definitions=definitions, - ) - return schema - - -def get_collection_schema(): - tag_schema = get_schema() - return glance.schema.CollectionSchema('tags', tag_schema) - - -class RequestDeserializer(wsgi.JSONRequestDeserializer): - 
_disallowed_properties = ['created_at', 'updated_at'] - - def __init__(self, schema=None): - super(RequestDeserializer, self).__init__() - self.schema = schema or get_schema() - self.schema_for_list = get_schema_for_list() - - def _get_request_body(self, request): - output = super(RequestDeserializer, self).default(request) - if 'body' not in output: - msg = _('Body expected in request.') - raise webob.exc.HTTPBadRequest(explanation=msg) - return output['body'] - - def _validate_sort_dir(self, sort_dir): - if sort_dir not in ['asc', 'desc']: - msg = _('Invalid sort direction: %s') % sort_dir - raise webob.exc.HTTPBadRequest(explanation=msg) - - return sort_dir - - def _get_filters(self, filters): - visibility = filters.get('visibility') - if visibility: - if visibility not in ['public', 'private', 'shared']: - msg = _('Invalid visibility value: %s') % visibility - raise webob.exc.HTTPBadRequest(explanation=msg) - - return filters - - def _validate_limit(self, limit): - try: - limit = int(limit) - except ValueError: - msg = _("limit param must be an integer") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if limit < 0: - msg = _("limit param must be positive") - raise webob.exc.HTTPBadRequest(explanation=msg) - - return limit - - def update(self, request): - body = self._get_request_body(request) - self._check_allowed(body) - try: - self.schema.validate(body) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - metadata_tag = json.fromjson(MetadefTag, body) - return dict(metadata_tag=metadata_tag) - - def index(self, request): - params = request.params.copy() - limit = params.pop('limit', None) - marker = params.pop('marker', None) - sort_dir = params.pop('sort_dir', 'desc') - - query_params = { - 'sort_key': params.pop('sort_key', 'created_at'), - 'sort_dir': self._validate_sort_dir(sort_dir), - 'filters': self._get_filters(params) - } - - if marker: - query_params['marker'] = marker - - if limit: - 
query_params['limit'] = self._validate_limit(limit) - - return query_params - - def create_tags(self, request): - body = self._get_request_body(request) - self._check_allowed(body) - try: - self.schema_for_list.validate(body) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - metadata_tags = json.fromjson(MetadefTags, body) - return dict(metadata_tags=metadata_tags) - - @classmethod - def _check_allowed(cls, image): - for key in cls._disallowed_properties: - if key in image: - msg = _("Attribute '%s' is read-only.") % key - raise webob.exc.HTTPForbidden(explanation=msg) - - -class ResponseSerializer(wsgi.JSONResponseSerializer): - def __init__(self, schema=None): - super(ResponseSerializer, self).__init__() - self.schema = schema or get_schema() - - def create(self, response, metadata_tag): - response.status_int = http.CREATED - self.show(response, metadata_tag) - - def create_tags(self, response, result): - response.status_int = http.CREATED - metadata_tags_json = json.tojson(MetadefTags, result) - body = jsonutils.dumps(metadata_tags_json, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def show(self, response, metadata_tag): - metadata_tag_json = json.tojson(MetadefTag, metadata_tag) - body = jsonutils.dumps(metadata_tag_json, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def update(self, response, metadata_tag): - response.status_int = http.OK - self.show(response, metadata_tag) - - def index(self, response, result): - metadata_tags_json = json.tojson(MetadefTags, result) - body = jsonutils.dumps(metadata_tags_json, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def delete(self, response, result): - response.status_int = http.NO_CONTENT - - -def get_tag_href(namespace_name, metadef_tag): - base_href = 
('/v2/metadefs/namespaces/%s/tags/%s' % - (namespace_name, metadef_tag.name)) - return base_href - - -def create_resource(): - """Metadef tags resource factory method""" - schema = get_schema() - deserializer = RequestDeserializer(schema) - serializer = ResponseSerializer(schema) - controller = TagsController() - return wsgi.Resource(controller, deserializer, serializer) diff --git a/glance/api/v2/model/__init__.py b/glance/api/v2/model/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/api/v2/model/metadef_namespace.py b/glance/api/v2/model/metadef_namespace.py deleted file mode 100644 index 64006366..00000000 --- a/glance/api/v2/model/metadef_namespace.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import wsme -from wsme.rest import json -from wsme import types - -from glance.api.v2.model.metadef_object import MetadefObject -from glance.api.v2.model.metadef_property_type import PropertyType -from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation -from glance.api.v2.model.metadef_tag import MetadefTag -from glance.common.wsme_utils import WSMEModelTransformer - - -class Namespace(types.Base, WSMEModelTransformer): - - # Base fields - namespace = wsme.wsattr(types.text, mandatory=True) - display_name = wsme.wsattr(types.text, mandatory=False) - description = wsme.wsattr(types.text, mandatory=False) - visibility = wsme.wsattr(types.text, mandatory=False) - protected = wsme.wsattr(bool, mandatory=False) - owner = wsme.wsattr(types.text, mandatory=False) - - # Not using datetime since time format has to be - # in oslo_utils.timeutils.isotime() format - created_at = wsme.wsattr(types.text, mandatory=False) - updated_at = wsme.wsattr(types.text, mandatory=False) - - # Contained fields - resource_type_associations = wsme.wsattr([ResourceTypeAssociation], - mandatory=False) - properties = wsme.wsattr({types.text: PropertyType}, mandatory=False) - objects = wsme.wsattr([MetadefObject], mandatory=False) - tags = wsme.wsattr([MetadefTag], mandatory=False) - - # Generated fields - self = wsme.wsattr(types.text, mandatory=False) - schema = wsme.wsattr(types.text, mandatory=False) - - def __init__(cls, **kwargs): - super(Namespace, cls).__init__(**kwargs) - - @staticmethod - def to_model_properties(db_property_types): - property_types = {} - for db_property_type in db_property_types: - # Convert the persisted json schema to a dict of PropertyTypes - property_type = json.fromjson( - PropertyType, db_property_type.schema) - property_type_name = db_property_type.name - property_types[property_type_name] = property_type - - return property_types - - -class Namespaces(types.Base, WSMEModelTransformer): - - namespaces = wsme.wsattr([Namespace], 
mandatory=False) - - # Pagination - next = wsme.wsattr(types.text, mandatory=False) - schema = wsme.wsattr(types.text, mandatory=True) - first = wsme.wsattr(types.text, mandatory=True) - - def __init__(self, **kwargs): - super(Namespaces, self).__init__(**kwargs) diff --git a/glance/api/v2/model/metadef_object.py b/glance/api/v2/model/metadef_object.py deleted file mode 100644 index 3569cc65..00000000 --- a/glance/api/v2/model/metadef_object.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import wsme -from wsme import types - -from glance.api.v2.model.metadef_property_type import PropertyType -from glance.common.wsme_utils import WSMEModelTransformer - - -class MetadefObject(types.Base, WSMEModelTransformer): - - name = wsme.wsattr(types.text, mandatory=True) - required = wsme.wsattr([types.text], mandatory=False) - description = wsme.wsattr(types.text, mandatory=False) - properties = wsme.wsattr({types.text: PropertyType}, mandatory=False) - - # Not using datetime since time format has to be - # in oslo_utils.timeutils.isotime() format - created_at = wsme.wsattr(types.text, mandatory=False) - updated_at = wsme.wsattr(types.text, mandatory=False) - - # Generated fields - self = wsme.wsattr(types.text, mandatory=False) - schema = wsme.wsattr(types.text, mandatory=False) - - def __init__(cls, **kwargs): - super(MetadefObject, cls).__init__(**kwargs) - - -class MetadefObjects(types.Base, WSMEModelTransformer): - - objects = wsme.wsattr([MetadefObject], mandatory=False) - schema = wsme.wsattr(types.text, mandatory=True) - - def __init__(self, **kwargs): - super(MetadefObjects, self).__init__(**kwargs) diff --git a/glance/api/v2/model/metadef_property_item_type.py b/glance/api/v2/model/metadef_property_item_type.py deleted file mode 100644 index 228147a1..00000000 --- a/glance/api/v2/model/metadef_property_item_type.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import wsme -from wsme import types - - -class ItemType(types.Base): - type = wsme.wsattr(types.text, mandatory=True) - enum = wsme.wsattr([types.text], mandatory=False) - - _wsme_attr_order = ('type', 'enum') - - def __init__(self, **kwargs): - super(ItemType, self).__init__(**kwargs) diff --git a/glance/api/v2/model/metadef_property_type.py b/glance/api/v2/model/metadef_property_type.py deleted file mode 100644 index 12948a80..00000000 --- a/glance/api/v2/model/metadef_property_type.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import wsme -from wsme import types - -from glance.api.v2.model.metadef_property_item_type import ItemType -from glance.common.wsme_utils import WSMEModelTransformer - - -class PropertyType(types.Base, WSMEModelTransformer): - # When used in collection of PropertyTypes, name is a dictionary key - # and not included as separate field. 
- name = wsme.wsattr(types.text, mandatory=False) - - type = wsme.wsattr(types.text, mandatory=True) - title = wsme.wsattr(types.text, mandatory=True) - description = wsme.wsattr(types.text, mandatory=False) - operators = wsme.wsattr([types.text], mandatory=False) - default = wsme.wsattr(types.bytes, mandatory=False) - readonly = wsme.wsattr(bool, mandatory=False) - - # fields for type = string - minimum = wsme.wsattr(int, mandatory=False) - maximum = wsme.wsattr(int, mandatory=False) - enum = wsme.wsattr([types.text], mandatory=False) - pattern = wsme.wsattr(types.text, mandatory=False) - - # fields for type = integer, number - minLength = wsme.wsattr(int, mandatory=False) - maxLength = wsme.wsattr(int, mandatory=False) - confidential = wsme.wsattr(bool, mandatory=False) - - # fields for type = array - items = wsme.wsattr(ItemType, mandatory=False) - uniqueItems = wsme.wsattr(bool, mandatory=False) - minItems = wsme.wsattr(int, mandatory=False) - maxItems = wsme.wsattr(int, mandatory=False) - additionalItems = wsme.wsattr(bool, mandatory=False) - - def __init__(self, **kwargs): - super(PropertyType, self).__init__(**kwargs) - - -class PropertyTypes(types.Base, WSMEModelTransformer): - properties = wsme.wsattr({types.text: PropertyType}, mandatory=False) - - def __init__(self, **kwargs): - super(PropertyTypes, self).__init__(**kwargs) diff --git a/glance/api/v2/model/metadef_resource_type.py b/glance/api/v2/model/metadef_resource_type.py deleted file mode 100644 index 94fcb31e..00000000 --- a/glance/api/v2/model/metadef_resource_type.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import wsme -from wsme import types - -from glance.common.wsme_utils import WSMEModelTransformer - - -class ResourceTypeAssociation(types.Base, WSMEModelTransformer): - name = wsme.wsattr(types.text, mandatory=True) - prefix = wsme.wsattr(types.text, mandatory=False) - properties_target = wsme.wsattr(types.text, mandatory=False) - - # Not using datetime since time format has to be - # in oslo_utils.timeutils.isotime() format - created_at = wsme.wsattr(types.text, mandatory=False) - updated_at = wsme.wsattr(types.text, mandatory=False) - - def __init__(self, **kwargs): - super(ResourceTypeAssociation, self).__init__(**kwargs) - - -class ResourceTypeAssociations(types.Base, WSMEModelTransformer): - - resource_type_associations = wsme.wsattr([ResourceTypeAssociation], - mandatory=False) - - def __init__(self, **kwargs): - super(ResourceTypeAssociations, self).__init__(**kwargs) - - -class ResourceType(types.Base, WSMEModelTransformer): - name = wsme.wsattr(types.text, mandatory=True) - - # Not using datetime since time format has to be - # in oslo_utils.timeutils.isotime() format - created_at = wsme.wsattr(types.text, mandatory=False) - updated_at = wsme.wsattr(types.text, mandatory=False) - - def __init__(self, **kwargs): - super(ResourceType, self).__init__(**kwargs) - - -class ResourceTypes(types.Base, WSMEModelTransformer): - - resource_types = wsme.wsattr([ResourceType], mandatory=False) - - def __init__(self, **kwargs): - super(ResourceTypes, self).__init__(**kwargs) diff --git a/glance/api/v2/model/metadef_tag.py 
b/glance/api/v2/model/metadef_tag.py deleted file mode 100644 index f0fbcb2b..00000000 --- a/glance/api/v2/model/metadef_tag.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import wsme -from wsme import types - -from glance.common import wsme_utils - - -class MetadefTag(types.Base, wsme_utils.WSMEModelTransformer): - - name = wsme.wsattr(types.text, mandatory=True) - - # Not using datetime since time format has to be - # in oslo_utils.timeutils.isotime() format - created_at = wsme.wsattr(types.text, mandatory=False) - updated_at = wsme.wsattr(types.text, mandatory=False) - - -class MetadefTags(types.Base, wsme_utils.WSMEModelTransformer): - - tags = wsme.wsattr([MetadefTag], mandatory=False) diff --git a/glance/api/v2/router.py b/glance/api/v2/router.py deleted file mode 100644 index a398a7c8..00000000 --- a/glance/api/v2/router.py +++ /dev/null @@ -1,569 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from glance.api.v2 import discovery -from glance.api.v2 import image_actions -from glance.api.v2 import image_data -from glance.api.v2 import image_members -from glance.api.v2 import image_tags -from glance.api.v2 import images -from glance.api.v2 import metadef_namespaces -from glance.api.v2 import metadef_objects -from glance.api.v2 import metadef_properties -from glance.api.v2 import metadef_resource_types -from glance.api.v2 import metadef_tags -from glance.api.v2 import schemas -from glance.api.v2 import tasks -from glance.common import wsgi - - -class API(wsgi.Router): - - """WSGI router for Glance v2 API requests.""" - - def __init__(self, mapper): - custom_image_properties = images.load_custom_properties() - reject_method_resource = wsgi.Resource(wsgi.RejectMethodController()) - - schemas_resource = schemas.create_resource(custom_image_properties) - mapper.connect('/schemas/image', - controller=schemas_resource, - action='image', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/schemas/image', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - mapper.connect('/schemas/images', - controller=schemas_resource, - action='images', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/schemas/images', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - mapper.connect('/schemas/member', - controller=schemas_resource, - action='member', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/schemas/member', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - - mapper.connect('/schemas/members', - controller=schemas_resource, - action='members', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/schemas/members', - controller=reject_method_resource, - action='reject', - 
allowed_methods='GET') - - mapper.connect('/schemas/task', - controller=schemas_resource, - action='task', - conditions={'method': ['GET']}) - mapper.connect('/schemas/task', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - mapper.connect('/schemas/tasks', - controller=schemas_resource, - action='tasks', - conditions={'method': ['GET']}) - mapper.connect('/schemas/tasks', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - - mapper.connect('/schemas/metadefs/namespace', - controller=schemas_resource, - action='metadef_namespace', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/schemas/metadefs/namespace', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - - mapper.connect('/schemas/metadefs/namespaces', - controller=schemas_resource, - action='metadef_namespaces', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/schemas/metadefs/namespaces', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - - mapper.connect('/schemas/metadefs/resource_type', - controller=schemas_resource, - action='metadef_resource_type', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/schemas/metadefs/resource_type', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - - mapper.connect('/schemas/metadefs/resource_types', - controller=schemas_resource, - action='metadef_resource_types', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/schemas/metadefs/resource_types', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - - mapper.connect('/schemas/metadefs/property', - controller=schemas_resource, - action='metadef_property', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/schemas/metadefs/property', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - - 
mapper.connect('/schemas/metadefs/properties', - controller=schemas_resource, - action='metadef_properties', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/schemas/metadefs/properties', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - - mapper.connect('/schemas/metadefs/object', - controller=schemas_resource, - action='metadef_object', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/schemas/metadefs/object', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - - mapper.connect('/schemas/metadefs/objects', - controller=schemas_resource, - action='metadef_objects', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/schemas/metadefs/objects', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - - mapper.connect('/schemas/metadefs/tag', - controller=schemas_resource, - action='metadef_tag', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/schemas/metadefs/tag', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - - mapper.connect('/schemas/metadefs/tags', - controller=schemas_resource, - action='metadef_tags', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/schemas/metadefs/tags', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - - # Metadef resource types - metadef_resource_types_resource = ( - metadef_resource_types.create_resource()) - - mapper.connect('/metadefs/resource_types', - controller=metadef_resource_types_resource, - action='index', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/metadefs/resource_types', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - - mapper.connect('/metadefs/namespaces/{namespace}/resource_types', - controller=metadef_resource_types_resource, - action='show', - conditions={'method': ['GET']}, - 
body_reject=True) - mapper.connect('/metadefs/namespaces/{namespace}/resource_types', - controller=metadef_resource_types_resource, - action='create', - conditions={'method': ['POST']}) - mapper.connect('/metadefs/namespaces/{namespace}/resource_types', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, POST') - - mapper.connect('/metadefs/namespaces/{namespace}/resource_types/' - '{resource_type}', - controller=metadef_resource_types_resource, - action='delete', - conditions={'method': ['DELETE']}, - body_reject=True) - mapper.connect('/metadefs/namespaces/{namespace}/resource_types/' - '{resource_type}', - controller=reject_method_resource, - action='reject', - allowed_methods='DELETE') - - # Metadef Namespaces - metadef_namespace_resource = metadef_namespaces.create_resource() - mapper.connect('/metadefs/namespaces', - controller=metadef_namespace_resource, - action='index', - conditions={'method': ['GET']}) - mapper.connect('/metadefs/namespaces', - controller=metadef_namespace_resource, - action='create', - conditions={'method': ['POST']}) - mapper.connect('/metadefs/namespaces', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, POST') - - mapper.connect('/metadefs/namespaces/{namespace}', - controller=metadef_namespace_resource, - action='show', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/metadefs/namespaces/{namespace}', - controller=metadef_namespace_resource, - action='update', - conditions={'method': ['PUT']}) - mapper.connect('/metadefs/namespaces/{namespace}', - controller=metadef_namespace_resource, - action='delete', - conditions={'method': ['DELETE']}, - body_reject=True) - mapper.connect('/metadefs/namespaces/{namespace}', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, PUT, DELETE') - - # Metadef namespace properties - metadef_properties_resource = metadef_properties.create_resource() - 
mapper.connect('/metadefs/namespaces/{namespace}/properties', - controller=metadef_properties_resource, - action='index', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/metadefs/namespaces/{namespace}/properties', - controller=metadef_properties_resource, - action='create', - conditions={'method': ['POST']}) - mapper.connect('/metadefs/namespaces/{namespace}/properties', - controller=metadef_namespace_resource, - action='delete_properties', - conditions={'method': ['DELETE']}) - mapper.connect('/metadefs/namespaces/{namespace}/properties', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, POST, DELETE') - - mapper.connect('/metadefs/namespaces/{namespace}/properties/{' - 'property_name}', - controller=metadef_properties_resource, - action='show', - conditions={'method': ['GET']}) - mapper.connect('/metadefs/namespaces/{namespace}/properties/{' - 'property_name}', - controller=metadef_properties_resource, - action='update', - conditions={'method': ['PUT']}) - mapper.connect('/metadefs/namespaces/{namespace}/properties/{' - 'property_name}', - controller=metadef_properties_resource, - action='delete', - conditions={'method': ['DELETE']}) - mapper.connect('/metadefs/namespaces/{namespace}/properties/{' - 'property_name}', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, PUT, DELETE') - - # Metadef objects - metadef_objects_resource = metadef_objects.create_resource() - mapper.connect('/metadefs/namespaces/{namespace}/objects', - controller=metadef_objects_resource, - action='index', - conditions={'method': ['GET']}) - mapper.connect('/metadefs/namespaces/{namespace}/objects', - controller=metadef_objects_resource, - action='create', - conditions={'method': ['POST']}) - mapper.connect('/metadefs/namespaces/{namespace}/objects', - controller=metadef_namespace_resource, - action='delete_objects', - conditions={'method': ['DELETE']}) - 
mapper.connect('/metadefs/namespaces/{namespace}/objects', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, POST, DELETE') - - mapper.connect('/metadefs/namespaces/{namespace}/objects/{' - 'object_name}', - controller=metadef_objects_resource, - action='show', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/metadefs/namespaces/{namespace}/objects/{' - 'object_name}', - controller=metadef_objects_resource, - action='update', - conditions={'method': ['PUT']}) - mapper.connect('/metadefs/namespaces/{namespace}/objects/{' - 'object_name}', - controller=metadef_objects_resource, - action='delete', - conditions={'method': ['DELETE']}, - body_reject=True) - mapper.connect('/metadefs/namespaces/{namespace}/objects/{' - 'object_name}', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, PUT, DELETE') - - # Metadef tags - metadef_tags_resource = metadef_tags.create_resource() - mapper.connect('/metadefs/namespaces/{namespace}/tags', - controller=metadef_tags_resource, - action='index', - conditions={'method': ['GET']}) - mapper.connect('/metadefs/namespaces/{namespace}/tags', - controller=metadef_tags_resource, - action='create_tags', - conditions={'method': ['POST']}) - mapper.connect('/metadefs/namespaces/{namespace}/tags', - controller=metadef_namespace_resource, - action='delete_tags', - conditions={'method': ['DELETE']}) - mapper.connect('/metadefs/namespaces/{namespace}/tags', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, POST, DELETE') - - mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', - controller=metadef_tags_resource, - action='show', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', - controller=metadef_tags_resource, - action='create', - conditions={'method': ['POST']}, - body_reject=True) - 
mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', - controller=metadef_tags_resource, - action='update', - conditions={'method': ['PUT']}) - mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', - controller=metadef_tags_resource, - action='delete', - conditions={'method': ['DELETE']}, - body_reject=True) - mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, POST, PUT, DELETE') - - images_resource = images.create_resource(custom_image_properties) - mapper.connect('/images', - controller=images_resource, - action='index', - conditions={'method': ['GET']}) - mapper.connect('/images', - controller=images_resource, - action='create', - conditions={'method': ['POST']}) - mapper.connect('/images', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, POST') - - mapper.connect('/images/{image_id}', - controller=images_resource, - action='update', - conditions={'method': ['PATCH']}) - mapper.connect('/images/{image_id}', - controller=images_resource, - action='show', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/images/{image_id}', - controller=images_resource, - action='delete', - conditions={'method': ['DELETE']}, - body_reject=True) - mapper.connect('/images/{image_id}', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, PATCH, DELETE') - mapper.connect('/images/{image_id}/import', - controller=images_resource, - action='import_image', - conditions={'method': ['POST']}) - mapper.connect('/images/{image_id}/import', - controller=reject_method_resource, - action='reject', - allowed_methods='POST') - - image_actions_resource = image_actions.create_resource() - mapper.connect('/images/{image_id}/actions/deactivate', - controller=image_actions_resource, - action='deactivate', - conditions={'method': ['POST']}, - body_reject=True) - 
mapper.connect('/images/{image_id}/actions/reactivate', - controller=image_actions_resource, - action='reactivate', - conditions={'method': ['POST']}, - body_reject=True) - mapper.connect('/images/{image_id}/actions/deactivate', - controller=reject_method_resource, - action='reject', - allowed_methods='POST') - mapper.connect('/images/{image_id}/actions/reactivate', - controller=reject_method_resource, - action='reject', - allowed_methods='POST') - - image_data_resource = image_data.create_resource() - mapper.connect('/images/{image_id}/file', - controller=image_data_resource, - action='download', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/images/{image_id}/file', - controller=image_data_resource, - action='upload', - conditions={'method': ['PUT']}) - mapper.connect('/images/{image_id}/file', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, PUT') - mapper.connect('/images/{image_id}/stage', - controller=image_data_resource, - action='stage', - conditions={'method': ['PUT']}) - mapper.connect('/images/{image_id}/stage', - controller=reject_method_resource, - action='reject', - allowed_methods='PUT') - - image_tags_resource = image_tags.create_resource() - mapper.connect('/images/{image_id}/tags/{tag_value}', - controller=image_tags_resource, - action='update', - conditions={'method': ['PUT']}, - body_reject=True) - mapper.connect('/images/{image_id}/tags/{tag_value}', - controller=image_tags_resource, - action='delete', - conditions={'method': ['DELETE']}, - body_reject=True) - mapper.connect('/images/{image_id}/tags/{tag_value}', - controller=reject_method_resource, - action='reject', - allowed_methods='PUT, DELETE') - - image_members_resource = image_members.create_resource() - mapper.connect('/images/{image_id}/members', - controller=image_members_resource, - action='index', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/images/{image_id}/members', - 
controller=image_members_resource, - action='create', - conditions={'method': ['POST']}) - mapper.connect('/images/{image_id}/members', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, POST') - - mapper.connect('/images/{image_id}/members/{member_id}', - controller=image_members_resource, - action='show', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('/images/{image_id}/members/{member_id}', - controller=image_members_resource, - action='update', - conditions={'method': ['PUT']}) - mapper.connect('/images/{image_id}/members/{member_id}', - controller=image_members_resource, - action='delete', - conditions={'method': ['DELETE']}, - body_reject=True) - mapper.connect('/images/{image_id}/members/{member_id}', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, PUT, DELETE') - - tasks_resource = tasks.create_resource() - mapper.connect('/tasks', - controller=tasks_resource, - action='create', - conditions={'method': ['POST']}) - mapper.connect('/tasks', - controller=tasks_resource, - action='index', - conditions={'method': ['GET']}) - mapper.connect('/tasks', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, POST') - - mapper.connect('/tasks/{task_id}', - controller=tasks_resource, - action='get', - conditions={'method': ['GET']}) - mapper.connect('/tasks/{task_id}', - controller=tasks_resource, - action='delete', - conditions={'method': ['DELETE']}) - mapper.connect('/tasks/{task_id}', - controller=reject_method_resource, - action='reject', - allowed_methods='GET, DELETE') - - # Discovery API - info_resource = discovery.create_resource() - mapper.connect('info/import', - controller=info_resource, - action='get_image_import', - conditions={'method': ['GET']}, - body_reject=True) - mapper.connect('info/import', - controller=reject_method_resource, - action='reject', - allowed_methods='GET') - - super(API, self).__init__(mapper) diff --git 
a/glance/api/v2/schemas.py b/glance/api/v2/schemas.py deleted file mode 100644 index 75039209..00000000 --- a/glance/api/v2/schemas.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from glance.api.v2 import image_members -from glance.api.v2 import images -from glance.api.v2 import metadef_namespaces -from glance.api.v2 import metadef_objects -from glance.api.v2 import metadef_properties -from glance.api.v2 import metadef_resource_types -from glance.api.v2 import metadef_tags -from glance.api.v2 import tasks -from glance.common import wsgi - - -class Controller(object): - def __init__(self, custom_image_properties=None): - self.image_schema = images.get_schema(custom_image_properties) - self.image_collection_schema = images.get_collection_schema( - custom_image_properties) - self.member_schema = image_members.get_schema() - self.member_collection_schema = image_members.get_collection_schema() - self.task_schema = tasks.get_task_schema() - self.task_collection_schema = tasks.get_collection_schema() - - # Metadef schemas - self.metadef_namespace_schema = metadef_namespaces.get_schema() - self.metadef_namespace_collection_schema = ( - metadef_namespaces.get_collection_schema()) - - self.metadef_resource_type_schema = metadef_resource_types.get_schema() - self.metadef_resource_type_collection_schema = ( - metadef_resource_types.get_collection_schema()) - - 
self.metadef_property_schema = metadef_properties.get_schema() - self.metadef_property_collection_schema = ( - metadef_properties.get_collection_schema()) - - self.metadef_object_schema = metadef_objects.get_schema() - self.metadef_object_collection_schema = ( - metadef_objects.get_collection_schema()) - - self.metadef_tag_schema = metadef_tags.get_schema() - self.metadef_tag_collection_schema = ( - metadef_tags.get_collection_schema()) - - def image(self, req): - return self.image_schema.raw() - - def images(self, req): - return self.image_collection_schema.raw() - - def member(self, req): - return self.member_schema.minimal() - - def members(self, req): - return self.member_collection_schema.minimal() - - def task(self, req): - return self.task_schema.minimal() - - def tasks(self, req): - return self.task_collection_schema.minimal() - - def metadef_namespace(self, req): - return self.metadef_namespace_schema.raw() - - def metadef_namespaces(self, req): - return self.metadef_namespace_collection_schema.raw() - - def metadef_resource_type(self, req): - return self.metadef_resource_type_schema.raw() - - def metadef_resource_types(self, req): - return self.metadef_resource_type_collection_schema.raw() - - def metadef_property(self, req): - return self.metadef_property_schema.raw() - - def metadef_properties(self, req): - return self.metadef_property_collection_schema.raw() - - def metadef_object(self, req): - return self.metadef_object_schema.raw() - - def metadef_objects(self, req): - return self.metadef_object_collection_schema.raw() - - def metadef_tag(self, req): - return self.metadef_tag_schema.raw() - - def metadef_tags(self, req): - return self.metadef_tag_collection_schema.raw() - - -def create_resource(custom_image_properties=None): - controller = Controller(custom_image_properties) - return wsgi.Resource(controller) diff --git a/glance/api/v2/tasks.py b/glance/api/v2/tasks.py deleted file mode 100644 index fe11695e..00000000 --- a/glance/api/v2/tasks.py +++ 
/dev/null @@ -1,418 +0,0 @@ -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -import debtcollector -import glance_store -from oslo_config import cfg -from oslo_log import log as logging -import oslo_serialization.jsonutils as json -from oslo_utils import encodeutils -from oslo_utils import uuidutils -import six -from six.moves import http_client as http -import six.moves.urllib.parse as urlparse -import webob.exc - -from glance.api import common -from glance.api import policy -from glance.common import exception -from glance.common import timeutils -from glance.common import wsgi -import glance.db -import glance.gateway -from glance.i18n import _, _LW -import glance.notifier -import glance.schema - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF -CONF.import_opt('task_time_to_live', 'glance.common.config', group='task') - -_DEPRECATION_MESSAGE = ("The task API is being deprecated and " - "it will be superseded by the new image import " - "API. 
Please refer to this link for more " - "information about the aforementioned process: " - "https://specs.openstack.org/openstack/glance-specs/" - "specs/mitaka/approved/image-import/" - "image-import-refactor.html") - - -class TasksController(object): - """Manages operations on tasks.""" - - def __init__(self, db_api=None, policy_enforcer=None, notifier=None, - store_api=None): - self.db_api = db_api or glance.db.get_api() - self.policy = policy_enforcer or policy.Enforcer() - self.notifier = notifier or glance.notifier.Notifier() - self.store_api = store_api or glance_store - self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, - self.notifier, self.policy) - - @debtcollector.removals.remove(message=_DEPRECATION_MESSAGE) - def create(self, req, task): - task_factory = self.gateway.get_task_factory(req.context) - executor_factory = self.gateway.get_task_executor_factory(req.context) - task_repo = self.gateway.get_task_repo(req.context) - try: - new_task = task_factory.new_task(task_type=task['type'], - owner=req.context.owner, - task_input=task['input']) - task_repo.add(new_task) - task_executor = executor_factory.new_task_executor(req.context) - pool = common.get_thread_pool("tasks_eventlet_pool") - pool.spawn_n(new_task.run, task_executor) - except exception.Forbidden as e: - msg = (_LW("Forbidden to create task. 
Reason: %(reason)s") - % {'reason': encodeutils.exception_to_unicode(e)}) - LOG.warn(msg) - raise webob.exc.HTTPForbidden(explanation=e.msg) - return new_task - - @debtcollector.removals.remove(message=_DEPRECATION_MESSAGE) - def index(self, req, marker=None, limit=None, sort_key='created_at', - sort_dir='desc', filters=None): - result = {} - if filters is None: - filters = {} - filters['deleted'] = False - - if limit is None: - limit = CONF.limit_param_default - limit = min(CONF.api_limit_max, limit) - - task_repo = self.gateway.get_task_stub_repo(req.context) - try: - tasks = task_repo.list(marker, limit, sort_key, - sort_dir, filters) - if len(tasks) != 0 and len(tasks) == limit: - result['next_marker'] = tasks[-1].task_id - except (exception.NotFound, exception.InvalidSortKey, - exception.InvalidFilterRangeValue) as e: - LOG.warn(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except exception.Forbidden as e: - LOG.warn(encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPForbidden(explanation=e.msg) - result['tasks'] = tasks - return result - - @debtcollector.removals.remove(message=_DEPRECATION_MESSAGE) - def get(self, req, task_id): - try: - task_repo = self.gateway.get_task_repo(req.context) - task = task_repo.get(task_id) - except exception.NotFound as e: - msg = (_LW("Failed to find task %(task_id)s. Reason: %(reason)s") - % {'task_id': task_id, - 'reason': encodeutils.exception_to_unicode(e)}) - LOG.warn(msg) - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Forbidden as e: - msg = (_LW("Forbidden to get task %(task_id)s. Reason:" - " %(reason)s") - % {'task_id': task_id, - 'reason': encodeutils.exception_to_unicode(e)}) - LOG.warn(msg) - raise webob.exc.HTTPForbidden(explanation=e.msg) - return task - - @debtcollector.removals.remove(message=_DEPRECATION_MESSAGE) - def delete(self, req, task_id): - msg = (_("This operation is currently not permitted on Glance Tasks. 
" - "They are auto deleted after reaching the time based on " - "their expires_at property.")) - raise webob.exc.HTTPMethodNotAllowed(explanation=msg, - headers={'Allow': 'GET'}, - body_template='${explanation}') - - -class RequestDeserializer(wsgi.JSONRequestDeserializer): - _required_properties = ['type', 'input'] - - def _get_request_body(self, request): - output = super(RequestDeserializer, self).default(request) - if 'body' not in output: - msg = _('Body expected in request.') - raise webob.exc.HTTPBadRequest(explanation=msg) - return output['body'] - - def _validate_sort_dir(self, sort_dir): - if sort_dir not in ['asc', 'desc']: - msg = _('Invalid sort direction: %s') % sort_dir - raise webob.exc.HTTPBadRequest(explanation=msg) - - return sort_dir - - def _get_filters(self, filters): - status = filters.get('status') - if status: - if status not in ['pending', 'processing', 'success', 'failure']: - msg = _('Invalid status value: %s') % status - raise webob.exc.HTTPBadRequest(explanation=msg) - - type = filters.get('type') - if type: - if type not in ['import']: - msg = _('Invalid type value: %s') % type - raise webob.exc.HTTPBadRequest(explanation=msg) - - return filters - - def _validate_marker(self, marker): - if marker and not uuidutils.is_uuid_like(marker): - msg = _('Invalid marker format') - raise webob.exc.HTTPBadRequest(explanation=msg) - return marker - - def _validate_limit(self, limit): - try: - limit = int(limit) - except ValueError: - msg = _("limit param must be an integer") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if limit < 0: - msg = _("limit param must be positive") - raise webob.exc.HTTPBadRequest(explanation=msg) - - return limit - - def _validate_create_body(self, body): - """Validate the body of task creating request""" - for param in self._required_properties: - if param not in body: - msg = _("Task '%s' is required") % param - raise webob.exc.HTTPBadRequest(explanation=msg) - - def __init__(self, schema=None): - 
super(RequestDeserializer, self).__init__() - self.schema = schema or get_task_schema() - - def create(self, request): - body = self._get_request_body(request) - self._validate_create_body(body) - try: - self.schema.validate(body) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - task = {} - properties = body - for key in self._required_properties: - try: - task[key] = properties.pop(key) - except KeyError: - pass - return dict(task=task) - - def index(self, request): - params = request.params.copy() - limit = params.pop('limit', None) - marker = params.pop('marker', None) - sort_dir = params.pop('sort_dir', 'desc') - query_params = { - 'sort_key': params.pop('sort_key', 'created_at'), - 'sort_dir': self._validate_sort_dir(sort_dir), - 'filters': self._get_filters(params) - } - - if marker is not None: - query_params['marker'] = self._validate_marker(marker) - - if limit is not None: - query_params['limit'] = self._validate_limit(limit) - return query_params - - -class ResponseSerializer(wsgi.JSONResponseSerializer): - def __init__(self, task_schema=None, partial_task_schema=None): - super(ResponseSerializer, self).__init__() - self.task_schema = task_schema or get_task_schema() - self.partial_task_schema = (partial_task_schema - or _get_partial_task_schema()) - - def _inject_location_header(self, response, task): - location = self._get_task_location(task) - if six.PY2: - location = location.encode('utf-8') - response.headers['Location'] = location - - def _get_task_location(self, task): - return '/v2/tasks/%s' % task.task_id - - def _format_task(self, schema, task): - task_view = { - 'id': task.task_id, - 'input': task.task_input, - 'type': task.type, - 'status': task.status, - 'owner': task.owner, - 'message': task.message, - 'result': task.result, - 'created_at': timeutils.isotime(task.created_at), - 'updated_at': timeutils.isotime(task.updated_at), - 'self': self._get_task_location(task), - 'schema': '/v2/schemas/task' 
- } - if task.expires_at: - task_view['expires_at'] = timeutils.isotime(task.expires_at) - task_view = schema.filter(task_view) # domain - return task_view - - def _format_task_stub(self, schema, task): - task_view = { - 'id': task.task_id, - 'type': task.type, - 'status': task.status, - 'owner': task.owner, - 'created_at': timeutils.isotime(task.created_at), - 'updated_at': timeutils.isotime(task.updated_at), - 'self': self._get_task_location(task), - 'schema': '/v2/schemas/task' - } - if task.expires_at: - task_view['expires_at'] = timeutils.isotime(task.expires_at) - task_view = schema.filter(task_view) # domain - return task_view - - def create(self, response, task): - response.status_int = http.CREATED - self._inject_location_header(response, task) - self.get(response, task) - - def get(self, response, task): - task_view = self._format_task(self.task_schema, task) - body = json.dumps(task_view, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def index(self, response, result): - params = dict(response.request.params) - params.pop('marker', None) - query = urlparse.urlencode(params) - body = { - 'tasks': [self._format_task_stub(self.partial_task_schema, task) - for task in result['tasks']], - 'first': '/v2/tasks', - 'schema': '/v2/schemas/tasks', - } - if query: - body['first'] = '%s?%s' % (body['first'], query) - if 'next_marker' in result: - params['marker'] = result['next_marker'] - next_query = urlparse.urlencode(params) - body['next'] = '/v2/tasks?%s' % next_query - response.unicode_body = six.text_type(json.dumps(body, - ensure_ascii=False)) - response.content_type = 'application/json' - - -_TASK_SCHEMA = { - "id": { - "description": _("An identifier for the task"), - "pattern": _('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' - '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), - "type": "string" - }, - "type": { - "description": _("The type of task represented by this content"), - "enum": [ 
- "import", - ], - "type": "string" - }, - "status": { - "description": _("The current status of this task"), - "enum": [ - "pending", - "processing", - "success", - "failure" - ], - "type": "string" - }, - "input": { - "description": _("The parameters required by task, JSON blob"), - "type": ["null", "object"], - }, - "result": { - "description": _("The result of current task, JSON blob"), - "type": ["null", "object"], - }, - "owner": { - "description": _("An identifier for the owner of this task"), - "type": "string" - }, - "message": { - "description": _("Human-readable informative message only included" - " when appropriate (usually on failure)"), - "type": "string", - }, - "expires_at": { - "description": _("Datetime when this resource would be" - " subject to removal"), - "type": ["null", "string"] - }, - "created_at": { - "description": _("Datetime when this resource was created"), - "type": "string" - }, - "updated_at": { - "description": _("Datetime when this resource was updated"), - "type": "string" - }, - 'self': { - 'readOnly': True, - 'type': 'string' - }, - 'schema': { - 'readOnly': True, - 'type': 'string' - } -} - - -def get_task_schema(): - properties = copy.deepcopy(_TASK_SCHEMA) - schema = glance.schema.Schema('task', properties) - return schema - - -def _get_partial_task_schema(): - properties = copy.deepcopy(_TASK_SCHEMA) - hide_properties = ['input', 'result', 'message'] - for key in hide_properties: - del properties[key] - schema = glance.schema.Schema('task', properties) - return schema - - -def get_collection_schema(): - task_schema = _get_partial_task_schema() - return glance.schema.CollectionSchema('tasks', task_schema) - - -def create_resource(): - """Task resource factory method""" - task_schema = get_task_schema() - partial_task_schema = _get_partial_task_schema() - deserializer = RequestDeserializer(task_schema) - serializer = ResponseSerializer(task_schema, partial_task_schema) - controller = TasksController() - return 
wsgi.Resource(controller, deserializer, serializer) diff --git a/glance/api/versions.py b/glance/api/versions.py deleted file mode 100644 index 8bcc9fd2..00000000 --- a/glance/api/versions.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -from six.moves import http_client -import webob.dec - -from glance.common import wsgi -from glance.i18n import _, _LW - - -versions_opts = [ - cfg.StrOpt('public_endpoint', - help=_(""" -Public url endpoint to use for Glance versions response. - -This is the public url endpoint that will appear in the Glance -"versions" response. If no value is specified, the endpoint that is -displayed in the version's response is that of the host running the -API service. Change the endpoint to represent the proxy URL if the -API service is running behind a proxy. If the service is running -behind a load balancer, add the load balancer's URL for this value. 
- -Possible values: - * None - * Proxy URL - * Load balancer URL - -Related options: - * None - -""")), -] - -CONF = cfg.CONF -CONF.register_opts(versions_opts) - -LOG = logging.getLogger(__name__) - - -class Controller(object): - - """A wsgi controller that reports which API versions are supported.""" - - def index(self, req, explicit=False): - """Respond to a request for all OpenStack API versions.""" - def build_version_object(version, path, status): - url = CONF.public_endpoint or req.host_url - return { - 'id': 'v%s' % version, - 'status': status, - 'links': [ - { - 'rel': 'self', - 'href': '%s/%s/' % (url, path), - }, - ], - } - - version_objs = [] - if CONF.enable_v2_api: - version_objs.extend([ - build_version_object(2.5, 'v2', 'CURRENT'), - build_version_object(2.4, 'v2', 'SUPPORTED'), - build_version_object(2.3, 'v2', 'SUPPORTED'), - build_version_object(2.2, 'v2', 'SUPPORTED'), - build_version_object(2.1, 'v2', 'SUPPORTED'), - build_version_object(2.0, 'v2', 'SUPPORTED'), - ]) - if CONF.enable_v1_api: - LOG.warn(_LW('The Images (Glance) v1 API is deprecated and will ' - 'be removed on or after the Pike release, following ' - 'the standard OpenStack deprecation policy. ' - 'Currently, the solution is to set ' - 'enable_v1_api=False and enable_v2_api=True in your ' - 'glance-api.conf file. 
Once those options are ' - 'removed from the code, Images (Glance) v2 API will ' - 'be switched on by default and will be the only ' - 'option to deploy and use.')) - version_objs.extend([ - build_version_object(1.1, 'v1', 'DEPRECATED'), - build_version_object(1.0, 'v1', 'DEPRECATED'), - ]) - - status = explicit and http_client.OK or http_client.MULTIPLE_CHOICES - response = webob.Response(request=req, - status=status, - content_type='application/json') - response.body = jsonutils.dump_as_bytes(dict(versions=version_objs)) - return response - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - return self.index(req) - - -def create_resource(conf): - return wsgi.Resource(Controller()) diff --git a/glance/async/__init__.py b/glance/async/__init__.py deleted file mode 100644 index b637745b..00000000 --- a/glance/async/__init__.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging - -from glance.i18n import _LE - - -LOG = logging.getLogger(__name__) - - -class TaskExecutor(object): - """Base class for Asynchronous task executors. It does not support the - execution mechanism. - - Provisions the extensible classes with necessary variables to utilize - important Glance modules like, context, task_repo, image_repo, - image_factory. 
- - Note: - It also gives abstraction for the standard pre-processing and - post-processing operations to be executed by a task. These may include - validation checks, security checks, introspection, error handling etc. - The aim is to give developers an abstract sense of the execution - pipeline logic. - - Args: - context: glance.context.RequestContext object for AuthZ and AuthN - checks - task_repo: glance.db.TaskRepo object which acts as a translator for - glance.domain.Task and glance.domain.TaskStub objects - into ORM semantics - image_repo: glance.db.ImageRepo object which acts as a translator for - glance.domain.Image object into ORM semantics - image_factory: glance.domain.ImageFactory object to be used for - creating new images for certain types of tasks viz. import, cloning - """ - - def __init__(self, context, task_repo, image_repo, image_factory): - self.context = context - self.task_repo = task_repo - self.image_repo = image_repo - self.image_factory = image_factory - - def begin_processing(self, task_id): - task = self.task_repo.get(task_id) - task.begin_processing() - self.task_repo.save(task) - - # start running - self._run(task_id, task.type) - - def _run(self, task_id, task_type): - task = self.task_repo.get(task_id) - msg = _LE("This execution of Tasks is not setup. Please consult the " - "project documentation for more information on the " - "executors available.") - LOG.error(msg) - task.fail(_LE("Internal error occurred while trying to process task.")) - self.task_repo.save(task) diff --git a/glance/async/flows/__init__.py b/glance/async/flows/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/async/flows/api_image_import.py b/glance/async/flows/api_image_import.py deleted file mode 100644 index e5036f69..00000000 --- a/glance/async/flows/api_image_import.py +++ /dev/null @@ -1,361 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import glance_store as store_api -from glance_store import backend -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -import six -from taskflow.patterns import linear_flow as lf -from taskflow import retry -from taskflow import task - -from glance.common import exception -from glance.common.scripts.image_import import main as image_import -from glance.common.scripts import utils as script_utils -from glance.i18n import _, _LE, _LI - - -LOG = logging.getLogger(__name__) - - -CONF = cfg.CONF - - -# TODO(jokke): We should refactor the task implementations so that we do not -# need to duplicate what we have already for example in base_import.py. 
- -class _DeleteFromFS(task.Task): - - def __init__(self, task_id, task_type): - self.task_id = task_id - self.task_type = task_type - super(_DeleteFromFS, self).__init__( - name='%s-DeleteFromFS-%s' % (task_type, task_id)) - - def execute(self, file_path): - """Remove file from the backend - - :param file_path: path to the file being deleted - """ - store_api.delete_from_backend(file_path) - - -class _VerifyStaging(task.Task): - - # NOTE(jokke): This could be also for example "staging_path" but to - # keep this compatible with other flows we want to stay consistent - # with base_import - default_provides = 'file_path' - - def __init__(self, task_id, task_type, task_repo, uri): - self.task_id = task_id - self.task_type = task_type - self.task_repo = task_repo - self.uri = uri - super(_VerifyStaging, self).__init__( - name='%s-ConfigureStaging-%s' % (task_type, task_id)) - - # NOTE(jokke): If we want to use other than 'file' store in the - # future, this is one thing that needs to change. - try: - uri.index('file:///', 0) - except ValueError: - msg = (_("%(task_id)s of %(task_type)s not configured " - "properly. Value of node_staging_uri must be " - " in format 'file://'") % - {'task_id': self.task_id, - 'task_type': self.task_type}) - raise exception.BadTaskConfiguration(msg) - - # NOTE(jokke): We really don't need the store for anything but - # verifying that we actually can build the store will allow us to - # fail the flow early with clear message why that happens. - self._build_store() - - def _build_store(self): - # NOTE(jokke): If we want to use some other store for staging, we can - # implement the logic more general here. For now this should do. - # NOTE(flaper87): Due to the nice glance_store api (#sarcasm), we're - # forced to build our own config object, register the required options - # (and by required I mean *ALL* of them, even the ones we don't want), - # and create our own store instance by calling a private function. 
- # This is certainly unfortunate but it's the best we can do until the - # glance_store refactor is done. A good thing is that glance_store is - # under our team's management and it gates on Glance so changes to - # this API will (should?) break task's tests. - conf = cfg.ConfigOpts() - backend.register_opts(conf) - conf.set_override('filesystem_store_datadir', - CONF.node_staging_uri[7:], - group='glance_store') - - # NOTE(flaper87): Do not even try to judge me for this... :( - # With the glance_store refactor, this code will change, until - # that happens, we don't have a better option and this is the - # least worst one, IMHO. - store = backend._load_store(conf, 'file') - - try: - store.configure() - except AttributeError: - msg = (_("%(task_id)s of %(task_type)s not configured " - "properly. Could not load the filesystem store") % - {'task_id': self.task_id, 'task_type': self.task_type}) - raise exception.BadTaskConfiguration(msg) - - def execute(self): - """Test the backend store and return the 'file_path'""" - return self.uri - - -class _ImportToStore(task.Task): - - def __init__(self, task_id, task_type, image_repo, uri): - self.task_id = task_id - self.task_type = task_type - self.image_repo = image_repo - self.uri = uri - super(_ImportToStore, self).__init__( - name='%s-ImportToStore-%s' % (task_type, task_id)) - - def execute(self, image_id, file_path=None): - """Bringing the imported image to back end store - - :param image_id: Glance Image ID - :param file_path: path to the image file - """ - # NOTE(flaper87): There are a couple of interesting bits in the - # interaction between this task and the `_ImportToFS` one. I'll try - # to cover them in this comment. - # - # NOTE(jokke): We do not have _ImportToFS currently in this flow but - # I will leave Flavio's comments here as we will utilize it or fork - # of it after MVP. 
- # - # NOTE(flaper87): - # `_ImportToFS` downloads the image to a dedicated `work_dir` which - # needs to be configured in advance (please refer to the config option - # docs for more info). The motivation behind this is also explained in - # the `_ImportToFS.execute` method. - # - # Due to the fact that we have an `_ImportToFS` task which downloads - # the image data already, we need to be as smart as we can in this task - # to avoid downloading the data several times and reducing the copy or - # write times. There are several scenarios where the interaction - # between this task and `_ImportToFS` could be improved. All these - # scenarios assume the `_ImportToFS` task has been executed before - # and/or in a more abstract scenario, that `file_path` is being - # provided. - # - # Scenario 1: FS Store is Remote, introspection enabled, - # conversion disabled - # - # In this scenario, the user would benefit from having the scratch path - # being the same path as the fs store. Only one write would happen and - # an extra read will happen in order to introspect the image. Note that - # this read is just for the image headers and not the entire file. - # - # Scenario 2: FS Store is remote, introspection enabled, - # conversion enabled - # - # In this scenario, the user would benefit from having a *local* store - # into which the image can be converted. This will require downloading - # the image locally, converting it and then copying the converted image - # to the remote store. - # - # Scenario 3: FS Store is local, introspection enabled, - # conversion disabled - # Scenario 4: FS Store is local, introspection enabled, - # conversion enabled - # - # In both these scenarios the user shouldn't care if the FS - # store path and the work dir are the same, therefore probably - # benefit, about the scratch path and the FS store being the - # same from a performance perspective. Space wise, regardless - # of the scenario, the user will have to account for it in - # advance. 
- # - # Lets get to it and identify the different scenarios in the - # implementation - image = self.image_repo.get(image_id) - image.status = 'importing' - self.image_repo.save(image) - - # NOTE(flaper87): Let's dance... and fall - # - # Unfortunatelly, because of the way our domain layers work and - # the checks done in the FS store, we can't simply rename the file - # and set the location. To do that, we'd have to duplicate the logic - # of every and each of the domain factories (quota, location, etc) - # and we'd also need to hack the FS store to prevent it from raising - # a "duplication path" error. I'd rather have this task copying the - # image bits one more time than duplicating all that logic. - # - # Since I don't think this should be the definitive solution, I'm - # leaving the code below as a reference for what should happen here - # once the FS store and domain code will be able to handle this case. - # - # if file_path is None: - # image_import.set_image_data(image, self.uri, None) - # return - - # NOTE(flaper87): Don't assume the image was stored in the - # work_dir. Think in the case this path was provided by another task. - # Also, lets try to neither assume things nor create "logic" - # dependencies between this task and `_ImportToFS` - # - # base_path = os.path.dirname(file_path.split("file://")[-1]) - - # NOTE(flaper87): Hopefully just scenarios #3 and #4. I say - # hopefully because nothing prevents the user to use the same - # FS store path as a work dir - # - # image_path = os.path.join(base_path, image_id) - # - # if (base_path == CONF.glance_store.filesystem_store_datadir or - # base_path in CONF.glance_store.filesystem_store_datadirs): - # os.rename(file_path, image_path) - # - # image_import.set_image_data(image, image_path, None) - - # NOTE(jokke): The different options here are kind of pointless as we - # will need the file path anyways for our delete workflow for now. - # For future proofing keeping this as is. 
- image_import.set_image_data(image, file_path or self.uri, self.task_id) - - # NOTE(flaper87): We need to save the image again after the locations - # have been set in the image. - self.image_repo.save(image) - - -class _SaveImage(task.Task): - - def __init__(self, task_id, task_type, image_repo): - self.task_id = task_id - self.task_type = task_type - self.image_repo = image_repo - super(_SaveImage, self).__init__( - name='%s-SaveImage-%s' % (task_type, task_id)) - - def execute(self, image_id): - """Transition image status to active - - :param image_id: Glance Image ID - """ - new_image = self.image_repo.get(image_id) - if new_image.status == 'saving': - # NOTE(flaper87): THIS IS WRONG! - # we should be doing atomic updates to avoid - # race conditions. This happens in other places - # too. - new_image.status = 'active' - self.image_repo.save(new_image) - - -class _CompleteTask(task.Task): - - def __init__(self, task_id, task_type, task_repo): - self.task_id = task_id - self.task_type = task_type - self.task_repo = task_repo - super(_CompleteTask, self).__init__( - name='%s-CompleteTask-%s' % (task_type, task_id)) - - def execute(self, image_id): - """Finishing the task flow - - :param image_id: Glance Image ID - """ - task = script_utils.get_task(self.task_repo, self.task_id) - if task is None: - return - try: - task.succeed({'image_id': image_id}) - except Exception as e: - # Note: The message string contains Error in it to indicate - # in the task.message that it's a error message for the user. - - # TODO(nikhil): need to bring back save_and_reraise_exception when - # necessary - log_msg = _LE("Task ID %(task_id)s failed. 
Error: %(exc_type)s: " - "%(e)s") - LOG.exception(log_msg, {'exc_type': six.text_type(type(e)), - 'e': encodeutils.exception_to_unicode(e), - 'task_id': task.task_id}) - - err_msg = _("Error: %(exc_type)s: %(e)s") - task.fail(err_msg % {'exc_type': six.text_type(type(e)), - 'e': encodeutils.exception_to_unicode(e)}) - finally: - self.task_repo.save(task) - - LOG.info(_LI("%(task_id)s of %(task_type)s completed"), - {'task_id': self.task_id, 'task_type': self.task_type}) - - -def get_flow(**kwargs): - """Return task flow - - :param task_id: Task ID - :param task_type: Type of the task - :param task_repo: Task repo - :param image_repo: Image repository used - :param image_id: ID of the Image to be processed - :param uri: uri for the image file - """ - task_id = kwargs.get('task_id') - task_type = kwargs.get('task_type') - task_repo = kwargs.get('task_repo') - image_repo = kwargs.get('image_repo') - image_id = kwargs.get('image_id') - uri = kwargs.get('uri') - - if not uri: - separator = '' - if not CONF.node_staging_uri.endsWith('/'): - separator = '/' - uri = separator.join((CONF.node_staging_uri, str(image_id))) - - flow = lf.flow(task_type, retry=retry.AlwaysRevert()) - flow.add(_VerifyStaging(task_id, task_type, uri)) - - # TODO(jokke): For the pluggable tasks like image verification or - # image conversion we need to implement the plugin logic here. 
- - import_to_store = _ImportToStore(task_id, - task_type, - image_repo, - uri, - rebind_args={'image_id': image_id}) - flow.add(import_to_store) - - delete_task = lf.Flow(task_type).add(_DeleteFromFS(task_id, task_type)) - flow.add(delete_task) - - save_task = _SaveImage(task_id, - task_type, - image_repo, - rebind_args={'image_id': image_id}) - flow.add(save_task) - - complete_task = _CompleteTask(task_id, - task_type, - task_repo, - rebind_args={'image_id': image_id}) - flow.add(complete_task) - - return flow diff --git a/glance/async/flows/base_import.py b/glance/async/flows/base_import.py deleted file mode 100644 index 4fc79c84..00000000 --- a/glance/async/flows/base_import.py +++ /dev/null @@ -1,473 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json -import os - -import glance_store as store_api -from glance_store import backend -from oslo_concurrency import processutils as putils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils -import six -from stevedore import named -from taskflow.patterns import linear_flow as lf -from taskflow import retry -from taskflow import task -from taskflow.types import failure - -from glance.async import utils -from glance.common import exception -from glance.common.scripts.image_import import main as image_import -from glance.common.scripts import utils as script_utils -from glance.i18n import _, _LE, _LI - - -LOG = logging.getLogger(__name__) - - -CONF = cfg.CONF - - -class _CreateImage(task.Task): - - default_provides = 'image_id' - - def __init__(self, task_id, task_type, task_repo, image_repo, - image_factory): - self.task_id = task_id - self.task_type = task_type - self.task_repo = task_repo - self.image_repo = image_repo - self.image_factory = image_factory - super(_CreateImage, self).__init__( - name='%s-CreateImage-%s' % (task_type, task_id)) - - def execute(self): - task = script_utils.get_task(self.task_repo, self.task_id) - if task is None: - return - task_input = script_utils.unpack_task_input(task) - image = image_import.create_image( - self.image_repo, self.image_factory, - task_input.get('image_properties'), self.task_id) - - LOG.debug("Task %(task_id)s created image %(image_id)s", - {'task_id': task.task_id, 'image_id': image.image_id}) - return image.image_id - - def revert(self, *args, **kwargs): - # TODO(flaper87): Define the revert rules for images on failures. - # Deleting the image may not be what we want since users could upload - # the image data in a separate step. However, it really depends on - # when the failure happened. 
I guess we should check if data has been - # written, although at that point failures are (should be) unexpected, - # at least image-workflow wise. - pass - - -class _ImportToFS(task.Task): - - default_provides = 'file_path' - - def __init__(self, task_id, task_type, task_repo, uri): - self.task_id = task_id - self.task_type = task_type - self.task_repo = task_repo - self.uri = uri - super(_ImportToFS, self).__init__( - name='%s-ImportToFS-%s' % (task_type, task_id)) - - if CONF.task.work_dir is None: - msg = (_("%(task_id)s of %(task_type)s not configured " - "properly. Missing work dir: %(work_dir)s") % - {'task_id': self.task_id, - 'task_type': self.task_type, - 'work_dir': CONF.task.work_dir}) - raise exception.BadTaskConfiguration(msg) - - self.store = self._build_store() - - def _build_store(self): - # NOTE(flaper87): Due to the nice glance_store api (#sarcasm), we're - # forced to build our own config object, register the required options - # (and by required I mean *ALL* of them, even the ones we don't want), - # and create our own store instance by calling a private function. - # This is certainly unfortunate but it's the best we can do until the - # glance_store refactor is done. A good thing is that glance_store is - # under our team's management and it gates on Glance so changes to - # this API will (should?) break task's tests. - conf = cfg.ConfigOpts() - backend.register_opts(conf) - conf.set_override('filesystem_store_datadir', - CONF.task.work_dir, - group='glance_store') - - # NOTE(flaper87): Do not even try to judge me for this... :( - # With the glance_store refactor, this code will change, until - # that happens, we don't have a better option and this is the - # least worst one, IMHO. - store = backend._load_store(conf, 'file') - - if store is None: - msg = (_("%(task_id)s of %(task_type)s not configured " - "properly. 
Could not load the filesystem store") % - {'task_id': self.task_id, 'task_type': self.task_type}) - raise exception.BadTaskConfiguration(msg) - - store.configure() - return store - - def execute(self, image_id): - """Create temp file into store and return path to it - - :param image_id: Glance Image ID - """ - # NOTE(flaper87): We've decided to use a separate `work_dir` for - # this task - and tasks coming after this one - as a way to expect - # users to configure a local store for pre-import works on the image - # to happen. - # - # While using any path should be "technically" fine, it's not what - # we recommend as the best solution. For more details on this, please - # refer to the comment in the `_ImportToStore.execute` method. - data = script_utils.get_image_data_iter(self.uri) - - path = self.store.add(image_id, data, 0, context=None)[0] - - try: - # NOTE(flaper87): Consider moving this code to a common - # place that other tasks can consume as well. - stdout, stderr = putils.trycmd('qemu-img', 'info', - '--output=json', path, - prlimit=utils.QEMU_IMG_PROC_LIMITS, - log_errors=putils.LOG_ALL_ERRORS) - except OSError as exc: - with excutils.save_and_reraise_exception(): - exc_message = encodeutils.exception_to_unicode(exc) - msg = _LE('Failed to execute security checks on the image ' - '%(task_id)s: %(exc)s') - LOG.error(msg, {'task_id': self.task_id, 'exc': exc_message}) - - metadata = json.loads(stdout) - - backing_file = metadata.get('backing-filename') - if backing_file is not None: - msg = _("File %(path)s has invalid backing file " - "%(bfile)s, aborting.") % {'path': path, - 'bfile': backing_file} - raise RuntimeError(msg) - - return path - - def revert(self, image_id, result, **kwargs): - if isinstance(result, failure.Failure): - LOG.exception(_LE('Task: %(task_id)s failed to import image ' - '%(image_id)s to the filesystem.'), - {'task_id': self.task_id, 'image_id': image_id}) - return - - if os.path.exists(result.split("file://")[-1]): - 
store_api.delete_from_backend(result) - - -class _DeleteFromFS(task.Task): - - def __init__(self, task_id, task_type): - self.task_id = task_id - self.task_type = task_type - super(_DeleteFromFS, self).__init__( - name='%s-DeleteFromFS-%s' % (task_type, task_id)) - - def execute(self, file_path): - """Remove file from the backend - - :param file_path: path to the file being deleted - """ - store_api.delete_from_backend(file_path) - - -class _ImportToStore(task.Task): - - def __init__(self, task_id, task_type, image_repo, uri): - self.task_id = task_id - self.task_type = task_type - self.image_repo = image_repo - self.uri = uri - super(_ImportToStore, self).__init__( - name='%s-ImportToStore-%s' % (task_type, task_id)) - - def execute(self, image_id, file_path=None): - """Bringing the introspected image to back end store - - :param image_id: Glance Image ID - :param file_path: path to the image file - """ - # NOTE(flaper87): There are a couple of interesting bits in the - # interaction between this task and the `_ImportToFS` one. I'll try - # to cover them in this comment. - # - # NOTE(flaper87): - # `_ImportToFS` downloads the image to a dedicated `work_dir` which - # needs to be configured in advance (please refer to the config option - # docs for more info). The motivation behind this is also explained in - # the `_ImportToFS.execute` method. - # - # Due to the fact that we have an `_ImportToFS` task which downloads - # the image data already, we need to be as smart as we can in this task - # to avoid downloading the data several times and reducing the copy or - # write times. There are several scenarios where the interaction - # between this task and `_ImportToFS` could be improved. All these - # scenarios assume the `_ImportToFS` task has been executed before - # and/or in a more abstract scenario, that `file_path` is being - # provided. 
- # - # Scenario 1: FS Store is Remote, introspection enabled, - # conversion disabled - # - # In this scenario, the user would benefit from having the scratch path - # being the same path as the fs store. Only one write would happen and - # an extra read will happen in order to introspect the image. Note that - # this read is just for the image headers and not the entire file. - # - # Scenario 2: FS Store is remote, introspection enabled, - # conversion enabled - # - # In this scenario, the user would benefit from having a *local* store - # into which the image can be converted. This will require downloading - # the image locally, converting it and then copying the converted image - # to the remote store. - # - # Scenario 3: FS Store is local, introspection enabled, - # conversion disabled - # Scenario 4: FS Store is local, introspection enabled, - # conversion enabled - # - # In both these scenarios the user shouldn't care if the FS - # store path and the work dir are the same, therefore probably - # benefit, about the scratch path and the FS store being the - # same from a performance perspective. Space wise, regardless - # of the scenario, the user will have to account for it in - # advance. - # - # Lets get to it and identify the different scenarios in the - # implementation - image = self.image_repo.get(image_id) - image.status = 'saving' - self.image_repo.save(image) - - # NOTE(flaper87): Let's dance... and fall - # - # Unfortunatelly, because of the way our domain layers work and - # the checks done in the FS store, we can't simply rename the file - # and set the location. To do that, we'd have to duplicate the logic - # of every and each of the domain factories (quota, location, etc) - # and we'd also need to hack the FS store to prevent it from raising - # a "duplication path" error. I'd rather have this task copying the - # image bits one more time than duplicating all that logic. 
- # - # Since I don't think this should be the definitive solution, I'm - # leaving the code below as a reference for what should happen here - # once the FS store and domain code will be able to handle this case. - # - # if file_path is None: - # image_import.set_image_data(image, self.uri, None) - # return - - # NOTE(flaper87): Don't assume the image was stored in the - # work_dir. Think in the case this path was provided by another task. - # Also, lets try to neither assume things nor create "logic" - # dependencies between this task and `_ImportToFS` - # - # base_path = os.path.dirname(file_path.split("file://")[-1]) - - # NOTE(flaper87): Hopefully just scenarios #3 and #4. I say - # hopefully because nothing prevents the user to use the same - # FS store path as a work dir - # - # image_path = os.path.join(base_path, image_id) - # - # if (base_path == CONF.glance_store.filesystem_store_datadir or - # base_path in CONF.glance_store.filesystem_store_datadirs): - # os.rename(file_path, image_path) - # - # image_import.set_image_data(image, image_path, None) - - image_import.set_image_data(image, file_path or self.uri, self.task_id) - - # NOTE(flaper87): We need to save the image again after the locations - # have been set in the image. - self.image_repo.save(image) - - -class _SaveImage(task.Task): - - def __init__(self, task_id, task_type, image_repo): - self.task_id = task_id - self.task_type = task_type - self.image_repo = image_repo - super(_SaveImage, self).__init__( - name='%s-SaveImage-%s' % (task_type, task_id)) - - def execute(self, image_id): - """Transition image status to active - - :param image_id: Glance Image ID - """ - new_image = self.image_repo.get(image_id) - if new_image.status == 'saving': - # NOTE(flaper87): THIS IS WRONG! - # we should be doing atomic updates to avoid - # race conditions. This happens in other places - # too. 
- new_image.status = 'active' - self.image_repo.save(new_image) - - -class _CompleteTask(task.Task): - - def __init__(self, task_id, task_type, task_repo): - self.task_id = task_id - self.task_type = task_type - self.task_repo = task_repo - super(_CompleteTask, self).__init__( - name='%s-CompleteTask-%s' % (task_type, task_id)) - - def execute(self, image_id): - """Finishing the task flow - - :param image_id: Glance Image ID - """ - task = script_utils.get_task(self.task_repo, self.task_id) - if task is None: - return - try: - task.succeed({'image_id': image_id}) - except Exception as e: - # Note: The message string contains Error in it to indicate - # in the task.message that it's a error message for the user. - - # TODO(nikhil): need to bring back save_and_reraise_exception when - # necessary - log_msg = _LE("Task ID %(task_id)s failed. Error: %(exc_type)s: " - "%(e)s") - LOG.exception(log_msg, {'exc_type': six.text_type(type(e)), - 'e': encodeutils.exception_to_unicode(e), - 'task_id': task.task_id}) - - err_msg = _("Error: %(exc_type)s: %(e)s") - task.fail(err_msg % {'exc_type': six.text_type(type(e)), - 'e': encodeutils.exception_to_unicode(e)}) - finally: - self.task_repo.save(task) - - LOG.info(_LI("%(task_id)s of %(task_type)s completed"), - {'task_id': self.task_id, 'task_type': self.task_type}) - - -def _get_import_flows(**kwargs): - # NOTE(flaper87): Until we have a better infrastructure to enable - # and disable tasks plugins, hard-code the tasks we know exist, - # instead of loading everything from the namespace. This guarantees - # both, the load order of these plugins and the fact that no random - # plugins will be added/loaded until we feel comfortable with this. - # Future patches will keep using NamedExtensionManager but they'll - # rely on a config option to control this process. 
- extensions = named.NamedExtensionManager('glance.flows.import', - names=['ovf_process', - 'convert', - 'introspect'], - name_order=True, - invoke_on_load=True, - invoke_kwds=kwargs) - - for ext in extensions.extensions: - yield ext.obj - - -def get_flow(**kwargs): - """Return task flow - - :param task_id: Task ID - :param task_type: Type of the task - :param task_repo: Task repo - :param image_repo: Image repository used - :param image_factory: Glance Image Factory - :param uri: uri for the image file - """ - task_id = kwargs.get('task_id') - task_type = kwargs.get('task_type') - task_repo = kwargs.get('task_repo') - image_repo = kwargs.get('image_repo') - image_factory = kwargs.get('image_factory') - uri = kwargs.get('uri') - - flow = lf.Flow(task_type, retry=retry.AlwaysRevert()).add( - _CreateImage(task_id, task_type, task_repo, image_repo, image_factory)) - - import_to_store = _ImportToStore(task_id, task_type, image_repo, uri) - - try: - # NOTE(flaper87): ImportToLocal and DeleteFromLocal shouldn't be here. - # Ideally, we should have the different import flows doing this for us - # and this function should clean up duplicated tasks. For example, say - # 2 flows need to have a local copy of the image - ImportToLocal - in - # order to be able to complete the task - i.e Introspect-. In that - # case, the introspect.get_flow call should add both, ImportToLocal and - # DeleteFromLocal, to the flow and this function will reduce the - # duplicated calls to those tasks by creating a linear flow that - # ensures those are called before the other tasks. For now, I'm - # keeping them here, though. - limbo = lf.Flow(task_type).add(_ImportToFS(task_id, - task_type, - task_repo, - uri)) - - for subflow in _get_import_flows(**kwargs): - limbo.add(subflow) - - # NOTE(flaper87): We have hard-coded 2 tasks, - # if there aren't more than 2, it means that - # no subtask has been registered. 
- if len(limbo) > 1: - flow.add(limbo) - - # NOTE(flaper87): Until this implementation gets smarter, - # make sure ImportToStore is called *after* the imported - # flow stages. If not, the image will be set to saving state - # invalidating tasks like Introspection or Convert. - flow.add(import_to_store) - - # NOTE(flaper87): Since this is an "optional" task but required - # when `limbo` is executed, we're adding it in its own subflow - # to isolate it from the rest of the flow. - delete_flow = lf.Flow(task_type).add(_DeleteFromFS(task_id, - task_type)) - flow.add(delete_flow) - else: - flow.add(import_to_store) - except exception.BadTaskConfiguration as exc: - # NOTE(flaper87): If something goes wrong with the load of - # import tasks, make sure we go on. - LOG.error(_LE('Bad task configuration: %s'), exc.message) - flow.add(import_to_store) - - flow.add( - _SaveImage(task_id, task_type, image_repo), - _CompleteTask(task_id, task_type, task_repo) - ) - return flow diff --git a/glance/async/flows/convert.py b/glance/async/flows/convert.py deleted file mode 100644 index d1b9f1aa..00000000 --- a/glance/async/flows/convert.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os - -from oslo_concurrency import processutils as putils -from oslo_config import cfg -from oslo_log import log as logging -from taskflow.patterns import linear_flow as lf -from taskflow import task - -from glance.i18n import _, _LW - -LOG = logging.getLogger(__name__) - -convert_task_opts = [ - # NOTE: This configuration option requires the operator to explicitly set - # an image conversion format. There being no sane default due to the - # dependency on the environment in which OpenStack is running, we do not - # mark this configuration option as "required". Rather a warning message - # is given to the operator, prompting for an image conversion format to - # be set. - cfg.StrOpt('conversion_format', - sample_default='raw', - choices=('qcow2', 'raw', 'vmdk'), - help=_(""" -Set the desired image conversion format. - -Provide a valid image format to which you want images to be -converted before they are stored for consumption by Glance. -Appropriate image format conversions are desirable for specific -storage backends in order to facilitate efficient handling of -bandwidth and usage of the storage infrastructure. - -By default, ``conversion_format`` is not set and must be set -explicitly in the configuration file. - -The allowed values for this option are ``raw``, ``qcow2`` and -``vmdk``. The ``raw`` format is the unstructured disk format and -should be chosen when RBD or Ceph storage backends are used for -image storage. ``qcow2`` is supported by the QEMU emulator that -expands dynamically and supports Copy on Write. The ``vmdk`` is -another common disk format supported by many common virtual machine -monitors like VMWare Workstation. - -Possible values: - * qcow2 - * raw - * vmdk - -Related options: - * disk_formats - -""")), -] - -CONF = cfg.CONF - -# NOTE(flaper87): Registering under the taskflow_executor section -# for now. It seems a waste to have a whole section dedicated to a -# single task with a single option. 
-CONF.register_opts(convert_task_opts, group='taskflow_executor') - - -class _Convert(task.Task): - - conversion_missing_warned = False - - def __init__(self, task_id, task_type, image_repo): - self.task_id = task_id - self.task_type = task_type - self.image_repo = image_repo - super(_Convert, self).__init__( - name='%s-Convert-%s' % (task_type, task_id)) - - def execute(self, image_id, file_path): - - # NOTE(flaper87): A format must be explicitly - # specified. There's no "sane" default for this - # because the dest format may work differently depending - # on the environment OpenStack is running in. - conversion_format = CONF.taskflow_executor.conversion_format - if conversion_format is None: - if not _Convert.conversion_missing_warned: - msg = _LW('The conversion format is None, please add a value ' - 'for it in the config file for this task to ' - 'work: %s') - LOG.warn(msg, self.task_id) - _Convert.conversion_missing_warned = True - return - - image_obj = self.image_repo.get(image_id) - src_format = image_obj.disk_format - - # TODO(flaper87): Check whether the image is in the desired - # format already. Probably using `qemu-img` just like the - # `Introspection` task. - - # NOTE(hemanthm): We add '-f' parameter to the convert command here so - # that the image format need not be inferred by qemu utils. This - # shields us from being vulnerable to an attack vector described here - # https://bugs.launchpad.net/glance/+bug/1449062 - - dest_path = os.path.join(CONF.task.work_dir, "%s.converted" % image_id) - stdout, stderr = putils.trycmd('qemu-img', 'convert', - '-f', src_format, - '-O', conversion_format, - file_path, dest_path, - log_errors=putils.LOG_ALL_ERRORS) - - if stderr: - raise RuntimeError(stderr) - - os.rename(dest_path, file_path.split("file://")[-1]) - return file_path - - def revert(self, image_id, result=None, **kwargs): - # NOTE(flaper87): If result is None, it probably - # means this task failed. 
Otherwise, we would have - # a result from its execution. - if result is None: - return - - fs_path = result.split("file://")[-1] - if os.path.exists(fs_path): - os.remove(fs_path) - - -def get_flow(**kwargs): - """Return task flow for converting images to different formats. - - :param task_id: Task ID. - :param task_type: Type of the task. - :param image_repo: Image repository used. - """ - task_id = kwargs.get('task_id') - task_type = kwargs.get('task_type') - image_repo = kwargs.get('image_repo') - - return lf.Flow(task_type).add( - _Convert(task_id, task_type, image_repo), - ) diff --git a/glance/async/flows/introspect.py b/glance/async/flows/introspect.py deleted file mode 100644 index 4d39dafc..00000000 --- a/glance/async/flows/introspect.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json - -from oslo_concurrency import processutils as putils -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils -from taskflow.patterns import linear_flow as lf - -from glance.async import utils -from glance.i18n import _LE - - -LOG = logging.getLogger(__name__) - - -class _Introspect(utils.OptionalTask): - """Taskflow to pull the embedded metadata out of image file""" - - def __init__(self, task_id, task_type, image_repo): - self.task_id = task_id - self.task_type = task_type - self.image_repo = image_repo - super(_Introspect, self).__init__( - name='%s-Introspect-%s' % (task_type, task_id)) - - def execute(self, image_id, file_path): - """Does the actual introspection - - :param image_id: Glance image ID - :param file_path: Path to the file being introspected - """ - - try: - stdout, stderr = putils.trycmd('qemu-img', 'info', - '--output=json', file_path, - prlimit=utils.QEMU_IMG_PROC_LIMITS, - log_errors=putils.LOG_ALL_ERRORS) - except OSError as exc: - # NOTE(flaper87): errno == 2 means the executable file - # was not found. For now, log an error and move forward - # until we have a better way to enable/disable optional - # tasks. - if exc.errno != 2: - with excutils.save_and_reraise_exception(): - exc_message = encodeutils.exception_to_unicode(exc) - msg = _LE('Failed to execute introspection ' - '%(task_id)s: %(exc)s') - LOG.error(msg, {'task_id': self.task_id, - 'exc': exc_message}) - return - - if stderr: - raise RuntimeError(stderr) - - metadata = json.loads(stdout) - new_image = self.image_repo.get(image_id) - new_image.virtual_size = metadata.get('virtual-size', 0) - new_image.disk_format = metadata.get('format') - self.image_repo.save(new_image) - LOG.debug("%(task_id)s: Introspection successful: %(file)s", - {'task_id': self.task_id, 'file': file_path}) - return new_image - - -def get_flow(**kwargs): - """Return task flow for introspecting images to obtain metadata about the - image. 
- - :param task_id: Task ID - :param task_type: Type of the task. - :param image_repo: Image repository used. - """ - task_id = kwargs.get('task_id') - task_type = kwargs.get('task_type') - image_repo = kwargs.get('image_repo') - - LOG.debug("Flow: %(task_type)s with ID %(id)s on %(repo)s", - {'task_type': task_type, 'id': task_id, 'repo': image_repo}) - - return lf.Flow(task_type).add( - _Introspect(task_id, task_type, image_repo), - ) diff --git a/glance/async/flows/ovf_process.py b/glance/async/flows/ovf_process.py deleted file mode 100644 index 4f463f67..00000000 --- a/glance/async/flows/ovf_process.py +++ /dev/null @@ -1,268 +0,0 @@ -# Copyright 2015 Intel Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import re -import shutil -import tarfile - -try: - import xml.etree.cElementTree as ET -except ImportError: - import xml.etree.ElementTree as ET - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils as json -from six.moves import urllib -from taskflow.patterns import linear_flow as lf -from taskflow import task - -from glance.i18n import _, _LW - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF -# Define the CIM namespaces here. 
Currently we will be supporting extracting -# properties only from CIM_ProcessorAllocationSettingData -CIM_NS = {'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' - 'CIM_ProcessorAllocationSettingData': 'cim_pasd'} - - -class _OVF_Process(task.Task): - """ - Extracts the single disk image from an OVA tarball and saves it to the - Glance image store. It also parses the included OVF file for selected - metadata which it then saves in the image store as the previously saved - image's properties. - """ - - default_provides = 'file_path' - - def __init__(self, task_id, task_type, image_repo): - self.task_id = task_id - self.task_type = task_type - self.image_repo = image_repo - super(_OVF_Process, self).__init__( - name='%s-OVF_Process-%s' % (task_type, task_id)) - - def _get_extracted_file_path(self, image_id): - return os.path.join(CONF.task.work_dir, - "%s.extracted" % image_id) - - def _get_ova_iter_objects(self, uri): - """Returns iterable object either for local file or uri - - :param uri: uri (remote or local) to the ova package we want to iterate - """ - - if uri.startswith("file://"): - uri = uri.split("file://")[-1] - return open(uri, "rb") - - return urllib.request.urlopen(uri) - - def execute(self, image_id, file_path): - """ - :param image_id: Id to use when storing extracted image to Glance - image store. It is assumed that some other task has already - created a row in the store with this id. - :param file_path: Path to the OVA package - """ - - image = self.image_repo.get(image_id) - # Expect 'ova' as image container format for OVF_Process task - if image.container_format == 'ova': - # FIXME(dramakri): This is an admin-only feature for security - # reasons. Ideally this should be achieved by making the import - # task API admin only. This is one of the items that the upcoming - # import refactoring work plans to do. Until then, we will check - # the context as a short-cut. 
- if image.context and image.context.is_admin: - extractor = OVAImageExtractor() - data_iter = self._get_ova_iter_objects(file_path) - disk, properties = extractor.extract(data_iter) - image.extra_properties.update(properties) - image.container_format = 'bare' - self.image_repo.save(image) - dest_path = self._get_extracted_file_path(image_id) - with open(dest_path, 'wb') as f: - shutil.copyfileobj(disk, f, 4096) - - # Overwrite the input ova file since it is no longer needed - os.rename(dest_path, file_path.split("file://")[-1]) - - else: - raise RuntimeError(_('OVA extract is limited to admin')) - - return file_path - - def revert(self, image_id, result, **kwargs): - fs_path = self._get_extracted_file_path(image_id) - if os.path.exists(fs_path): - os.path.remove(fs_path) - - -class OVAImageExtractor(object): - """Extracts and parses the uploaded OVA package - - A class that extracts the disk image and OVF file from an OVA - tar archive. Parses the OVF file for metadata of interest. - """ - - def __init__(self): - self.interested_properties = [] - self._load_interested_properties() - - def extract(self, ova): - """Extracts disk image and OVF file from OVA package - - Extracts a single disk image and OVF from OVA tar archive and calls - OVF parser method. 
- - :param ova: a file object containing the OVA file - :returns: a tuple of extracted disk file object and dictionary of - properties parsed from the OVF file - :raises RuntimeError: an error for malformed OVA and OVF files - """ - with tarfile.open(fileobj=ova) as tar_file: - filenames = tar_file.getnames() - ovf_filename = next((filename for filename in filenames - if filename.endswith('.ovf')), None) - if ovf_filename: - ovf = tar_file.extractfile(ovf_filename) - disk_name, properties = self._parse_OVF(ovf) - ovf.close() - else: - raise RuntimeError(_('Could not find OVF file in OVA archive ' - 'file.')) - - disk = tar_file.extractfile(disk_name) - - return (disk, properties) - - def _parse_OVF(self, ovf): - """Parses the OVF file - - Parses the OVF file for specified metadata properties. Interested - properties must be specified in ovf-metadata.json conf file. - - The OVF file's qualified namespaces are removed from the included - properties. - - :param ovf: a file object containing the OVF file - :returns: a tuple of disk filename and a properties dictionary - :raises RuntimeError: an error for malformed OVF file - """ - - def _get_namespace_and_tag(tag): - """Separate and return the namespace and tag elements. - - There is no native support for this operation in elementtree - package. See http://bugs.python.org/issue18304 for details. 
- """ - m = re.match(r'\{(.+)\}(.+)', tag) - if m: - return m.group(1), m.group(2) - else: - return '', tag - - disk_filename, file_elements, file_ref = None, None, None - properties = {} - for event, elem in ET.iterparse(ovf): - if event == 'end': - ns, tag = _get_namespace_and_tag(elem.tag) - if ns in CIM_NS and tag in self.interested_properties: - properties[CIM_NS[ns] + '_' + tag] = (elem.text.strip() - if elem.text else '') - - if tag == 'DiskSection': - disks = [child for child in list(elem) - if _get_namespace_and_tag(child.tag)[1] == - 'Disk'] - if len(disks) > 1: - """ - Currently only single disk image extraction is - supported. - FIXME(dramakri): Support multiple images in OVA package - """ - raise RuntimeError(_('Currently, OVA packages ' - 'containing multiple disk are ' - 'not supported.')) - disk = next(iter(disks)) - file_ref = next(value for key, value in disk.items() if - _get_namespace_and_tag(key)[1] == - 'fileRef') - - if tag == 'References': - file_elements = list(elem) - - # Clears elements to save memory except for 'File' and 'Disk' - # references, which we will need to later access - if tag != 'File' and tag != 'Disk': - elem.clear() - - for file_element in file_elements: - file_id = next(value for key, value in file_element.items() - if _get_namespace_and_tag(key)[1] == 'id') - if file_id != file_ref: - continue - disk_filename = next(value for key, value in file_element.items() - if _get_namespace_and_tag(key)[1] == 'href') - - return (disk_filename, properties) - - def _load_interested_properties(self): - """Find the OVF properties config file and load it. - - OVF properties config file specifies which metadata of interest to - extract. Reads in a JSON file named 'ovf-metadata.json' if available. - See example file at etc/ovf-metadata.json.sample. 
- """ - filename = 'ovf-metadata.json' - match = CONF.find_file(filename) - if match: - with open(match, 'r') as properties_file: - properties = json.loads(properties_file.read()) - self.interested_properties = properties.get( - 'cim_pasd', []) - if not self.interested_properties: - LOG.warn(_LW('OVF metadata of interest was not specified ' - 'in ovf-metadata.json config file. Please ' - 'set "cim_pasd" to a list of interested ' - 'CIM_ProcessorAllocationSettingData ' - 'properties.')) - else: - LOG.warn(_LW('OVF properties config file "ovf-metadata.json" was ' - 'not found.')) - - -def get_flow(**kwargs): - """Returns task flow for OVF Process. - - :param task_id: Task ID - :param task_type: Type of the task. - :param image_repo: Image repository used. - """ - task_id = kwargs.get('task_id') - task_type = kwargs.get('task_type') - image_repo = kwargs.get('image_repo') - - LOG.debug("Flow: %(task_type)s with ID %(id)s on %(repo)s" % - {'task_type': task_type, 'id': task_id, 'repo': image_repo}) - - return lf.Flow(task_type).add( - _OVF_Process(task_id, task_type, image_repo), - ) diff --git a/glance/async/taskflow_executor.py b/glance/async/taskflow_executor.py deleted file mode 100644 index ab252fbd..00000000 --- a/glance/async/taskflow_executor.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import futurist -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils -from six.moves import urllib -from stevedore import driver -from taskflow import engines -from taskflow.listeners import logging as llistener - -import glance.async -from glance.common import exception -from glance.common.scripts import utils as script_utils -from glance.i18n import _, _LE - -LOG = logging.getLogger(__name__) - -_deprecated_opt = cfg.DeprecatedOpt('eventlet_executor_pool_size', - group='task') - -taskflow_executor_opts = [ - cfg.StrOpt('engine_mode', - default='parallel', - choices=('serial', 'parallel'), - help=_(""" -Set the taskflow engine mode. - -Provide a string type value to set the mode in which the taskflow -engine would schedule tasks to the workers on the hosts. Based on -this mode, the engine executes tasks either in single or multiple -threads. The possible values for this configuration option are: -``serial`` and ``parallel``. When set to ``serial``, the engine runs -all the tasks in a single thread which results in serial execution -of tasks. Setting this to ``parallel`` makes the engine run tasks in -multiple threads. This results in parallel execution of tasks. - -Possible values: - * serial - * parallel - -Related options: - * max_workers - -""")), - - cfg.IntOpt('max_workers', - default=10, - min=1, - help=_(""" -Set the number of engine executable tasks. - -Provide an integer value to limit the number of workers that can be -instantiated on the hosts. In other words, this number defines the -number of parallel tasks that can be executed at the same time by -the taskflow engine. This value can be greater than one when the -engine mode is set to parallel. 
- -Possible values: - * Integer value greater than or equal to 1 - -Related options: - * engine_mode - -"""), - deprecated_opts=[_deprecated_opt]) -] - - -CONF = cfg.CONF -CONF.register_opts(taskflow_executor_opts, group='taskflow_executor') - - -class TaskExecutor(glance.async.TaskExecutor): - - def __init__(self, context, task_repo, image_repo, image_factory): - self.context = context - self.task_repo = task_repo - self.image_repo = image_repo - self.image_factory = image_factory - super(TaskExecutor, self).__init__(context, task_repo, image_repo, - image_factory) - - @staticmethod - def _fetch_an_executor(): - if CONF.taskflow_executor.engine_mode != 'parallel': - return None - else: - max_workers = CONF.taskflow_executor.max_workers - try: - return futurist.GreenThreadPoolExecutor( - max_workers=max_workers) - except RuntimeError: - # NOTE(harlowja): I guess eventlet isn't being made - # useable, well just use native threads then (or try to). - return futurist.ThreadPoolExecutor(max_workers=max_workers) - - def _get_flow(self, task): - try: - task_input = script_utils.unpack_task_input(task) - - kwds = { - 'task_id': task.task_id, - 'task_type': task.type, - 'context': self.context, - 'task_repo': self.task_repo, - 'image_repo': self.image_repo, - 'image_factory': self.image_factory - } - - if task.type == "import": - uri = script_utils.validate_location_uri( - task_input.get('import_from')) - kwds['uri'] = uri - return driver.DriverManager('glance.flows', task.type, - invoke_on_load=True, - invoke_kwds=kwds).driver - except urllib.error.URLError as exc: - raise exception.ImportTaskError(message=exc.reason) - except (exception.BadStoreUri, exception.Invalid) as exc: - raise exception.ImportTaskError(message=exc.msg) - except RuntimeError: - raise NotImplementedError() - - def begin_processing(self, task_id): - try: - super(TaskExecutor, self).begin_processing(task_id) - except exception.ImportTaskError as exc: - LOG.error(_LE('Failed to execute task 
%(task_id)s: %(exc)s') % - {'task_id': task_id, 'exc': exc.msg}) - task = self.task_repo.get(task_id) - task.fail(exc.msg) - self.task_repo.save(task) - - def _run(self, task_id, task_type): - LOG.debug('Taskflow executor picked up the execution of task ID ' - '%(task_id)s of task type ' - '%(task_type)s', {'task_id': task_id, - 'task_type': task_type}) - - task = script_utils.get_task(self.task_repo, task_id) - if task is None: - # NOTE: This happens if task is not found in the database. In - # such cases, there is no way to update the task status so, - # it's ignored here. - return - - flow = self._get_flow(task) - executor = self._fetch_an_executor() - try: - engine = engines.load( - flow, - engine=CONF.taskflow_executor.engine_mode, executor=executor, - max_workers=CONF.taskflow_executor.max_workers) - with llistener.DynamicLoggingListener(engine, log=LOG): - engine.run() - except Exception as exc: - with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to execute task %(task_id)s: %(exc)s') % - {'task_id': task_id, - 'exc': encodeutils.exception_to_unicode(exc)}) - # TODO(sabari): Check for specific exceptions and update the - # task failure message. - task.fail(_('Task failed due to Internal Error')) - self.task_repo.save(task) - finally: - if executor is not None: - executor.shutdown() diff --git a/glance/async/utils.py b/glance/async/utils.py deleted file mode 100644 index 6a5054ec..00000000 --- a/glance/async/utils.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_concurrency import processutils as putils -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import units -from taskflow import task - -from glance.i18n import _LW - - -LOG = logging.getLogger(__name__) - -# NOTE(hemanthm): As reported in the bug #1449062, "qemu-img info" calls can -# be exploited to craft DoS attacks by providing malicious input. The process -# limits defined here are protections against such attacks. This essentially -# limits the CPU time and address space used by the process that executes -# "qemu-img info" command to 2 seconds and 1 GB respectively. -QEMU_IMG_PROC_LIMITS = putils.ProcessLimits(cpu_time=2, - address_space=1 * units.Gi) - - -class OptionalTask(task.Task): - - def __init__(self, *args, **kwargs): - super(OptionalTask, self).__init__(*args, **kwargs) - self.execute = self._catch_all(self.execute) - - def _catch_all(self, func): - # NOTE(flaper87): Read this comment before calling the MI6 - # Here's the thing, there's no nice way to define "optional" - # tasks. That is, tasks whose failure shouldn't affect the execution - # of the flow. The only current "sane" way to do this, is by catching - # everything and logging. This seems harmless from a taskflow - # perspective but it is not. There are some issues related to this - # "workaround": - # - # - Task's states will shamelessly lie to us saying the task succeeded. 
- # - # - No revert procedure will be triggered, which means optional tasks, - # for now, mustn't cause any side-effects because they won't be able to - # clean them up. If these tasks depend on other task that do cause side - # effects, a task that cleans those side effects most be registered as - # well. For example, _ImportToFS, _MyDumbTask, _DeleteFromFS. - # - # - Ideally, optional tasks shouldn't `provide` new values unless they - # are part of an optional flow. Due to the decoration of the execute - # method, these tasks will need to define the provided methods at - # class level using `default_provides`. - # - # - # The taskflow team is working on improving this and on something that - # will provide the ability of defining optional tasks. For now, to lie - # ourselves we must. - # - # NOTE(harlowja): The upstream change that is hopefully going to make - # this easier/built-in is at: https://review.openstack.org/#/c/271116/ - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except Exception as exc: - msg = (_LW("An optional task has failed, " - "the failure was: %s") % - encodeutils.exception_to_unicode(exc)) - LOG.warn(msg) - return wrapper diff --git a/glance/cmd/__init__.py b/glance/cmd/__init__.py deleted file mode 100644 index e1aeec3b..00000000 --- a/glance/cmd/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from glance import i18n - -i18n.enable_lazy() diff --git a/glance/cmd/api.py b/glance/cmd/api.py deleted file mode 100644 index 48d431c8..00000000 --- a/glance/cmd/api.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Glance API Server -""" - -import os -import sys - -import eventlet -from oslo_utils import encodeutils - - -# Monkey patch socket, time, select, threads -eventlet.patcher.monkey_patch(all=False, socket=True, time=True, - select=True, thread=True, os=True) - -# If ../glance/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): - sys.path.insert(0, possible_topdir) - -import glance_store -from oslo_config import cfg -from oslo_log import log as logging -import osprofiler.initializer - -from glance.common import config -from glance.common import exception -from glance.common import wsgi -from glance import notifier - -CONF = cfg.CONF -CONF.import_group("profiler", "glance.common.wsgi") -logging.register_options(CONF) - -KNOWN_EXCEPTIONS = (RuntimeError, - exception.WorkerCreationFailure, - glance_store.exceptions.BadStoreConfiguration, - ValueError) - - -def fail(e): - global KNOWN_EXCEPTIONS - return_code = KNOWN_EXCEPTIONS.index(type(e)) + 1 - sys.stderr.write("ERROR: %s\n" % encodeutils.exception_to_unicode(e)) - sys.exit(return_code) - - -def main(): - try: - config.parse_args() - config.set_config_defaults() - wsgi.set_eventlet_hub() - logging.setup(CONF, 'glance') - notifier.set_defaults() - - if CONF.profiler.enabled: - osprofiler.initializer.init_from_conf( - conf=CONF, - context={}, - project="glance", - service="api", - host=CONF.bind_host - ) - - server = wsgi.Server(initialize_glance_store=True) - server.start(config.load_paste_app('glance-api'), default_port=9292) - server.wait() - except KNOWN_EXCEPTIONS as e: - fail(e) - - -if __name__ == '__main__': - main() diff --git a/glance/cmd/cache_cleaner.py b/glance/cmd/cache_cleaner.py deleted file mode 100644 index 3e304495..00000000 --- a/glance/cmd/cache_cleaner.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Glance Image Cache Invalid Cache Entry and Stalled Image cleaner - -This is meant to be run as a periodic task from cron. - -If something goes wrong while we're caching an image (for example the fetch -times out, or an exception is raised), we create an 'invalid' entry. These -entires are left around for debugging purposes. However, after some period of -time, we want to clean these up. - -Also, if an incomplete image hangs around past the image_cache_stall_time -period, we automatically sweep it up. -""" - -import os -import sys - -from oslo_log import log as logging - -# If ../glance/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): - sys.path.insert(0, possible_topdir) - -from glance.common import config -from glance.image_cache import cleaner - -CONF = config.CONF -logging.register_options(CONF) -CONF.set_default(name='use_stderr', default=True) - - -def main(): - try: - config.parse_cache_args() - logging.setup(CONF, 'glance') - - app = cleaner.Cleaner() - app.run() - except RuntimeError as e: - sys.exit("ERROR: %s" % e) diff --git a/glance/cmd/cache_manage.py b/glance/cmd/cache_manage.py deleted file mode 100644 index 997e0e71..00000000 --- a/glance/cmd/cache_manage.py +++ /dev/null @@ -1,522 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A simple cache management utility for Glance. -""" -from __future__ import print_function - -import datetime -import functools -import optparse -import os -import sys -import time - -from oslo_utils import encodeutils -import prettytable - -from six.moves import input - -# If ../glance/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): - sys.path.insert(0, possible_topdir) - -from glance.common import exception -import glance.image_cache.client -from glance.version import version_info as version - - -SUCCESS = 0 -FAILURE = 1 - - -def catch_error(action): - """Decorator to provide sensible default error handling for actions.""" - def wrap(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - ret = func(*args, **kwargs) - return SUCCESS if ret is None else ret - except exception.NotFound: - options = args[0] - print("Cache management middleware not enabled on host %s" % - options.host) - return FAILURE - except exception.Forbidden: - print("Not authorized to make this request.") - return FAILURE - except Exception as e: - options = args[0] - if options.debug: - raise - print("Failed to %s. Got error:" % action) - pieces = encodeutils.exception_to_unicode(e).split('\n') - for piece in pieces: - print(piece) - return FAILURE - - return wrapper - return wrap - - -@catch_error('show cached images') -def list_cached(options, args): - """%(prog)s list-cached [options] - -List all images currently cached. - """ - client = get_client(options) - images = client.get_cached_images() - if not images: - print("No cached images.") - return SUCCESS - - print("Found %d cached images..." 
% len(images)) - - pretty_table = prettytable.PrettyTable(("ID", - "Last Accessed (UTC)", - "Last Modified (UTC)", - "Size", - "Hits")) - pretty_table.align['Size'] = "r" - pretty_table.align['Hits'] = "r" - - for image in images: - last_accessed = image['last_accessed'] - if last_accessed == 0: - last_accessed = "N/A" - else: - last_accessed = datetime.datetime.utcfromtimestamp( - last_accessed).isoformat() - - pretty_table.add_row(( - image['image_id'], - last_accessed, - datetime.datetime.utcfromtimestamp( - image['last_modified']).isoformat(), - image['size'], - image['hits'])) - - print(pretty_table.get_string()) - - -@catch_error('show queued images') -def list_queued(options, args): - """%(prog)s list-queued [options] - -List all images currently queued for caching. - """ - client = get_client(options) - images = client.get_queued_images() - if not images: - print("No queued images.") - return SUCCESS - - print("Found %d queued images..." % len(images)) - - pretty_table = prettytable.PrettyTable(("ID",)) - - for image in images: - pretty_table.add_row((image,)) - - print(pretty_table.get_string()) - - -@catch_error('queue the specified image for caching') -def queue_image(options, args): - """%(prog)s queue-image [options] - -Queues an image for caching -""" - if len(args) == 1: - image_id = args.pop() - else: - print("Please specify one and only ID of the image you wish to ") - print("queue from the cache as the first argument") - return FAILURE - - if (not options.force and - not user_confirm("Queue image %(image_id)s for caching?" 
% - {'image_id': image_id}, default=False)): - return SUCCESS - - client = get_client(options) - client.queue_image_for_caching(image_id) - - if options.verbose: - print("Queued image %(image_id)s for caching" % - {'image_id': image_id}) - - return SUCCESS - - -@catch_error('delete the specified cached image') -def delete_cached_image(options, args): - """ -%(prog)s delete-cached-image [options] - -Deletes an image from the cache - """ - if len(args) == 1: - image_id = args.pop() - else: - print("Please specify one and only ID of the image you wish to ") - print("delete from the cache as the first argument") - return FAILURE - - if (not options.force and - not user_confirm("Delete cached image %(image_id)s?" % - {'image_id': image_id}, default=False)): - return SUCCESS - - client = get_client(options) - client.delete_cached_image(image_id) - - if options.verbose: - print("Deleted cached image %(image_id)s" % {'image_id': image_id}) - - return SUCCESS - - -@catch_error('Delete all cached images') -def delete_all_cached_images(options, args): - """%(prog)s delete-all-cached-images [options] - -Remove all images from the cache. - """ - if (not options.force and - not user_confirm("Delete all cached images?", default=False)): - return SUCCESS - - client = get_client(options) - num_deleted = client.delete_all_cached_images() - - if options.verbose: - print("Deleted %(num_deleted)s cached images" % - {'num_deleted': num_deleted}) - - return SUCCESS - - -@catch_error('delete the specified queued image') -def delete_queued_image(options, args): - """ -%(prog)s delete-queued-image [options] - -Deletes an image from the cache - """ - if len(args) == 1: - image_id = args.pop() - else: - print("Please specify one and only ID of the image you wish to ") - print("delete from the cache as the first argument") - return FAILURE - - if (not options.force and - not user_confirm("Delete queued image %(image_id)s?" 
% - {'image_id': image_id}, default=False)): - return SUCCESS - - client = get_client(options) - client.delete_queued_image(image_id) - - if options.verbose: - print("Deleted queued image %(image_id)s" % {'image_id': image_id}) - - return SUCCESS - - -@catch_error('Delete all queued images') -def delete_all_queued_images(options, args): - """%(prog)s delete-all-queued-images [options] - -Remove all images from the cache queue. - """ - if (not options.force and - not user_confirm("Delete all queued images?", default=False)): - return SUCCESS - - client = get_client(options) - num_deleted = client.delete_all_queued_images() - - if options.verbose: - print("Deleted %(num_deleted)s queued images" % - {'num_deleted': num_deleted}) - - return SUCCESS - - -def get_client(options): - """Return a new client object to a Glance server. - - specified by the --host and --port options - supplied to the CLI - """ - return glance.image_cache.client.get_client( - host=options.host, - port=options.port, - username=options.os_username, - password=options.os_password, - tenant=options.os_tenant_name, - auth_url=options.os_auth_url, - auth_strategy=options.os_auth_strategy, - auth_token=options.os_auth_token, - region=options.os_region_name, - insecure=options.insecure) - - -def env(*vars, **kwargs): - """Search for the first defined of possibly many env vars. - - Returns the first environment variable defined in vars, or - returns the default defined in kwargs. - """ - for v in vars: - value = os.environ.get(v) - if value: - return value - return kwargs.get('default', '') - - -def create_options(parser): - """Set up the CLI and config-file options that may be - parsed and program commands. 
- - :param parser: The option parser - """ - parser.add_option('-v', '--verbose', default=False, action="store_true", - help="Print more verbose output.") - parser.add_option('-d', '--debug', default=False, action="store_true", - help="Print debugging output.") - parser.add_option('-H', '--host', metavar="ADDRESS", default="0.0.0.0", - help="Address of Glance API host. " - "Default: %default.") - parser.add_option('-p', '--port', dest="port", metavar="PORT", - type=int, default=9292, - help="Port the Glance API host listens on. " - "Default: %default.") - parser.add_option('-k', '--insecure', dest="insecure", - default=False, action="store_true", - help="Explicitly allow glance to perform \"insecure\" " - "SSL (https) requests. The server's certificate will " - "not be verified against any certificate authorities. " - "This option should be used with caution.") - parser.add_option('-f', '--force', dest="force", metavar="FORCE", - default=False, action="store_true", - help="Prevent select actions from requesting " - "user confirmation.") - - parser.add_option('--os-auth-token', - dest='os_auth_token', - default=env('OS_AUTH_TOKEN'), - help='Defaults to env[OS_AUTH_TOKEN].') - parser.add_option('-A', '--os_auth_token', '--auth_token', - dest='os_auth_token', - help=optparse.SUPPRESS_HELP) - - parser.add_option('--os-username', - dest='os_username', - default=env('OS_USERNAME'), - help='Defaults to env[OS_USERNAME].') - parser.add_option('-I', '--os_username', - dest='os_username', - help=optparse.SUPPRESS_HELP) - - parser.add_option('--os-password', - dest='os_password', - default=env('OS_PASSWORD'), - help='Defaults to env[OS_PASSWORD].') - parser.add_option('-K', '--os_password', - dest='os_password', - help=optparse.SUPPRESS_HELP) - - parser.add_option('--os-region-name', - dest='os_region_name', - default=env('OS_REGION_NAME'), - help='Defaults to env[OS_REGION_NAME].') - parser.add_option('-R', '--os_region_name', - dest='os_region_name', - 
help=optparse.SUPPRESS_HELP) - - parser.add_option('--os-tenant-id', - dest='os_tenant_id', - default=env('OS_TENANT_ID'), - help='Defaults to env[OS_TENANT_ID].') - parser.add_option('--os_tenant_id', - dest='os_tenant_id', - help=optparse.SUPPRESS_HELP) - - parser.add_option('--os-tenant-name', - dest='os_tenant_name', - default=env('OS_TENANT_NAME'), - help='Defaults to env[OS_TENANT_NAME].') - parser.add_option('-T', '--os_tenant_name', - dest='os_tenant_name', - help=optparse.SUPPRESS_HELP) - - parser.add_option('--os-auth-url', - default=env('OS_AUTH_URL'), - help='Defaults to env[OS_AUTH_URL].') - parser.add_option('-N', '--os_auth_url', - dest='os_auth_url', - help=optparse.SUPPRESS_HELP) - - parser.add_option('-S', '--os_auth_strategy', dest="os_auth_strategy", - metavar="STRATEGY", - help="Authentication strategy (keystone or noauth).") - - -def parse_options(parser, cli_args): - """ - Returns the parsed CLI options, command to run and its arguments, merged - with any same-named options found in a configuration file - - :param parser: The option parser - """ - if not cli_args: - cli_args.append('-h') # Show options in usage output... 
- - (options, args) = parser.parse_args(cli_args) - - # HACK(sirp): Make the parser available to the print_help method - # print_help is a command, so it only accepts (options, args); we could - # one-off have it take (parser, options, args), however, for now, I think - # this little hack will suffice - options.__parser = parser - - if not args: - parser.print_usage() - sys.exit(0) - - command_name = args.pop(0) - command = lookup_command(parser, command_name) - - return (options, command, args) - - -def print_help(options, args): - """ - Print help specific to a command - """ - parser = options.__parser - - if not args: - parser.print_help() - else: - number_of_commands = len(args) - if number_of_commands == 1: - command_name = args.pop() - command = lookup_command(parser, command_name) - print(command.__doc__ % {'prog': os.path.basename(sys.argv[0])}) - else: - sys.exit("Please specify one command") - - -def lookup_command(parser, command_name): - BASE_COMMANDS = {'help': print_help} - - CACHE_COMMANDS = { - 'list-cached': list_cached, - 'list-queued': list_queued, - 'queue-image': queue_image, - 'delete-cached-image': delete_cached_image, - 'delete-all-cached-images': delete_all_cached_images, - 'delete-queued-image': delete_queued_image, - 'delete-all-queued-images': delete_all_queued_images, - } - - commands = {} - for command_set in (BASE_COMMANDS, CACHE_COMMANDS): - commands.update(command_set) - - try: - command = commands[command_name] - except KeyError: - parser.print_usage() - sys.exit("Unknown command: %(cmd_name)s" % {'cmd_name': command_name}) - - return command - - -def user_confirm(prompt, default=False): - """Yes/No question dialog with user. 
- - :param prompt: question/statement to present to user (string) - :param default: boolean value to return if empty string - is received as response to prompt - - """ - if default: - prompt_default = "[Y/n]" - else: - prompt_default = "[y/N]" - - answer = input("%s %s " % (prompt, prompt_default)) - - if answer == "": - return default - else: - return answer.lower() in ("yes", "y") - - -def main(): - usage = """ -%prog [options] [args] - -Commands: - - help Output help for one of the commands below - - list-cached List all images currently cached - - list-queued List all images currently queued for caching - - queue-image Queue an image for caching - - delete-cached-image Purges an image from the cache - - delete-all-cached-images Removes all images from the cache - - delete-queued-image Deletes an image from the cache queue - - delete-all-queued-images Deletes all images from the cache queue -""" - - version_string = version.cached_version_string() - oparser = optparse.OptionParser(version=version_string, - usage=usage.strip()) - create_options(oparser) - (options, command, args) = parse_options(oparser, sys.argv[1:]) - - try: - start_time = time.time() - result = command(options, args) - end_time = time.time() - if options.verbose: - print("Completed in %-0.4f sec." % (end_time - start_time)) - sys.exit(result) - except (RuntimeError, NotImplementedError) as e: - sys.exit("ERROR: %s" % e) - -if __name__ == '__main__': - main() diff --git a/glance/cmd/cache_prefetcher.py b/glance/cmd/cache_prefetcher.py deleted file mode 100644 index 5310f7a3..00000000 --- a/glance/cmd/cache_prefetcher.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2011-2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Glance Image Cache Pre-fetcher - -This is meant to be run from the command line after queueing -images to be pretched. -""" - -import os -import sys - -# If ../glance/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... -possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): - sys.path.insert(0, possible_topdir) - -import glance_store -from oslo_log import log as logging - -from glance.common import config -from glance.image_cache import prefetcher - -CONF = config.CONF -logging.register_options(CONF) -CONF.set_default(name='use_stderr', default=True) - - -def main(): - try: - config.parse_cache_args() - logging.setup(CONF, 'glance') - - glance_store.register_opts(config.CONF) - glance_store.create_stores(config.CONF) - glance_store.verify_default_store() - - app = prefetcher.Prefetcher() - app.run() - except RuntimeError as e: - sys.exit("ERROR: %s" % e) - - -if __name__ == '__main__': - main() diff --git a/glance/cmd/cache_pruner.py b/glance/cmd/cache_pruner.py deleted file mode 100644 index 45d1bc96..00000000 --- a/glance/cmd/cache_pruner.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Glance Image Cache Pruner - -This is meant to be run as a periodic task, perhaps every half-hour. -""" - -import os -import sys - -from oslo_log import log as logging - -# If ../glance/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... -possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): - sys.path.insert(0, possible_topdir) - -from glance.common import config -from glance.image_cache import pruner - -CONF = config.CONF -logging.register_options(CONF) -CONF.set_default(name='use_stderr', default=True) - - -def main(): - try: - config.parse_cache_args() - logging.setup(CONF, 'glance') - - app = pruner.Pruner() - app.run() - except RuntimeError as e: - sys.exit("ERROR: %s" % e) diff --git a/glance/cmd/control.py b/glance/cmd/control.py deleted file mode 100644 index 64a26635..00000000 --- a/glance/cmd/control.py +++ /dev/null @@ -1,410 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Helper script for starting/stopping/reloading Glance server programs. -Thanks for some of the code, Swifties ;) -""" - -from __future__ import print_function -from __future__ import with_statement - -import argparse -import fcntl -import os -import resource -import signal -import subprocess -import sys -import tempfile -import time - -# If ../glance/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... -possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): - sys.path.insert(0, possible_topdir) - -from oslo_config import cfg -from oslo_utils import units -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from glance.common import config -from glance.i18n import _ - -CONF = cfg.CONF - -ALL_COMMANDS = ['start', 'status', 'stop', 'shutdown', 'restart', - 'reload', 'force-reload'] -ALL_SERVERS = ['api', 'registry', 'scrubber'] -RELOAD_SERVERS = ['glance-api', 'glance-registry'] -GRACEFUL_SHUTDOWN_SERVERS = ['glance-api', 'glance-registry', - 'glance-scrubber'] -MAX_DESCRIPTORS = 32768 -MAX_MEMORY = 2 * units.Gi # 2 GB -USAGE = """%(prog)s [options] [CONFPATH] - -Where is one of: - - all, {0} - -And command is one of: - - {1} - -And CONFPATH is the optional configuration file to use.""".format( - ', '.join(ALL_SERVERS), ', '.join(ALL_COMMANDS)) - -exitcode = 0 - - -def gated_by(predicate): - def 
wrap(f): - def wrapped_f(*args): - if predicate: - return f(*args) - else: - return None - return wrapped_f - return wrap - - -def pid_files(server, pid_file): - pid_files = [] - if pid_file: - if os.path.exists(os.path.abspath(pid_file)): - pid_files = [os.path.abspath(pid_file)] - else: - if os.path.exists('/var/run/glance/%s.pid' % server): - pid_files = ['/var/run/glance/%s.pid' % server] - for pid_file in pid_files: - pid = int(open(pid_file).read().strip()) - yield pid_file, pid - - -def do_start(verb, pid_file, server, args): - if verb != 'Respawn' and pid_file == CONF.pid_file: - for pid_file, pid in pid_files(server, pid_file): - if os.path.exists('/proc/%s' % pid): - print(_("%(serv)s appears to already be running: %(pid)s") % - {'serv': server, 'pid': pid_file}) - return - else: - print(_("Removing stale pid file %s") % pid_file) - os.unlink(pid_file) - - try: - resource.setrlimit(resource.RLIMIT_NOFILE, - (MAX_DESCRIPTORS, MAX_DESCRIPTORS)) - resource.setrlimit(resource.RLIMIT_DATA, - (MAX_MEMORY, MAX_MEMORY)) - except ValueError: - print(_('Unable to increase file descriptor limit. 
' - 'Running as non-root?')) - os.environ['PYTHON_EGG_CACHE'] = '/tmp' - - def write_pid_file(pid_file, pid): - with open(pid_file, 'w') as fp: - fp.write('%d\n' % pid) - - def redirect_to_null(fds): - with open(os.devnull, 'r+b') as nullfile: - for desc in fds: # close fds - try: - os.dup2(nullfile.fileno(), desc) - except OSError: - pass - - def redirect_to_syslog(fds, server): - log_cmd = 'logger' - log_cmd_params = '-t "%s[%d]"' % (server, os.getpid()) - process = subprocess.Popen([log_cmd, log_cmd_params], - stdin=subprocess.PIPE) - for desc in fds: # pipe to logger command - try: - os.dup2(process.stdin.fileno(), desc) - except OSError: - pass - - def redirect_stdio(server, capture_output): - input = [sys.stdin.fileno()] - output = [sys.stdout.fileno(), sys.stderr.fileno()] - - redirect_to_null(input) - if capture_output: - redirect_to_syslog(output, server) - else: - redirect_to_null(output) - - @gated_by(CONF.capture_output) - def close_stdio_on_exec(): - fds = [sys.stdin.fileno(), sys.stdout.fileno(), sys.stderr.fileno()] - for desc in fds: # set close on exec flag - fcntl.fcntl(desc, fcntl.F_SETFD, fcntl.FD_CLOEXEC) - - def launch(pid_file, conf_file=None, capture_output=False, await_time=0): - args = [server] - if conf_file: - args += ['--config-file', conf_file] - msg = (_('%(verb)sing %(serv)s with %(conf)s') % - {'verb': verb, 'serv': server, 'conf': conf_file}) - else: - msg = (_('%(verb)sing %(serv)s') % {'verb': verb, 'serv': server}) - print(msg) - - close_stdio_on_exec() - - pid = os.fork() - if pid == 0: - os.setsid() - redirect_stdio(server, capture_output) - try: - os.execlp('%s' % server, *args) - except OSError as e: - msg = (_('unable to launch %(serv)s. 
Got error: %(e)s') % - {'serv': server, 'e': e}) - sys.exit(msg) - sys.exit(0) - else: - write_pid_file(pid_file, pid) - await_child(pid, await_time) - return pid - - @gated_by(CONF.await_child) - def await_child(pid, await_time): - bail_time = time.time() + await_time - while time.time() < bail_time: - reported_pid, status = os.waitpid(pid, os.WNOHANG) - if reported_pid == pid: - global exitcode - exitcode = os.WEXITSTATUS(status) - break - time.sleep(0.05) - - conf_file = None - if args and os.path.exists(args[0]): - conf_file = os.path.abspath(os.path.expanduser(args[0])) - - return launch(pid_file, conf_file, CONF.capture_output, CONF.await_child) - - -def do_check_status(pid_file, server): - if os.path.exists(pid_file): - with open(pid_file, 'r') as pidfile: - pid = pidfile.read().strip() - print(_("%(serv)s (pid %(pid)s) is running...") % - {'serv': server, 'pid': pid}) - else: - print(_("%s is stopped") % server) - - -def get_pid_file(server, pid_file): - pid_file = (os.path.abspath(pid_file) if pid_file else - '/var/run/glance/%s.pid' % server) - dir, file = os.path.split(pid_file) - - if not os.path.exists(dir): - try: - os.makedirs(dir) - except OSError: - pass - - if not os.access(dir, os.W_OK): - fallback = os.path.join(tempfile.mkdtemp(), '%s.pid' % server) - msg = (_('Unable to create pid file %(pid)s. 
Running as non-root?\n' - 'Falling back to a temp file, you can stop %(service)s ' - 'service using:\n' - ' %(file)s %(server)s stop --pid-file %(fb)s') % - {'pid': pid_file, - 'service': server, - 'file': __file__, - 'server': server, - 'fb': fallback}) - print(msg) - pid_file = fallback - - return pid_file - - -def do_reload(pid_file, server): - if server not in RELOAD_SERVERS: - msg = (_('Reload of %(serv)s not supported') % {'serv': server}) - sys.exit(msg) - - pid = None - if os.path.exists(pid_file): - with open(pid_file, 'r') as pidfile: - pid = int(pidfile.read().strip()) - else: - msg = (_('Server %(serv)s is stopped') % {'serv': server}) - sys.exit(msg) - - sig = signal.SIGHUP - try: - print(_('Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)') - % {'serv': server, 'pid': pid, 'sig': sig}) - os.kill(pid, sig) - except OSError: - print(_("Process %d not running") % pid) - - -def do_stop(server, args, graceful=False): - if graceful and server in GRACEFUL_SHUTDOWN_SERVERS: - sig = signal.SIGHUP - else: - sig = signal.SIGTERM - - did_anything = False - pfiles = pid_files(server, CONF.pid_file) - for pid_file, pid in pfiles: - did_anything = True - try: - os.unlink(pid_file) - except OSError: - pass - try: - print(_('Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)') - % {'serv': server, 'pid': pid, 'sig': sig}) - os.kill(pid, sig) - except OSError: - print(_("Process %d not running") % pid) - for pid_file, pid in pfiles: - for _junk in range(150): # 15 seconds - if not os.path.exists('/proc/%s' % pid): - break - time.sleep(0.1) - else: - print(_('Waited 15 seconds for pid %(pid)s (%(file)s) to die;' - ' giving up') % {'pid': pid, 'file': pid_file}) - if not did_anything: - print(_('%s is already stopped') % server) - - -def add_command_parsers(subparsers): - cmd_parser = argparse.ArgumentParser(add_help=False) - cmd_subparsers = cmd_parser.add_subparsers(dest='command') - for cmd in ALL_COMMANDS: - parser = cmd_subparsers.add_parser(cmd) - 
parser.add_argument('args', nargs=argparse.REMAINDER) - - for server in ALL_SERVERS: - full_name = 'glance-' + server - - parser = subparsers.add_parser(server, parents=[cmd_parser]) - parser.set_defaults(servers=[full_name]) - - parser = subparsers.add_parser(full_name, parents=[cmd_parser]) - parser.set_defaults(servers=[full_name]) - - parser = subparsers.add_parser('all', parents=[cmd_parser]) - parser.set_defaults(servers=['glance-' + s for s in ALL_SERVERS]) - - -def main(): - global exitcode - - opts = [ - cfg.SubCommandOpt('server', - title='Server types', - help='Available server types', - handler=add_command_parsers), - cfg.StrOpt('pid-file', - metavar='PATH', - help='File to use as pid file. Default: ' - '/var/run/glance/$server.pid.'), - cfg.IntOpt('await-child', - metavar='DELAY', - default=0, - help='Period to wait for service death ' - 'in order to report exit code ' - '(default is to not wait at all).'), - cfg.BoolOpt('capture-output', - default=False, - help='Capture stdout/err in syslog ' - 'instead of discarding it.'), - cfg.BoolOpt('respawn', - default=False, - help='Restart service on unexpected death.'), - ] - CONF.register_cli_opts(opts) - - config.parse_args(usage=USAGE) - - @gated_by(CONF.await_child) - @gated_by(CONF.respawn) - def mutually_exclusive(): - sys.stderr.write('--await-child and --respawn are mutually exclusive') - sys.exit(1) - - mutually_exclusive() - - @gated_by(CONF.respawn) - def anticipate_respawn(children): - while children: - pid, status = os.wait() - if pid in children: - (pid_file, server, args) = children.pop(pid) - running = os.path.exists(pid_file) - one_second_ago = time.time() - 1 - bouncing = (running and - os.path.getmtime(pid_file) >= one_second_ago) - if running and not bouncing: - args = (pid_file, server, args) - new_pid = do_start('Respawn', *args) - children[new_pid] = args - else: - rsn = 'bouncing' if bouncing else 'deliberately stopped' - print(_('Suppressed respawn as %(serv)s was %(rsn)s.') - % 
{'serv': server, 'rsn': rsn}) - - if CONF.server.command == 'start': - children = {} - for server in CONF.server.servers: - pid_file = get_pid_file(server, CONF.pid_file) - args = (pid_file, server, CONF.server.args) - pid = do_start('Start', *args) - children[pid] = args - - anticipate_respawn(children) - - if CONF.server.command == 'status': - for server in CONF.server.servers: - pid_file = get_pid_file(server, CONF.pid_file) - do_check_status(pid_file, server) - - if CONF.server.command == 'stop': - for server in CONF.server.servers: - do_stop(server, CONF.server.args) - - if CONF.server.command == 'shutdown': - for server in CONF.server.servers: - do_stop(server, CONF.server.args, graceful=True) - - if CONF.server.command == 'restart': - for server in CONF.server.servers: - do_stop(server, CONF.server.args) - for server in CONF.server.servers: - pid_file = get_pid_file(server, CONF.pid_file) - do_start('Restart', pid_file, server, CONF.server.args) - - if CONF.server.command in ('reload', 'force-reload'): - for server in CONF.server.servers: - pid_file = get_pid_file(server, CONF.pid_file) - do_reload(pid_file, server) - - sys.exit(exitcode) diff --git a/glance/cmd/manage.py b/glance/cmd/manage.py deleted file mode 100644 index 0daac4e5..00000000 --- a/glance/cmd/manage.py +++ /dev/null @@ -1,470 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Glance Management Utility -""" - -from __future__ import print_function - -# FIXME(sirp): When we have glance-admin we can consider merging this into it -# Perhaps for consistency with Nova, we would then rename glance-admin -> -# glance-manage (or the other way around) - -import os -import sys -import time - -# If ../glance/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... -possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): - sys.path.insert(0, possible_topdir) - -from alembic import command as alembic_command - -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_log import log as logging -from oslo_utils import encodeutils -import six - -from glance.common import config -from glance.common import exception -from glance import context -from glance.db import migration as db_migration -from glance.db.sqlalchemy import alembic_migrations -from glance.db.sqlalchemy.alembic_migrations import data_migrations -from glance.db.sqlalchemy import api as db_api -from glance.db.sqlalchemy import metadata -from glance.i18n import _ - - -CONF = cfg.CONF - - -# Decorators for actions -def args(*args, **kwargs): - def _decorator(func): - func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) - return func - return _decorator - - -class DbCommands(object): - """Class for managing the db""" - - def __init__(self): - 
pass - - def version(self): - """Print database's current migration level""" - current_heads = alembic_migrations.get_current_alembic_heads() - if current_heads: - # Migrations are managed by alembic - for head in current_heads: - print(head) - else: - # Migrations are managed by legacy versioning scheme - print(_('Database is either not under migration control or under ' - 'legacy migration control, please run ' - '"glance-manage db sync" to place the database under ' - 'alembic migration control.')) - - @args('--version', metavar='', help='Database version') - def upgrade(self, version=db_migration.LATEST_REVISION): - """Upgrade the database's migration level""" - self.sync(version) - - @args('--version', metavar='', help='Database version') - def version_control(self, version=db_migration.ALEMBIC_INIT_VERSION): - """Place a database under migration control""" - - if version is None: - version = db_migration.ALEMBIC_INIT_VERSION - - a_config = alembic_migrations.get_alembic_config() - alembic_command.stamp(a_config, version) - print(_("Placed database under migration control at " - "revision:"), version) - - @args('--version', metavar='', help='Database version') - def sync(self, version=db_migration.LATEST_REVISION): - """ - Place an existing database under migration control and upgrade it. 
- """ - if version is None: - version = db_migration.LATEST_REVISION - - alembic_migrations.place_database_under_alembic_control() - - a_config = alembic_migrations.get_alembic_config() - alembic_command.upgrade(a_config, version) - heads = alembic_migrations.get_current_alembic_heads() - if heads is None: - raise exception.GlanceException("Database sync failed") - revs = ", ".join(heads) - if version == 'heads': - print(_("Upgraded database, current revision(s):"), revs) - else: - print(_('Upgraded database to: %(v)s, current revision(s): %(r)s') - % {'v': version, 'r': revs}) - - def expand(self): - """Run the expansion phase of a rolling upgrade procedure.""" - engine = db_api.get_engine() - if engine.engine.name != 'mysql': - sys.exit(_('Rolling upgrades are currently supported only for ' - 'MySQL')) - - expand_head = alembic_migrations.get_alembic_branch_head( - db_migration.EXPAND_BRANCH) - if not expand_head: - sys.exit(_('Database expansion failed. Couldn\'t find head ' - 'revision of expand branch.')) - - self.sync(version=expand_head) - - curr_heads = alembic_migrations.get_current_alembic_heads() - if expand_head not in curr_heads: - sys.exit(_('Database expansion failed. Database expansion should ' - 'have brought the database version up to "%(e_rev)s" ' - 'revision. But, current revisions are: %(curr_revs)s ') - % {'e_rev': expand_head, 'curr_revs': curr_heads}) - - def contract(self): - """Run the contraction phase of a rolling upgrade procedure.""" - engine = db_api.get_engine() - if engine.engine.name != 'mysql': - sys.exit(_('Rolling upgrades are currently supported only for ' - 'MySQL')) - - contract_head = alembic_migrations.get_alembic_branch_head( - db_migration.CONTRACT_BRANCH) - if not contract_head: - sys.exit(_('Database contraction failed. 
Couldn\'t find head ' - 'revision of contract branch.')) - - curr_heads = alembic_migrations.get_current_alembic_heads() - expand_head = alembic_migrations.get_alembic_branch_head( - db_migration.EXPAND_BRANCH) - if expand_head not in curr_heads: - sys.exit(_('Database contraction did not run. Database ' - 'contraction cannot be run before database expansion. ' - 'Run database expansion first using ' - '"glance-manage db expand"')) - - if data_migrations.has_pending_migrations(db_api.get_engine()): - sys.exit(_('Database contraction did not run. Database ' - 'contraction cannot be run before data migration is ' - 'complete. Run data migration using "glance-manage db ' - 'migrate".')) - - self.sync(version=contract_head) - - curr_heads = alembic_migrations.get_current_alembic_heads() - if contract_head not in curr_heads: - sys.exit(_('Database contraction failed. Database contraction ' - 'should have brought the database version up to ' - '"%(e_rev)s" revision. But, current revisions are: ' - '%(curr_revs)s ') % {'e_rev': expand_head, - 'curr_revs': curr_heads}) - - def migrate(self): - engine = db_api.get_engine() - if engine.engine.name != 'mysql': - sys.exit(_('Rolling upgrades are currently supported only for ' - 'MySQL')) - - curr_heads = alembic_migrations.get_current_alembic_heads() - expand_head = alembic_migrations.get_alembic_branch_head( - db_migration.EXPAND_BRANCH) - if expand_head not in curr_heads: - sys.exit(_('Data migration did not run. Data migration cannot be ' - 'run before database expansion. Run database ' - 'expansion first using "glance-manage db expand"')) - - rows_migrated = data_migrations.migrate(db_api.get_engine()) - print(_('Migrated %s rows') % rows_migrated) - - @args('--path', metavar='', help='Path to the directory or file ' - 'where json metadata is stored') - @args('--merge', action='store_true', - help='Merge files with data that is in the database. By default it ' - 'prefers existing data over new. 
This logic can be changed by ' - 'combining --merge option with one of these two options: ' - '--prefer_new or --overwrite.') - @args('--prefer_new', action='store_true', - help='Prefer new metadata over existing. Existing metadata ' - 'might be overwritten. Needs to be combined with --merge ' - 'option.') - @args('--overwrite', action='store_true', - help='Drop and rewrite metadata. Needs to be combined with --merge ' - 'option') - def load_metadefs(self, path=None, merge=False, - prefer_new=False, overwrite=False): - """Load metadefinition json files to database""" - metadata.db_load_metadefs(db_api.get_engine(), path, merge, - prefer_new, overwrite) - - def unload_metadefs(self): - """Unload metadefinitions from database""" - metadata.db_unload_metadefs(db_api.get_engine()) - - @args('--path', metavar='', help='Path to the directory where ' - 'json metadata files should be ' - 'saved.') - def export_metadefs(self, path=None): - """Export metadefinitions data from database to files""" - metadata.db_export_metadefs(db_api.get_engine(), - path) - - @args('--age_in_days', type=int, - help='Purge deleted rows older than age in days') - @args('--max_rows', type=int, - help='Limit number of records to delete') - def purge(self, age_in_days=30, max_rows=100): - """Purge deleted rows older than a given age from glance tables.""" - try: - age_in_days = int(age_in_days) - except ValueError: - sys.exit(_("Invalid int value for age_in_days: " - "%(age_in_days)s") % {'age_in_days': age_in_days}) - - try: - max_rows = int(max_rows) - except ValueError: - sys.exit(_("Invalid int value for max_rows: " - "%(max_rows)s") % {'max_rows': max_rows}) - - if age_in_days < 0: - sys.exit(_("Must supply a non-negative value for age.")) - if age_in_days >= (int(time.time()) / 86400): - sys.exit(_("Maximal age is count of days since epoch.")) - if max_rows < 1: - sys.exit(_("Minimal rows limit is 1.")) - ctx = context.get_admin_context(show_deleted=True) - - try: - 
db_api.purge_deleted_rows(ctx, age_in_days, max_rows) - except exception.Invalid as exc: - sys.exit(exc.msg) - except db_exc.DBReferenceError: - sys.exit(_("Purge command failed, check glance-manage" - " logs for more details.")) - - -class DbLegacyCommands(object): - """Class for managing the db using legacy commands""" - - def __init__(self, command_object): - self.command_object = command_object - - def version(self): - self.command_object.version() - - def upgrade(self, version=db_migration.LATEST_REVISION): - self.command_object.upgrade(CONF.command.version) - - def version_control(self, version=db_migration.ALEMBIC_INIT_VERSION): - self.command_object.version_control(CONF.command.version) - - def sync(self, version=db_migration.LATEST_REVISION): - self.command_object.sync(CONF.command.version) - - def expand(self): - self.command_object.expand() - - def contract(self): - self.command_object.contract() - - def migrate(self): - self.command_object.migrate() - - def load_metadefs(self, path=None, merge=False, - prefer_new=False, overwrite=False): - self.command_object.load_metadefs(CONF.command.path, - CONF.command.merge, - CONF.command.prefer_new, - CONF.command.overwrite) - - def unload_metadefs(self): - self.command_object.unload_metadefs() - - def export_metadefs(self, path=None): - self.command_object.export_metadefs(CONF.command.path) - - -def add_legacy_command_parsers(command_object, subparsers): - - legacy_command_object = DbLegacyCommands(command_object) - - parser = subparsers.add_parser('db_version') - parser.set_defaults(action_fn=legacy_command_object.version) - parser.set_defaults(action='db_version') - - parser = subparsers.add_parser('db_upgrade') - parser.set_defaults(action_fn=legacy_command_object.upgrade) - parser.add_argument('version', nargs='?') - parser.set_defaults(action='db_upgrade') - - parser = subparsers.add_parser('db_version_control') - parser.set_defaults(action_fn=legacy_command_object.version_control) - 
parser.add_argument('version', nargs='?') - parser.set_defaults(action='db_version_control') - - parser = subparsers.add_parser('db_sync') - parser.set_defaults(action_fn=legacy_command_object.sync) - parser.add_argument('version', nargs='?') - parser.set_defaults(action='db_sync') - - parser = subparsers.add_parser('db_expand') - parser.set_defaults(action_fn=legacy_command_object.expand) - parser.set_defaults(action='db_expand') - - parser = subparsers.add_parser('db_contract') - parser.set_defaults(action_fn=legacy_command_object.contract) - parser.set_defaults(action='db_contract') - - parser = subparsers.add_parser('db_migrate') - parser.set_defaults(action_fn=legacy_command_object.migrate) - parser.set_defaults(action='db_migrate') - - parser = subparsers.add_parser('db_load_metadefs') - parser.set_defaults(action_fn=legacy_command_object.load_metadefs) - parser.add_argument('path', nargs='?') - parser.add_argument('merge', nargs='?') - parser.add_argument('prefer_new', nargs='?') - parser.add_argument('overwrite', nargs='?') - parser.set_defaults(action='db_load_metadefs') - - parser = subparsers.add_parser('db_unload_metadefs') - parser.set_defaults(action_fn=legacy_command_object.unload_metadefs) - parser.set_defaults(action='db_unload_metadefs') - - parser = subparsers.add_parser('db_export_metadefs') - parser.set_defaults(action_fn=legacy_command_object.export_metadefs) - parser.add_argument('path', nargs='?') - parser.set_defaults(action='db_export_metadefs') - - -def add_command_parsers(subparsers): - command_object = DbCommands() - - parser = subparsers.add_parser('db') - parser.set_defaults(command_object=command_object) - - category_subparsers = parser.add_subparsers(dest='action') - - for (action, action_fn) in methods_of(command_object): - parser = category_subparsers.add_parser(action) - - action_kwargs = [] - for args, kwargs in getattr(action_fn, 'args', []): - # FIXME(basha): hack to assume dest is the arg name without - # the leading hyphens 
if no dest is supplied - kwargs.setdefault('dest', args[0][2:]) - if kwargs['dest'].startswith('action_kwarg_'): - action_kwargs.append( - kwargs['dest'][len('action_kwarg_'):]) - else: - action_kwargs.append(kwargs['dest']) - kwargs['dest'] = 'action_kwarg_' + kwargs['dest'] - - parser.add_argument(*args, **kwargs) - - parser.set_defaults(action_fn=action_fn) - parser.set_defaults(action_kwargs=action_kwargs) - - parser.add_argument('action_args', nargs='*') - - add_legacy_command_parsers(command_object, subparsers) - - -command_opt = cfg.SubCommandOpt('command', - title='Commands', - help='Available commands', - handler=add_command_parsers) - - -CATEGORIES = { - 'db': DbCommands, -} - - -def methods_of(obj): - """Get all callable methods of an object that don't start with underscore - - returns a list of tuples of the form (method_name, method) - """ - result = [] - for i in dir(obj): - if callable(getattr(obj, i)) and not i.startswith('_'): - result.append((i, getattr(obj, i))) - return result - - -def main(): - CONF.register_cli_opt(command_opt) - if len(sys.argv) < 2: - script_name = sys.argv[0] - print("%s category action []" % script_name) - print(_("Available categories:")) - for category in CATEGORIES: - print(_("\t%s") % category) - sys.exit(2) - - try: - logging.register_options(CONF) - CONF.set_default(name='use_stderr', default=True) - cfg_files = cfg.find_config_files(project='glance', - prog='glance-registry') - cfg_files.extend(cfg.find_config_files(project='glance', - prog='glance-api')) - cfg_files.extend(cfg.find_config_files(project='glance', - prog='glance-manage')) - config.parse_args(default_config_files=cfg_files) - config.set_config_defaults() - logging.setup(CONF, 'glance') - except RuntimeError as e: - sys.exit("ERROR: %s" % e) - - try: - if CONF.command.action.startswith('db'): - return CONF.command.action_fn() - else: - func_kwargs = {} - for k in CONF.command.action_kwargs: - v = getattr(CONF.command, 'action_kwarg_' + k) - if v is 
None: - continue - if isinstance(v, six.string_types): - v = encodeutils.safe_decode(v) - func_kwargs[k] = v - func_args = [encodeutils.safe_decode(arg) - for arg in CONF.command.action_args] - return CONF.command.action_fn(*func_args, **func_kwargs) - except exception.GlanceException as e: - sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e)) - - -if __name__ == '__main__': - main() diff --git a/glance/cmd/registry.py b/glance/cmd/registry.py deleted file mode 100644 index b5e3c5a7..00000000 --- a/glance/cmd/registry.py +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Reference implementation server for Glance Registry -""" - -import os -import sys - -import eventlet -from oslo_utils import encodeutils - -# Monkey patch socket and time -eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True) - -# If ../glance/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): - sys.path.insert(0, possible_topdir) - -from oslo_config import cfg -from oslo_log import log as logging -import osprofiler.initializer - -from glance.common import config -from glance.common import wsgi -from glance import notifier - -CONF = cfg.CONF -CONF.import_group("profiler", "glance.common.wsgi") -logging.register_options(CONF) - - -def main(): - try: - config.parse_args() - config.set_config_defaults() - wsgi.set_eventlet_hub() - logging.setup(CONF, 'glance') - notifier.set_defaults() - - if CONF.profiler.enabled: - osprofiler.initializer.init_from_conf( - conf=CONF, - context={}, - project="glance", - service="registry", - host=CONF.bind_host - ) - - server = wsgi.Server() - server.start(config.load_paste_app('glance-registry'), - default_port=9191) - server.wait() - except RuntimeError as e: - sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e)) - - -if __name__ == '__main__': - main() diff --git a/glance/cmd/replicator.py b/glance/cmd/replicator.py deleted file mode 100644 index 771f9ad6..00000000 --- a/glance/cmd/replicator.py +++ /dev/null @@ -1,781 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 Michael Still and Canonical Inc -# Copyright 2014 SoftLayer Technologies, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from __future__ import print_function - -import os -import sys - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -from oslo_utils import uuidutils -import six -from six.moves import http_client as http -import six.moves.urllib.parse as urlparse -from webob import exc - -from glance.common import config -from glance.common import exception -from glance.common import utils -from glance.i18n import _, _LE, _LI, _LW - -LOG = logging.getLogger(__name__) - - -# NOTE: positional arguments will be parsed before until -# this bug is corrected https://bugs.launchpad.net/oslo.config/+bug/1392428 -cli_opts = [ - cfg.IntOpt('chunksize', - short='c', - default=65536, - help="Amount of data to transfer per HTTP write."), - cfg.StrOpt('dontreplicate', - short='D', - default=('created_at date deleted_at location updated_at'), - help="List of fields to not replicate."), - cfg.BoolOpt('metaonly', - short='m', - default=False, - help="Only replicate metadata, not images."), - cfg.StrOpt('token', - short='t', - default='', - help=("Pass in your authentication token if you have " - "one. If you use this option the same token is " - "used for both the source and the target.")), - cfg.StrOpt('mastertoken', - short='M', - default='', - deprecated_since='Pike', - deprecated_reason='use sourcetoken instead', - help=("Pass in your authentication token if you have " - "one. This is the token used for the source system.")), - cfg.StrOpt('slavetoken', - short='S', - default='', - deprecated_since='Pike', - deprecated_reason='use targettoken instead', - help=("Pass in your authentication token if you have " - "one. 
This is the token used for the target system.")), - cfg.StrOpt('command', - positional=True, - help="Command to be given to replicator"), - cfg.MultiStrOpt('args', - positional=True, - help="Arguments for the command"), -] - -CONF = cfg.CONF -CONF.register_cli_opts(cli_opts) - -# TODO(stevelle) Remove deprecated opts some time after Queens -CONF.register_opt( - cfg.StrOpt('sourcetoken', - default='', - deprecated_opts=[cfg.DeprecatedOpt('mastertoken')], - help=("Pass in your authentication token if you have " - "one. This is the token used for the source."))) -CONF.register_opt( - cfg.StrOpt('targettoken', - default='', - deprecated_opts=[cfg.DeprecatedOpt('slavetoken')], - help=("Pass in your authentication token if you have " - "one. This is the token used for the target."))) - -logging.register_options(CONF) -CONF.set_default(name='use_stderr', default=True) - -# If ../glance/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... -possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): - sys.path.insert(0, possible_topdir) - - -COMMANDS = """Commands: - - help Output help for one of the commands below - - compare What is missing from the target glance? - dump Dump the contents of a glance instance to local disk. - livecopy Load the contents of one glance instance into another. - load Load the contents of a local directory into glance. - size Determine the size of a glance instance if dumped to disk. -""" - - -IMAGE_ALREADY_PRESENT_MESSAGE = _('The image %s is already present on ' - 'the target, but our check for it did ' - 'not find it. This indicates that we ' - 'do not have permissions to see all ' - 'the images on the target server.') - - -class ImageService(object): - def __init__(self, conn, auth_token): - """Initialize the ImageService. 
- - conn: a http_client.HTTPConnection to the glance server - auth_token: authentication token to pass in the x-auth-token header - """ - self.auth_token = auth_token - self.conn = conn - - def _http_request(self, method, url, headers, body, - ignore_result_body=False): - """Perform an HTTP request against the server. - - method: the HTTP method to use - url: the URL to request (not including server portion) - headers: headers for the request - body: body to send with the request - ignore_result_body: the body of the result will be ignored - - Returns: a http_client response object - """ - if self.auth_token: - headers.setdefault('x-auth-token', self.auth_token) - - LOG.debug('Request: %(method)s http://%(server)s:%(port)s' - '%(url)s with headers %(headers)s', - {'method': method, - 'server': self.conn.host, - 'port': self.conn.port, - 'url': url, - 'headers': repr(headers)}) - self.conn.request(method, url, body, headers) - - response = self.conn.getresponse() - headers = self._header_list_to_dict(response.getheaders()) - code = response.status - code_description = http.responses[code] - LOG.debug('Response: %(code)s %(status)s %(headers)s', - {'code': code, - 'status': code_description, - 'headers': repr(headers)}) - - if code == http.BAD_REQUEST: - raise exc.HTTPBadRequest( - explanation=response.read()) - - if code == http.INTERNAL_SERVER_ERROR: - raise exc.HTTPInternalServerError( - explanation=response.read()) - - if code == http.UNAUTHORIZED: - raise exc.HTTPUnauthorized( - explanation=response.read()) - - if code == http.FORBIDDEN: - raise exc.HTTPForbidden( - explanation=response.read()) - - if code == http.CONFLICT: - raise exc.HTTPConflict( - explanation=response.read()) - - if ignore_result_body: - # NOTE: because we are pipelining requests through a single HTTP - # connection, http_client requires that we read the response body - # before we can make another request. 
If the caller knows they - # don't care about the body, they can ask us to do that for them. - response.read() - return response - - def get_images(self): - """Return a detailed list of images. - - Yields a series of images as dicts containing metadata. - """ - params = {'is_public': None} - - while True: - url = '/v1/images/detail' - query = urlparse.urlencode(params) - if query: - url += '?%s' % query - - response = self._http_request('GET', url, {}, '') - result = jsonutils.loads(response.read()) - - if not result or 'images' not in result or not result['images']: - return - for image in result.get('images', []): - params['marker'] = image['id'] - yield image - - def get_image(self, image_uuid): - """Fetch image data from glance. - - image_uuid: the id of an image - - Returns: a http_client Response object where the body is the image. - """ - url = '/v1/images/%s' % image_uuid - return self._http_request('GET', url, {}, '') - - @staticmethod - def _header_list_to_dict(headers): - """Expand a list of headers into a dictionary. - - headers: a list of [(key, value), (key, value), (key, value)] - - Returns: a dictionary representation of the list - """ - d = {} - for (header, value) in headers: - if header.startswith('x-image-meta-property-'): - prop = header.replace('x-image-meta-property-', '') - d.setdefault('properties', {}) - d['properties'][prop] = value - else: - d[header.replace('x-image-meta-', '')] = value - return d - - def get_image_meta(self, image_uuid): - """Return the metadata for a single image. - - image_uuid: the id of an image - - Returns: image metadata as a dictionary - """ - url = '/v1/images/%s' % image_uuid - response = self._http_request('HEAD', url, {}, '', - ignore_result_body=True) - return self._header_list_to_dict(response.getheaders()) - - @staticmethod - def _dict_to_headers(d): - """Convert a dictionary into one suitable for a HTTP request. 
- - d: a dictionary - - Returns: the same dictionary, with x-image-meta added to every key - """ - h = {} - for key in d: - if key == 'properties': - for subkey in d[key]: - if d[key][subkey] is None: - h['x-image-meta-property-%s' % subkey] = '' - else: - h['x-image-meta-property-%s' % subkey] = d[key][subkey] - - else: - h['x-image-meta-%s' % key] = d[key] - return h - - def add_image(self, image_meta, image_data): - """Upload an image. - - image_meta: image metadata as a dictionary - image_data: image data as a object with a read() method - - Returns: a tuple of (http response headers, http response body) - """ - - url = '/v1/images' - headers = self._dict_to_headers(image_meta) - headers['Content-Type'] = 'application/octet-stream' - headers['Content-Length'] = int(image_meta['size']) - - response = self._http_request('POST', url, headers, image_data) - headers = self._header_list_to_dict(response.getheaders()) - - LOG.debug('Image post done') - body = response.read() - return headers, body - - def add_image_meta(self, image_meta): - """Update image metadata. - - image_meta: image metadata as a dictionary - - Returns: a tuple of (http response headers, http response body) - """ - - url = '/v1/images/%s' % image_meta['id'] - headers = self._dict_to_headers(image_meta) - headers['Content-Type'] = 'application/octet-stream' - - response = self._http_request('PUT', url, headers, '') - headers = self._header_list_to_dict(response.getheaders()) - - LOG.debug('Image post done') - body = response.read() - return headers, body - - -def get_image_service(): - """Get a copy of the image service. - - This is done like this to make it easier to mock out ImageService. 
- """ - return ImageService - - -def _human_readable_size(num, suffix='B'): - for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: - if abs(num) < 1024.0: - return "%3.1f %s%s" % (num, unit, suffix) - num /= 1024.0 - return "%.1f %s%s" % (num, 'Yi', suffix) - - -def replication_size(options, args): - """%(prog)s size - - Determine the size of a glance instance if dumped to disk. - - server:port: the location of the glance instance. - """ - - # Make sure server info is provided - if args is None or len(args) < 1: - raise TypeError(_("Too few arguments.")) - - server, port = utils.parse_valid_host_port(args.pop()) - - total_size = 0 - count = 0 - - imageservice = get_image_service() - client = imageservice(http.HTTPConnection(server, port), - options.targettoken) - for image in client.get_images(): - LOG.debug('Considering image: %(image)s', {'image': image}) - if image['status'] == 'active': - total_size += int(image['size']) - count += 1 - - print(_('Total size is %(size)d bytes (%(human_size)s) across ' - '%(img_count)d images') % - {'size': total_size, - 'human_size': _human_readable_size(total_size), - 'img_count': count}) - - -def replication_dump(options, args): - """%(prog)s dump - - Dump the contents of a glance instance to local disk. - - server:port: the location of the glance instance. - path: a directory on disk to contain the data. 
- """ - - # Make sure server and path are provided - if len(args) < 2: - raise TypeError(_("Too few arguments.")) - - path = args.pop() - server, port = utils.parse_valid_host_port(args.pop()) - - imageservice = get_image_service() - client = imageservice(http.HTTPConnection(server, port), - options.sourcetoken) - for image in client.get_images(): - LOG.debug('Considering: %(image_id)s (%(image_name)s) ' - '(%(image_size)d bytes)', - {'image_id': image['id'], - 'image_name': image.get('name', '--unnamed--'), - 'image_size': image['size']}) - - data_path = os.path.join(path, image['id']) - data_filename = data_path + '.img' - if not os.path.exists(data_path): - LOG.info(_LI('Storing: %(image_id)s (%(image_name)s)' - ' (%(image_size)d bytes) in %(data_filename)s'), - {'image_id': image['id'], - 'image_name': image.get('name', '--unnamed--'), - 'image_size': image['size'], - 'data_filename': data_filename}) - - # Dump glance information - if six.PY3: - f = open(data_path, 'w', encoding='utf-8') - else: - f = open(data_path, 'w') - with f: - f.write(jsonutils.dumps(image)) - - if image['status'] == 'active' and not options.metaonly: - # Now fetch the image. The metadata returned in headers here - # is the same as that which we got from the detailed images - # request earlier, so we can ignore it here. Note that we also - # only dump active images. - LOG.debug('Image %s is active', image['id']) - image_response = client.get_image(image['id']) - with open(data_filename, 'wb') as f: - while True: - chunk = image_response.read(options.chunksize) - if not chunk: - break - f.write(chunk) - - -def _dict_diff(a, b): - """A one way dictionary diff. 
- - a: a dictionary - b: a dictionary - - Returns: True if the dictionaries are different - """ - # Only things the source has which the target lacks matter - if set(a.keys()) - set(b.keys()): - LOG.debug('metadata diff -- source has extra keys: %(keys)s', - {'keys': ' '.join(set(a.keys()) - set(b.keys()))}) - return True - - for key in a: - if str(a[key]) != str(b[key]): - LOG.debug('metadata diff -- value differs for key ' - '%(key)s: source "%(source_value)s" vs ' - 'target "%(target_value)s"', - {'key': key, - 'source_value': a[key], - 'target_value': b[key]}) - return True - - return False - - -def replication_load(options, args): - """%(prog)s load - - Load the contents of a local directory into glance. - - server:port: the location of the glance instance. - path: a directory on disk containing the data. - """ - - # Make sure server and path are provided - if len(args) < 2: - raise TypeError(_("Too few arguments.")) - - path = args.pop() - server, port = utils.parse_valid_host_port(args.pop()) - - imageservice = get_image_service() - client = imageservice(http.HTTPConnection(server, port), - options.targettoken) - - updated = [] - - for ent in os.listdir(path): - if uuidutils.is_uuid_like(ent): - image_uuid = ent - LOG.info(_LI('Considering: %s'), image_uuid) - - meta_file_name = os.path.join(path, image_uuid) - with open(meta_file_name) as meta_file: - meta = jsonutils.loads(meta_file.read()) - - # Remove keys which don't make sense for replication - for key in options.dontreplicate.split(' '): - if key in meta: - LOG.debug('Stripping %(header)s from saved ' - 'metadata', {'header': key}) - del meta[key] - - if _image_present(client, image_uuid): - # NOTE(mikal): Perhaps we just need to update the metadata? - # Note that we don't attempt to change an image file once it - # has been uploaded. 
- LOG.debug('Image %s already present', image_uuid) - headers = client.get_image_meta(image_uuid) - for key in options.dontreplicate.split(' '): - if key in headers: - LOG.debug('Stripping %(header)s from target ' - 'metadata', {'header': key}) - del headers[key] - - if _dict_diff(meta, headers): - LOG.info(_LI('Image %s metadata has changed'), image_uuid) - headers, body = client.add_image_meta(meta) - _check_upload_response_headers(headers, body) - updated.append(meta['id']) - - else: - if not os.path.exists(os.path.join(path, image_uuid + '.img')): - LOG.debug('%s dump is missing image data, skipping', - image_uuid) - continue - - # Upload the image itself - with open(os.path.join(path, image_uuid + '.img')) as img_file: - try: - headers, body = client.add_image(meta, img_file) - _check_upload_response_headers(headers, body) - updated.append(meta['id']) - except exc.HTTPConflict: - LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE) - % image_uuid) # noqa - - return updated - - -def replication_livecopy(options, args): - """%(prog)s livecopy - - Load the contents of one glance instance into another. - - fromserver:port: the location of the source glance instance. - toserver:port: the location of the target glance instance. 
- """ - - # Make sure from-server and to-server are provided - if len(args) < 2: - raise TypeError(_("Too few arguments.")) - - imageservice = get_image_service() - - target_server, target_port = utils.parse_valid_host_port(args.pop()) - target_conn = http.HTTPConnection(target_server, target_port) - target_client = imageservice(target_conn, options.targettoken) - - source_server, source_port = utils.parse_valid_host_port(args.pop()) - source_conn = http.HTTPConnection(source_server, source_port) - source_client = imageservice(source_conn, options.sourcetoken) - - updated = [] - - for image in source_client.get_images(): - LOG.debug('Considering %(id)s', {'id': image['id']}) - for key in options.dontreplicate.split(' '): - if key in image: - LOG.debug('Stripping %(header)s from source metadata', - {'header': key}) - del image[key] - - if _image_present(target_client, image['id']): - # NOTE(mikal): Perhaps we just need to update the metadata? - # Note that we don't attempt to change an image file once it - # has been uploaded. 
- headers = target_client.get_image_meta(image['id']) - if headers['status'] == 'active': - for key in options.dontreplicate.split(' '): - if key in image: - LOG.debug('Stripping %(header)s from source ' - 'metadata', {'header': key}) - del image[key] - if key in headers: - LOG.debug('Stripping %(header)s from target ' - 'metadata', {'header': key}) - del headers[key] - - if _dict_diff(image, headers): - LOG.info(_LI('Image %(image_id)s (%(image_name)s) ' - 'metadata has changed'), - {'image_id': image['id'], - 'image_name': image.get('name', '--unnamed--')}) - headers, body = target_client.add_image_meta(image) - _check_upload_response_headers(headers, body) - updated.append(image['id']) - - elif image['status'] == 'active': - LOG.info(_LI('Image %(image_id)s (%(image_name)s) ' - '(%(image_size)d bytes) ' - 'is being synced'), - {'image_id': image['id'], - 'image_name': image.get('name', '--unnamed--'), - 'image_size': image['size']}) - if not options.metaonly: - image_response = source_client.get_image(image['id']) - try: - headers, body = target_client.add_image(image, - image_response) - _check_upload_response_headers(headers, body) - updated.append(image['id']) - except exc.HTTPConflict: - LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE) % image['id']) # noqa - - return updated - - -def replication_compare(options, args): - """%(prog)s compare - - Compare the contents of fromserver with those of toserver. - - fromserver:port: the location of the source glance instance. - toserver:port: the location of the target glance instance. 
- """ - - # Make sure from-server and to-server are provided - if len(args) < 2: - raise TypeError(_("Too few arguments.")) - - imageservice = get_image_service() - - target_server, target_port = utils.parse_valid_host_port(args.pop()) - target_conn = http.HTTPConnection(target_server, target_port) - target_client = imageservice(target_conn, options.targettoken) - - source_server, source_port = utils.parse_valid_host_port(args.pop()) - source_conn = http.HTTPConnection(source_server, source_port) - source_client = imageservice(source_conn, options.sourcetoken) - - differences = {} - - for image in source_client.get_images(): - if _image_present(target_client, image['id']): - headers = target_client.get_image_meta(image['id']) - for key in options.dontreplicate.split(' '): - if key in image: - LOG.debug('Stripping %(header)s from source metadata', - {'header': key}) - del image[key] - if key in headers: - LOG.debug('Stripping %(header)s from target metadata', - {'header': key}) - del headers[key] - - for key in image: - if image[key] != headers.get(key): - LOG.warn(_LW('%(image_id)s: field %(key)s differs ' - '(source is %(source_value)s, destination ' - 'is %(target_value)s)') - % {'image_id': image['id'], - 'key': key, - 'source_value': image[key], - 'target_value': headers.get(key, - 'undefined')}) - differences[image['id']] = 'diff' - else: - LOG.debug('%(image_id)s is identical', - {'image_id': image['id']}) - - elif image['status'] == 'active': - LOG.warn(_LW('Image %(image_id)s ("%(image_name)s") ' - 'entirely missing from the destination') - % {'image_id': image['id'], - 'image_name': image.get('name', '--unnamed')}) - differences[image['id']] = 'missing' - - return differences - - -def _check_upload_response_headers(headers, body): - """Check that the headers of an upload are reasonable. 
- - headers: the headers from the upload - body: the body from the upload - """ - - if 'status' not in headers: - try: - d = jsonutils.loads(body) - if 'image' in d and 'status' in d['image']: - return - - except Exception: - raise exception.UploadException(body) - - -def _image_present(client, image_uuid): - """Check if an image is present in glance. - - client: the ImageService - image_uuid: the image uuid to check - - Returns: True if the image is present - """ - headers = client.get_image_meta(image_uuid) - return 'status' in headers - - -def print_help(options, args): - """Print help specific to a command. - - options: the parsed command line options - args: the command line - """ - if not args: - print(COMMANDS) - else: - command_name = args.pop() - command = lookup_command(command_name) - print(command.__doc__ % {'prog': os.path.basename(sys.argv[0])}) - - -def lookup_command(command_name): - """Lookup a command. - - command_name: the command name - - Returns: a method which implements that command - """ - BASE_COMMANDS = {'help': print_help} - - REPLICATION_COMMANDS = {'compare': replication_compare, - 'dump': replication_dump, - 'livecopy': replication_livecopy, - 'load': replication_load, - 'size': replication_size} - - commands = {} - for command_set in (BASE_COMMANDS, REPLICATION_COMMANDS): - commands.update(command_set) - - try: - command = commands[command_name] - except KeyError: - if command_name: - sys.exit(_("Unknown command: %s") % command_name) - else: - command = commands['help'] - return command - - -def main(): - """The main function.""" - - try: - config.parse_args() - except RuntimeError as e: - sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e)) - except SystemExit as e: - sys.exit("Please specify one command") - - # Setup logging - logging.setup(CONF, 'glance') - - if CONF.token: - CONF.sourcetoken = CONF.token - CONF.targettoken = CONF.token - - command = lookup_command(CONF.command) - - try: - command(CONF, CONF.args) - except 
TypeError as e: - LOG.error(_LE(command.__doc__) % {'prog': command.__name__}) # noqa - sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e)) - except ValueError as e: - LOG.error(_LE(command.__doc__) % {'prog': command.__name__}) # noqa - sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e)) - - -if __name__ == '__main__': - main() diff --git a/glance/cmd/scrubber.py b/glance/cmd/scrubber.py deleted file mode 100644 index 8a39c32e..00000000 --- a/glance/cmd/scrubber.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2011-2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Glance Scrub Service -""" - -import os -import sys - -# If ../glance/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): - sys.path.insert(0, possible_topdir) -import eventlet - -import glance_store -from oslo_config import cfg -from oslo_log import log as logging - -from glance.common import config -from glance import scrubber - -eventlet.patcher.monkey_patch(all=False, socket=True, time=True, select=True, - thread=True, os=True) - -CONF = cfg.CONF -logging.register_options(CONF) -CONF.set_default(name='use_stderr', default=True) - - -def main(): - CONF.register_cli_opts(scrubber.scrubber_cmd_cli_opts) - CONF.register_opts(scrubber.scrubber_cmd_opts) - - try: - config.parse_args() - logging.setup(CONF, 'glance') - - glance_store.register_opts(config.CONF) - glance_store.create_stores(config.CONF) - glance_store.verify_default_store() - - app = scrubber.Scrubber(glance_store) - - if CONF.daemon: - server = scrubber.Daemon(CONF.wakeup_time) - server.start(app) - server.wait() - else: - app.run() - except RuntimeError as e: - sys.exit("ERROR: %s" % e) - - -if __name__ == '__main__': - main() diff --git a/glance/common/__init__.py b/glance/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/common/auth.py b/glance/common/auth.py deleted file mode 100644 index 3a5e94a0..00000000 --- a/glance/common/auth.py +++ /dev/null @@ -1,279 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -This auth module is intended to allow OpenStack client-tools to select from a -variety of authentication strategies, including NoAuth (the default), and -Keystone (an identity management system). - - > auth_plugin = AuthPlugin(creds) - - > auth_plugin.authenticate() - - > auth_plugin.auth_token - abcdefg - - > auth_plugin.management_url - http://service_endpoint/ -""" -import httplib2 -from keystoneclient import service_catalog as ks_service_catalog -from oslo_serialization import jsonutils -from six.moves import http_client as http -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range -import six.moves.urllib.parse as urlparse - -from glance.common import exception -from glance.i18n import _ - - -class BaseStrategy(object): - def __init__(self): - self.auth_token = None - # TODO(sirp): Should expose selecting public/internal/admin URL. 
- self.management_url = None - - def authenticate(self): - raise NotImplementedError - - @property - def is_authenticated(self): - raise NotImplementedError - - @property - def strategy(self): - raise NotImplementedError - - -class NoAuthStrategy(BaseStrategy): - def authenticate(self): - pass - - @property - def is_authenticated(self): - return True - - @property - def strategy(self): - return 'noauth' - - -class KeystoneStrategy(BaseStrategy): - MAX_REDIRECTS = 10 - - def __init__(self, creds, insecure=False, configure_via_auth=True): - self.creds = creds - self.insecure = insecure - self.configure_via_auth = configure_via_auth - super(KeystoneStrategy, self).__init__() - - def check_auth_params(self): - # Ensure that supplied credential parameters are as required - for required in ('username', 'password', 'auth_url', - 'strategy'): - if self.creds.get(required) is None: - raise exception.MissingCredentialError(required=required) - if self.creds['strategy'] != 'keystone': - raise exception.BadAuthStrategy(expected='keystone', - received=self.creds['strategy']) - # For v2.0 also check tenant is present - if self.creds['auth_url'].rstrip('/').endswith('v2.0'): - if self.creds.get("tenant") is None: - raise exception.MissingCredentialError(required='tenant') - - def authenticate(self): - """Authenticate with the Keystone service. - - There are a few scenarios to consider here: - - 1. Which version of Keystone are we using? v1 which uses headers to - pass the credentials, or v2 which uses a JSON encoded request body? - - 2. Keystone may respond back with a redirection using a 305 status - code. - - 3. We may attempt a v1 auth when v2 is what's called for. In this - case, we rewrite the url to contain /v2.0/ and retry using the v2 - protocol. - """ - def _authenticate(auth_url): - # If OS_AUTH_URL is missing a trailing slash add one - if not auth_url.endswith('/'): - auth_url += '/' - token_url = urlparse.urljoin(auth_url, "tokens") - # 1. 
Check Keystone version - is_v2 = auth_url.rstrip('/').endswith('v2.0') - if is_v2: - self._v2_auth(token_url) - else: - self._v1_auth(token_url) - - self.check_auth_params() - auth_url = self.creds['auth_url'] - for redirect_iter in range(self.MAX_REDIRECTS): - try: - _authenticate(auth_url) - except exception.AuthorizationRedirect as e: - # 2. Keystone may redirect us - auth_url = e.url - except exception.AuthorizationFailure: - # 3. In some configurations nova makes redirection to - # v2.0 keystone endpoint. Also, new location does not - # contain real endpoint, only hostname and port. - if 'v2.0' not in auth_url: - auth_url = urlparse.urljoin(auth_url, 'v2.0/') - else: - # If we successfully auth'd, then memorize the correct auth_url - # for future use. - self.creds['auth_url'] = auth_url - break - else: - # Guard against a redirection loop - raise exception.MaxRedirectsExceeded(redirects=self.MAX_REDIRECTS) - - def _v1_auth(self, token_url): - creds = self.creds - - headers = { - 'X-Auth-User': creds['username'], - 'X-Auth-Key': creds['password'] - } - - tenant = creds.get('tenant') - if tenant: - headers['X-Auth-Tenant'] = tenant - - resp, resp_body = self._do_request(token_url, 'GET', headers=headers) - - def _management_url(self, resp): - for url_header in ('x-image-management-url', - 'x-server-management-url', - 'x-glance'): - try: - return resp[url_header] - except KeyError as e: - not_found = e - raise not_found - - if resp.status in (http.OK, http.NO_CONTENT): - try: - if self.configure_via_auth: - self.management_url = _management_url(self, resp) - self.auth_token = resp['x-auth-token'] - except KeyError: - raise exception.AuthorizationFailure() - elif resp.status == http.USE_PROXY: - raise exception.AuthorizationRedirect(uri=resp['location']) - elif resp.status == http.BAD_REQUEST: - raise exception.AuthBadRequest(url=token_url) - elif resp.status == http.UNAUTHORIZED: - raise exception.NotAuthenticated() - elif resp.status == http.NOT_FOUND: - raise 
exception.AuthUrlNotFound(url=token_url) - else: - raise Exception(_('Unexpected response: %s') % resp.status) - - def _v2_auth(self, token_url): - - creds = self.creds - - creds = { - "auth": { - "tenantName": creds['tenant'], - "passwordCredentials": { - "username": creds['username'], - "password": creds['password'] - } - } - } - - headers = {'Content-Type': 'application/json'} - req_body = jsonutils.dumps(creds) - - resp, resp_body = self._do_request( - token_url, 'POST', headers=headers, body=req_body) - - if resp.status == http.OK: - resp_auth = jsonutils.loads(resp_body)['access'] - creds_region = self.creds.get('region') - if self.configure_via_auth: - endpoint = get_endpoint(resp_auth['serviceCatalog'], - endpoint_region=creds_region) - self.management_url = endpoint - self.auth_token = resp_auth['token']['id'] - elif resp.status == http.USE_PROXY: - raise exception.RedirectException(resp['location']) - elif resp.status == http.BAD_REQUEST: - raise exception.AuthBadRequest(url=token_url) - elif resp.status == http.UNAUTHORIZED: - raise exception.NotAuthenticated() - elif resp.status == http.NOT_FOUND: - raise exception.AuthUrlNotFound(url=token_url) - else: - raise Exception(_('Unexpected response: %s') % resp.status) - - @property - def is_authenticated(self): - return self.auth_token is not None - - @property - def strategy(self): - return 'keystone' - - def _do_request(self, url, method, headers=None, body=None): - headers = headers or {} - conn = httplib2.Http() - conn.force_exception_to_status_code = True - conn.disable_ssl_certificate_validation = self.insecure - headers['User-Agent'] = 'glance-client' - resp, resp_body = conn.request(url, method, headers=headers, body=body) - return resp, resp_body - - -def get_plugin_from_strategy(strategy, creds=None, insecure=False, - configure_via_auth=True): - if strategy == 'noauth': - return NoAuthStrategy() - elif strategy == 'keystone': - return KeystoneStrategy(creds, insecure, - 
configure_via_auth=configure_via_auth) - else: - raise Exception(_("Unknown auth strategy '%s'") % strategy) - - -def get_endpoint(service_catalog, service_type='image', endpoint_region=None, - endpoint_type='publicURL'): - """ - Select an endpoint from the service catalog - - We search the full service catalog for services - matching both type and region. If the client - supplied no region then any 'image' endpoint - is considered a match. There must be one -- and - only one -- successful match in the catalog, - otherwise we will raise an exception. - """ - endpoints = ks_service_catalog.ServiceCatalogV2( - {'serviceCatalog': service_catalog} - ).get_urls(service_type=service_type, - region_name=endpoint_region, - endpoint_type=endpoint_type) - if endpoints is None: - raise exception.NoServiceEndpoint() - elif len(endpoints) == 1: - return endpoints[0] - else: - raise exception.RegionAmbiguity(region=endpoint_region) diff --git a/glance/common/client.py b/glance/common/client.py deleted file mode 100644 index 53a3691d..00000000 --- a/glance/common/client.py +++ /dev/null @@ -1,603 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -# HTTPSClientAuthConnection code comes courtesy of ActiveState website: -# http://code.activestate.com/recipes/ -# 577548-https-httplib-client-connection-with-certificate-v/ - -import collections -import copy -import errno -import functools -import os -import re - -try: - from eventlet.green import socket - from eventlet.green import ssl -except ImportError: - import socket - import ssl - -import osprofiler.web - -try: - import sendfile # noqa - SENDFILE_SUPPORTED = True -except ImportError: - SENDFILE_SUPPORTED = False - -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import netutils -import six -from six.moves import http_client -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range -import six.moves.urllib.parse as urlparse - -from glance.common import auth -from glance.common import exception -from glance.common import utils -from glance.i18n import _ - -LOG = logging.getLogger(__name__) - -# common chunk size for get and put -CHUNKSIZE = 65536 - -VERSION_REGEX = re.compile(r"/?v[0-9\.]+") - - -def handle_unauthenticated(func): - """ - Wrap a function to re-authenticate and retry. - """ - @functools.wraps(func) - def wrapped(self, *args, **kwargs): - try: - return func(self, *args, **kwargs) - except exception.NotAuthenticated: - self._authenticate(force_reauth=True) - return func(self, *args, **kwargs) - return wrapped - - -def handle_redirects(func): - """ - Wrap the _do_request function to handle HTTP redirects. 
- """ - MAX_REDIRECTS = 5 - - @functools.wraps(func) - def wrapped(self, method, url, body, headers): - for i in range(MAX_REDIRECTS): - try: - return func(self, method, url, body, headers) - except exception.RedirectException as redirect: - if redirect.url is None: - raise exception.InvalidRedirect() - url = redirect.url - raise exception.MaxRedirectsExceeded(redirects=MAX_REDIRECTS) - return wrapped - - -class HTTPSClientAuthConnection(http_client.HTTPSConnection): - """ - Class to make a HTTPS connection, with support for - full client-based SSL Authentication - - :see http://code.activestate.com/recipes/ - 577548-https-httplib-client-connection-with-certificate-v/ - """ - - def __init__(self, host, port, key_file, cert_file, - ca_file, timeout=None, insecure=False): - http_client.HTTPSConnection.__init__(self, host, port, - key_file=key_file, - cert_file=cert_file) - self.key_file = key_file - self.cert_file = cert_file - self.ca_file = ca_file - self.timeout = timeout - self.insecure = insecure - - def connect(self): - """ - Connect to a host on a given (SSL) port. - If ca_file is pointing somewhere, use it to check Server Certificate. - - Redefined/copied and extended from httplib.py:1105 (Python 2.6.x). - This is needed to pass cert_reqs=ssl.CERT_REQUIRED as parameter to - ssl.wrap_socket(), which forces SSL to check server certificate against - our client certificate. 
- """ - sock = socket.create_connection((self.host, self.port), self.timeout) - if self._tunnel_host: - self.sock = sock - self._tunnel() - # Check CA file unless 'insecure' is specified - if self.insecure is True: - self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, - cert_reqs=ssl.CERT_NONE) - else: - self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, - ca_certs=self.ca_file, - cert_reqs=ssl.CERT_REQUIRED) - - -class BaseClient(object): - - """A base client class""" - - DEFAULT_PORT = 80 - DEFAULT_DOC_ROOT = None - # Standard CA file locations for Debian/Ubuntu, RedHat/Fedora, - # Suse, FreeBSD/OpenBSD - DEFAULT_CA_FILE_PATH = ('/etc/ssl/certs/ca-certificates.crt:' - '/etc/pki/tls/certs/ca-bundle.crt:' - '/etc/ssl/ca-bundle.pem:' - '/etc/ssl/cert.pem') - - OK_RESPONSE_CODES = ( - http_client.OK, - http_client.CREATED, - http_client.ACCEPTED, - http_client.NO_CONTENT, - ) - - REDIRECT_RESPONSE_CODES = ( - http_client.MOVED_PERMANENTLY, - http_client.FOUND, - http_client.SEE_OTHER, - http_client.USE_PROXY, - http_client.TEMPORARY_REDIRECT, - ) - - def __init__(self, host, port=None, timeout=None, use_ssl=False, - auth_token=None, creds=None, doc_root=None, key_file=None, - cert_file=None, ca_file=None, insecure=False, - configure_via_auth=True): - """ - Creates a new client to some service. - - :param host: The host where service resides - :param port: The port where service resides - :param timeout: Connection timeout. - :param use_ssl: Should we use HTTPS? - :param auth_token: The auth token to pass to the server - :param creds: The credentials to pass to the auth plugin - :param doc_root: Prefix for all URLs we request from host - :param key_file: Optional PEM-formatted file that contains the private - key. - If use_ssl is True, and this param is None (the - default), then an environ variable - GLANCE_CLIENT_KEY_FILE is looked for. If no such - environ variable is found, ClientConnectionError - will be raised. 
- :param cert_file: Optional PEM-formatted certificate chain file. - If use_ssl is True, and this param is None (the - default), then an environ variable - GLANCE_CLIENT_CERT_FILE is looked for. If no such - environ variable is found, ClientConnectionError - will be raised. - :param ca_file: Optional CA cert file to use in SSL connections - If use_ssl is True, and this param is None (the - default), then an environ variable - GLANCE_CLIENT_CA_FILE is looked for. - :param insecure: Optional. If set then the server's certificate - will not be verified. - :param configure_via_auth: Optional. Defaults to True. If set, the - URL returned from the service catalog for the image - endpoint will **override** the URL supplied to in - the host parameter. - """ - self.host = host - self.port = port or self.DEFAULT_PORT - self.timeout = timeout - # A value of '0' implies never timeout - if timeout == 0: - self.timeout = None - self.use_ssl = use_ssl - self.auth_token = auth_token - self.creds = creds or {} - self.connection = None - self.configure_via_auth = configure_via_auth - # doc_root can be a nullstring, which is valid, and why we - # cannot simply do doc_root or self.DEFAULT_DOC_ROOT below. 
- self.doc_root = (doc_root if doc_root is not None - else self.DEFAULT_DOC_ROOT) - - self.key_file = key_file - self.cert_file = cert_file - self.ca_file = ca_file - self.insecure = insecure - self.auth_plugin = self.make_auth_plugin(self.creds, self.insecure) - self.connect_kwargs = self.get_connect_kwargs() - - def get_connect_kwargs(self): - # Both secure and insecure connections have a timeout option - connect_kwargs = {'timeout': self.timeout} - - if self.use_ssl: - if self.key_file is None: - self.key_file = os.environ.get('GLANCE_CLIENT_KEY_FILE') - if self.cert_file is None: - self.cert_file = os.environ.get('GLANCE_CLIENT_CERT_FILE') - if self.ca_file is None: - self.ca_file = os.environ.get('GLANCE_CLIENT_CA_FILE') - - # Check that key_file/cert_file are either both set or both unset - if self.cert_file is not None and self.key_file is None: - msg = _("You have selected to use SSL in connecting, " - "and you have supplied a cert, " - "however you have failed to supply either a " - "key_file parameter or set the " - "GLANCE_CLIENT_KEY_FILE environ variable") - raise exception.ClientConnectionError(msg) - - if self.key_file is not None and self.cert_file is None: - msg = _("You have selected to use SSL in connecting, " - "and you have supplied a key, " - "however you have failed to supply either a " - "cert_file parameter or set the " - "GLANCE_CLIENT_CERT_FILE environ variable") - raise exception.ClientConnectionError(msg) - - if (self.key_file is not None and - not os.path.exists(self.key_file)): - msg = _("The key file you specified %s does not " - "exist") % self.key_file - raise exception.ClientConnectionError(msg) - connect_kwargs['key_file'] = self.key_file - - if (self.cert_file is not None and - not os.path.exists(self.cert_file)): - msg = _("The cert file you specified %s does not " - "exist") % self.cert_file - raise exception.ClientConnectionError(msg) - connect_kwargs['cert_file'] = self.cert_file - - if (self.ca_file is not None and - not 
os.path.exists(self.ca_file)): - msg = _("The CA file you specified %s does not " - "exist") % self.ca_file - raise exception.ClientConnectionError(msg) - - if self.ca_file is None: - for ca in self.DEFAULT_CA_FILE_PATH.split(":"): - if os.path.exists(ca): - self.ca_file = ca - break - - connect_kwargs['ca_file'] = self.ca_file - connect_kwargs['insecure'] = self.insecure - - return connect_kwargs - - def configure_from_url(self, url): - """ - Setups the connection based on the given url. - - The form is: - - ://:port/doc_root - """ - LOG.debug("Configuring from URL: %s", url) - parsed = urlparse.urlparse(url) - self.use_ssl = parsed.scheme == 'https' - self.host = parsed.hostname - self.port = parsed.port or 80 - self.doc_root = parsed.path.rstrip('/') - - # We need to ensure a version identifier is appended to the doc_root - if not VERSION_REGEX.match(self.doc_root): - if self.DEFAULT_DOC_ROOT: - doc_root = self.DEFAULT_DOC_ROOT.lstrip('/') - self.doc_root += '/' + doc_root - LOG.debug("Appending doc_root %(doc_root)s to URL %(url)s", - {'doc_root': doc_root, 'url': url}) - - # ensure connection kwargs are re-evaluated after the service catalog - # publicURL is parsed for potential SSL usage - self.connect_kwargs = self.get_connect_kwargs() - - def make_auth_plugin(self, creds, insecure): - """ - Returns an instantiated authentication plugin. - """ - strategy = creds.get('strategy', 'noauth') - plugin = auth.get_plugin_from_strategy(strategy, creds, insecure, - self.configure_via_auth) - return plugin - - def get_connection_type(self): - """ - Returns the proper connection type - """ - if self.use_ssl: - return HTTPSClientAuthConnection - else: - return http_client.HTTPConnection - - def _authenticate(self, force_reauth=False): - """ - Use the authentication plugin to authenticate and set the auth token. - - :param force_reauth: For re-authentication to bypass cache. 
- """ - auth_plugin = self.auth_plugin - - if not auth_plugin.is_authenticated or force_reauth: - auth_plugin.authenticate() - - self.auth_token = auth_plugin.auth_token - - management_url = auth_plugin.management_url - if management_url and self.configure_via_auth: - self.configure_from_url(management_url) - - @handle_unauthenticated - def do_request(self, method, action, body=None, headers=None, - params=None): - """ - Make a request, returning an HTTP response object. - - :param method: HTTP verb (GET, POST, PUT, etc.) - :param action: Requested path to append to self.doc_root - :param body: Data to send in the body of the request - :param headers: Headers to send with the request - :param params: Key/value pairs to use in query string - :returns: HTTP response object - """ - if not self.auth_token: - self._authenticate() - - url = self._construct_url(action, params) - # NOTE(ameade): We need to copy these kwargs since they can be altered - # in _do_request but we need the originals if handle_unauthenticated - # calls this function again. - return self._do_request(method=method, url=url, - body=copy.deepcopy(body), - headers=copy.deepcopy(headers)) - - def _construct_url(self, action, params=None): - """ - Create a URL object we can use to pass to _do_request(). 
- """ - action = urlparse.quote(action) - path = '/'.join([self.doc_root or '', action.lstrip('/')]) - scheme = "https" if self.use_ssl else "http" - if netutils.is_valid_ipv6(self.host): - netloc = "[%s]:%d" % (self.host, self.port) - else: - netloc = "%s:%d" % (self.host, self.port) - - if isinstance(params, dict): - for (key, value) in list(params.items()): - if value is None: - del params[key] - continue - if not isinstance(value, six.string_types): - value = str(value) - params[key] = encodeutils.safe_encode(value) - query = urlparse.urlencode(params) - else: - query = None - - url = urlparse.ParseResult(scheme, netloc, path, '', query, '') - log_msg = _("Constructed URL: %s") - LOG.debug(log_msg, url.geturl()) - return url - - def _encode_headers(self, headers): - """ - Encodes headers. - - Note: This should be used right before - sending anything out. - - :param headers: Headers to encode - :returns: Dictionary with encoded headers' - names and values - """ - if six.PY3: - to_str = str - else: - to_str = encodeutils.safe_encode - return {to_str(h): to_str(v) for h, v in six.iteritems(headers)} - - @handle_redirects - def _do_request(self, method, url, body, headers): - """ - Connects to the server and issues a request. Handles converting - any returned HTTP error status codes to OpenStack/Glance exceptions - and closing the server connection. Returns the result data, or - raises an appropriate exception. - - :param method: HTTP method ("GET", "POST", "PUT", etc...) - :param url: urlparse.ParsedResult object with URL information - :param body: data to send (as string, filelike or iterable), - or None (default) - :param headers: mapping of key/value pairs to add as headers - - :note - - If the body param has a read attribute, and method is either - POST or PUT, this method will automatically conduct a chunked-transfer - encoding and use the body as a file object or iterable, transferring - chunks of data using the connection's send() method. 
This allows large - objects to be transferred efficiently without buffering the entire - body in memory. - """ - if url.query: - path = url.path + "?" + url.query - else: - path = url.path - - try: - connection_type = self.get_connection_type() - headers = self._encode_headers(headers or {}) - headers.update(osprofiler.web.get_trace_id_headers()) - - if 'x-auth-token' not in headers and self.auth_token: - headers['x-auth-token'] = self.auth_token - - c = connection_type(url.hostname, url.port, **self.connect_kwargs) - - def _pushing(method): - return method.lower() in ('post', 'put') - - def _simple(body): - return body is None or isinstance(body, bytes) - - def _filelike(body): - return hasattr(body, 'read') - - def _sendbody(connection, iter): - connection.endheaders() - for sent in iter: - # iterator has done the heavy lifting - pass - - def _chunkbody(connection, iter): - connection.putheader('Transfer-Encoding', 'chunked') - connection.endheaders() - for chunk in iter: - connection.send('%x\r\n%s\r\n' % (len(chunk), chunk)) - connection.send('0\r\n\r\n') - - # Do a simple request or a chunked request, depending - # on whether the body param is file-like or iterable and - # the method is PUT or POST - # - if not _pushing(method) or _simple(body): - # Simple request... - c.request(method, path, body, headers) - elif _filelike(body) or self._iterable(body): - c.putrequest(method, path) - - use_sendfile = self._sendable(body) - - # According to HTTP/1.1, Content-Length and Transfer-Encoding - # conflict. 
- for header, value in headers.items(): - if use_sendfile or header.lower() != 'content-length': - c.putheader(header, str(value)) - - iter = utils.chunkreadable(body) - - if use_sendfile: - # send actual file without copying into userspace - _sendbody(c, iter) - else: - # otherwise iterate and chunk - _chunkbody(c, iter) - else: - raise TypeError('Unsupported image type: %s' % body.__class__) - - res = c.getresponse() - - def _retry(res): - return res.getheader('Retry-After') - - def read_body(res): - body = res.read() - if six.PY3: - body = body.decode('utf-8') - return body - - status_code = self.get_status_code(res) - if status_code in self.OK_RESPONSE_CODES: - return res - elif status_code in self.REDIRECT_RESPONSE_CODES: - raise exception.RedirectException(res.getheader('Location')) - elif status_code == http_client.UNAUTHORIZED: - raise exception.NotAuthenticated(read_body(res)) - elif status_code == http_client.FORBIDDEN: - raise exception.Forbidden(read_body(res)) - elif status_code == http_client.NOT_FOUND: - raise exception.NotFound(read_body(res)) - elif status_code == http_client.CONFLICT: - raise exception.Duplicate(read_body(res)) - elif status_code == http_client.BAD_REQUEST: - raise exception.Invalid(read_body(res)) - elif status_code == http_client.MULTIPLE_CHOICES: - raise exception.MultipleChoices(body=read_body(res)) - elif status_code == http_client.REQUEST_ENTITY_TOO_LARGE: - raise exception.LimitExceeded(retry=_retry(res), - body=read_body(res)) - elif status_code == http_client.INTERNAL_SERVER_ERROR: - raise exception.ServerError() - elif status_code == http_client.SERVICE_UNAVAILABLE: - raise exception.ServiceUnavailable(retry=_retry(res)) - else: - raise exception.UnexpectedStatus(status=status_code, - body=read_body(res)) - - except (socket.error, IOError) as e: - raise exception.ClientConnectionError(e) - - def _seekable(self, body): - # pipes are not seekable, avoids sendfile() failure on e.g. - # cat /path/to/image | glance add ... 
- # or where add command is launched via popen - try: - os.lseek(body.fileno(), 0, os.SEEK_CUR) - return True - except OSError as e: - return (e.errno != errno.ESPIPE) - - def _sendable(self, body): - return (SENDFILE_SUPPORTED and - hasattr(body, 'fileno') and - self._seekable(body) and - not self.use_ssl) - - def _iterable(self, body): - return isinstance(body, collections.Iterable) - - def get_status_code(self, response): - """ - Returns the integer status code from the response, which - can be either a Webob.Response (used in testing) or httplib.Response - """ - if hasattr(response, 'status_int'): - return response.status_int - else: - return response.status - - def _extract_params(self, actual_params, allowed_params): - """ - Extract a subset of keys from a dictionary. The filters key - will also be extracted, and each of its values will be returned - as an individual param. - - :param actual_params: dict of keys to filter - :param allowed_params: list of keys that 'actual_params' will be - reduced to - :returns: subset of 'params' dict - """ - try: - # expect 'filters' param to be a dict here - result = dict(actual_params.get('filters')) - except TypeError: - result = {} - - for allowed_param in allowed_params: - if allowed_param in actual_params: - result[allowed_param] = actual_params[allowed_param] - - return result diff --git a/glance/common/config.py b/glance/common/config.py deleted file mode 100644 index 094a1011..00000000 --- a/glance/common/config.py +++ /dev/null @@ -1,847 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Routines for configuring Glance -""" - -import logging -import os - -from oslo_config import cfg -from oslo_middleware import cors -from oslo_policy import policy -from paste import deploy - -from glance.i18n import _ -from glance.version import version_info as version - -paste_deploy_opts = [ - cfg.StrOpt('flavor', - sample_default='keystone', - help=_(""" -Deployment flavor to use in the server application pipeline. - -Provide a string value representing the appropriate deployment -flavor used in the server application pipleline. This is typically -the partial name of a pipeline in the paste configuration file with -the service name removed. - -For example, if your paste section name in the paste configuration -file is [pipeline:glance-api-keystone], set ``flavor`` to -``keystone``. - -Possible values: - * String value representing a partial pipeline name. - -Related Options: - * config_file - -""")), - cfg.StrOpt('config_file', - sample_default='glance-api-paste.ini', - help=_(""" -Name of the paste configuration file. - -Provide a string value representing the name of the paste -configuration file to use for configuring piplelines for -server application deployments. - -NOTES: - * Provide the name or the path relative to the glance directory - for the paste configuration file and not the absolute path. - * The sample paste configuration file shipped with Glance need - not be edited in most cases as it comes with ready-made - pipelines for all common deployment flavors. 
- -If no value is specified for this option, the ``paste.ini`` file -with the prefix of the corresponding Glance service's configuration -file name will be searched for in the known configuration -directories. (For example, if this option is missing from or has no -value set in ``glance-api.conf``, the service will look for a file -named ``glance-api-paste.ini``.) If the paste configuration file is -not found, the service will not start. - -Possible values: - * A string value representing the name of the paste configuration - file. - -Related Options: - * flavor - -""")), -] -image_format_opts = [ - cfg.ListOpt('container_formats', - default=['ami', 'ari', 'aki', 'bare', 'ovf', 'ova', 'docker'], - help=_("Supported values for the 'container_format' " - "image attribute"), - deprecated_opts=[cfg.DeprecatedOpt('container_formats', - group='DEFAULT')]), - cfg.ListOpt('disk_formats', - default=['ami', 'ari', 'aki', 'vhd', 'vhdx', 'vmdk', 'raw', - 'qcow2', 'vdi', 'iso', 'ploop'], - help=_("Supported values for the 'disk_format' " - "image attribute"), - deprecated_opts=[cfg.DeprecatedOpt('disk_formats', - group='DEFAULT')]), -] -task_opts = [ - cfg.IntOpt('task_time_to_live', - default=48, - help=_("Time in hours for which a task lives after, either " - "succeeding or failing"), - deprecated_opts=[cfg.DeprecatedOpt('task_time_to_live', - group='DEFAULT')]), - cfg.StrOpt('task_executor', - default='taskflow', - help=_(""" -Task executor to be used to run task scripts. - -Provide a string value representing the executor to use for task -executions. By default, ``TaskFlow`` executor is used. - -``TaskFlow`` helps make task executions easy, consistent, scalable -and reliable. It also enables creation of lightweight task objects -and/or functions that are combined together into flows in a -declarative manner. 
- -Possible values: - * taskflow - -Related Options: - * None - -""")), - cfg.StrOpt('work_dir', - sample_default='/work_dir', - help=_(""" -Absolute path to the work directory to use for asynchronous -task operations. - -The directory set here will be used to operate over images - -normally before they are imported in the destination store. - -NOTE: When providing a value for ``work_dir``, please make sure -that enough space is provided for concurrent tasks to run -efficiently without running out of space. - -A rough estimation can be done by multiplying the number of -``max_workers`` with an average image size (e.g 500MB). The image -size estimation should be done based on the average size in your -deployment. Note that depending on the tasks running you may need -to multiply this number by some factor depending on what the task -does. For example, you may want to double the available size if -image conversion is enabled. All this being said, remember these -are just estimations and you should do them based on the worst -case scenario and be prepared to act in case they were wrong. - -Possible values: - * String value representing the absolute path to the working - directory - -Related Options: - * None - -""")), -] - -_DEPRECATE_GLANCE_V1_MSG = _('The Images (Glance) version 1 API has been ' - 'DEPRECATED in the Newton release and will be ' - 'removed on or after Pike release, following ' - 'the standard OpenStack deprecation policy. ' - 'Hence, the configuration options specific to ' - 'the Images (Glance) v1 API are hereby ' - 'deprecated and subject to removal. Operators ' - 'are advised to deploy the Images (Glance) v2 ' - 'API.') - -common_opts = [ - cfg.BoolOpt('allow_additional_image_properties', default=True, - help=_(""" -Allow users to add additional/custom properties to images. - -Glance defines a standard set of properties (in its schema) that -appear on every image. These properties are also known as -``base properties``. 
In addition to these properties, Glance -allows users to add custom properties to images. These are known -as ``additional properties``. - -By default, this configuration option is set to ``True`` and users -are allowed to add additional properties. The number of additional -properties that can be added to an image can be controlled via -``image_property_quota`` configuration option. - -Possible values: - * True - * False - -Related options: - * image_property_quota - -""")), - cfg.IntOpt('image_member_quota', default=128, - help=_(""" -Maximum number of image members per image. - -This limits the maximum of users an image can be shared with. Any negative -value is interpreted as unlimited. - -Related options: - * None - -""")), - cfg.IntOpt('image_property_quota', default=128, - help=_(""" -Maximum number of properties allowed on an image. - -This enforces an upper limit on the number of additional properties an image -can have. Any negative value is interpreted as unlimited. - -NOTE: This won't have any impact if additional properties are disabled. Please -refer to ``allow_additional_image_properties``. - -Related options: - * ``allow_additional_image_properties`` - -""")), - cfg.IntOpt('image_tag_quota', default=128, - help=_(""" -Maximum number of tags allowed on an image. - -Any negative value is interpreted as unlimited. - -Related options: - * None - -""")), - cfg.IntOpt('image_location_quota', default=10, - help=_(""" -Maximum number of locations allowed on an image. - -Any negative value is interpreted as unlimited. - -Related options: - * None - -""")), - # TODO(abashmak): Add choices parameter to this option: - # choices('glance.db.sqlalchemy.api', - # 'glance.db.registry.api', - # 'glance.db.simple.api') - # This will require a fix to the functional tests which - # set this option to a test version of the registry api module: - # (glance.tests.functional.v2.registry_data_api), in order to - # bypass keystone authentication for the Registry service. 
- # All such tests are contained in: - # glance/tests/functional/v2/test_images.py - cfg.StrOpt('data_api', - default='glance.db.sqlalchemy.api', - help=_(""" -Python module path of data access API. - -Specifies the path to the API to use for accessing the data model. -This option determines how the image catalog data will be accessed. - -Possible values: - * glance.db.sqlalchemy.api - * glance.db.registry.api - * glance.db.simple.api - -If this option is set to ``glance.db.sqlalchemy.api`` then the image -catalog data is stored in and read from the database via the -SQLAlchemy Core and ORM APIs. - -Setting this option to ``glance.db.registry.api`` will force all -database access requests to be routed through the Registry service. -This avoids data access from the Glance API nodes for an added layer -of security, scalability and manageability. - -NOTE: In v2 OpenStack Images API, the registry service is optional. -In order to use the Registry API in v2, the option -``enable_v2_registry`` must be set to ``True``. - -Finally, when this configuration option is set to -``glance.db.simple.api``, image catalog data is stored in and read -from an in-memory data structure. This is primarily used for testing. - -Related options: - * enable_v2_api - * enable_v2_registry - -""")), - cfg.IntOpt('limit_param_default', default=25, min=1, - help=_(""" -The default number of results to return for a request. - -Responses to certain API requests, like list images, may return -multiple items. The number of results returned can be explicitly -controlled by specifying the ``limit`` parameter in the API request. -However, if a ``limit`` parameter is not specified, this -configuration value will be used as the default number of results to -be returned for any API request. - -NOTES: - * The value of this configuration option may not be greater than - the value specified by ``api_limit_max``. 
- * Setting this to a very large value may slow down database - queries and increase response times. Setting this to a - very low value may result in poor user experience. - -Possible values: - * Any positive integer - -Related options: - * api_limit_max - -""")), - cfg.IntOpt('api_limit_max', default=1000, min=1, - help=_(""" -Maximum number of results that could be returned by a request. - -As described in the help text of ``limit_param_default``, some -requests may return multiple results. The number of results to be -returned are governed either by the ``limit`` parameter in the -request or the ``limit_param_default`` configuration option. -The value in either case, can't be greater than the absolute maximum -defined by this configuration option. Anything greater than this -value is trimmed down to the maximum value defined here. - -NOTE: Setting this to a very large value may slow down database - queries and increase response times. Setting this to a - very low value may result in poor user experience. - -Possible values: - * Any positive integer - -Related options: - * limit_param_default - -""")), - cfg.BoolOpt('show_image_direct_url', default=False, - help=_(""" -Show direct image location when returning an image. - -This configuration option indicates whether to show the direct image -location when returning image details to the user. The direct image -location is where the image data is stored in backend storage. This -image location is shown under the image property ``direct_url``. - -When multiple image locations exist for an image, the best location -is displayed based on the location strategy indicated by the -configuration option ``location_strategy``. - -NOTES: - * Revealing image locations can present a GRAVE SECURITY RISK as - image locations can sometimes include credentials. Hence, this - is set to ``False`` by default. Set this to ``True`` with - EXTREME CAUTION and ONLY IF you know what you are doing! 
- * If an operator wishes to avoid showing any image location(s) - to the user, then both this option and - ``show_multiple_locations`` MUST be set to ``False``. - -Possible values: - * True - * False - -Related options: - * show_multiple_locations - * location_strategy - -""")), - # NOTE(flaper87): The policy.json file should be updated and the locaiton - # related rules set to admin only once this option is finally removed. - cfg.BoolOpt('show_multiple_locations', default=False, - deprecated_for_removal=True, - deprecated_reason=_('This option will be removed in the Pike ' - 'release or later because the same ' - 'functionality can be achieved with ' - 'greater granularity by using policies. ' - 'Please see the Newton ' - 'release notes for more information.'), - deprecated_since='Newton', - help=_(""" -Show all image locations when returning an image. - -This configuration option indicates whether to show all the image -locations when returning image details to the user. When multiple -image locations exist for an image, the locations are ordered based -on the location strategy indicated by the configuration opt -``location_strategy``. The image locations are shown under the -image property ``locations``. - -NOTES: - * Revealing image locations can present a GRAVE SECURITY RISK as - image locations can sometimes include credentials. Hence, this - is set to ``False`` by default. Set this to ``True`` with - EXTREME CAUTION and ONLY IF you know what you are doing! - * If an operator wishes to avoid showing any image location(s) - to the user, then both this option and - ``show_image_direct_url`` MUST be set to ``False``. - -Possible values: - * True - * False - -Related options: - * show_image_direct_url - * location_strategy - -""")), - cfg.IntOpt('image_size_cap', default=1099511627776, min=1, - max=9223372036854775808, - help=_(""" -Maximum size of image a user can upload in bytes. 
- -An image upload greater than the size mentioned here would result -in an image creation failure. This configuration option defaults to -1099511627776 bytes (1 TiB). - -NOTES: - * This value should only be increased after careful - consideration and must be set less than or equal to - 8 EiB (9223372036854775808). - * This value must be set with careful consideration of the - backend storage capacity. Setting this to a very low value - may result in a large number of image failures. And, setting - this to a very large value may result in faster consumption - of storage. Hence, this must be set according to the nature of - images created and storage capacity available. - -Possible values: - * Any positive number less than or equal to 9223372036854775808 - -""")), - cfg.StrOpt('user_storage_quota', default='0', - help=_(""" -Maximum amount of image storage per tenant. - -This enforces an upper limit on the cumulative storage consumed by all images -of a tenant across all stores. This is a per-tenant limit. - -The default unit for this configuration option is Bytes. However, storage -units can be specified using case-sensitive literals ``B``, ``KB``, ``MB``, -``GB`` and ``TB`` representing Bytes, KiloBytes, MegaBytes, GigaBytes and -TeraBytes respectively. Note that there should not be any space between the -value and unit. Value ``0`` signifies no quota enforcement. Negative values -are invalid and result in errors. - -Possible values: - * A string that is a valid concatenation of a non-negative integer - representing the storage value and an optional string literal - representing storage units as mentioned above. - -Related options: - * None - -""")), - # NOTE(nikhil): Even though deprecated, the configuration option - # ``enable_v1_api`` is set to True by default on purpose. Having it enabled - # helps the projects that haven't been able to fully move to v2 yet by - # keeping the devstack setup to use glance v1 as well. 
We need to switch it - # to False by default soon after Newton is cut so that we can identify the - # projects that haven't moved to v2 yet and start having some interesting - # conversations with them. Switching to False in Newton may result into - # destabilizing the gate and affect the release. - cfg.BoolOpt('enable_v1_api', - default=True, - deprecated_reason=_DEPRECATE_GLANCE_V1_MSG, - deprecated_since='Newton', - help=_(""" -Deploy the v1 OpenStack Images API. - -When this option is set to ``True``, Glance service will respond to -requests on registered endpoints conforming to the v1 OpenStack -Images API. - -NOTES: - * If this option is enabled, then ``enable_v1_registry`` must - also be set to ``True`` to enable mandatory usage of Registry - service with v1 API. - - * If this option is disabled, then the ``enable_v1_registry`` - option, which is enabled by default, is also recommended - to be disabled. - - * This option is separate from ``enable_v2_api``, both v1 and v2 - OpenStack Images API can be deployed independent of each - other. - - * If deploying only the v2 Images API, this option, which is - enabled by default, should be disabled. - -Possible values: - * True - * False - -Related options: - * enable_v1_registry - * enable_v2_api - -""")), - cfg.BoolOpt('enable_v2_api', - default=True, - deprecated_reason=_('The Images (Glance) version 1 API has ' - 'been DEPRECATED in the Newton release. ' - 'It will be removed on or after Pike ' - 'release, following the standard ' - 'OpenStack deprecation policy. Once we ' - 'remove the Images (Glance) v1 API, only ' - 'the Images (Glance) v2 API can be ' - 'deployed and will be enabled by default ' - 'making this option redundant.'), - deprecated_since='Newton', - help=_(""" -Deploy the v2 OpenStack Images API. - -When this option is set to ``True``, Glance service will respond -to requests on registered endpoints conforming to the v2 OpenStack -Images API. 
- -NOTES: - * If this option is disabled, then the ``enable_v2_registry`` - option, which is enabled by default, is also recommended - to be disabled. - - * This option is separate from ``enable_v1_api``, both v1 and v2 - OpenStack Images API can be deployed independent of each - other. - - * If deploying only the v1 Images API, this option, which is - enabled by default, should be disabled. - -Possible values: - * True - * False - -Related options: - * enable_v2_registry - * enable_v1_api - -""")), - cfg.BoolOpt('enable_v1_registry', - default=True, - deprecated_reason=_DEPRECATE_GLANCE_V1_MSG, - deprecated_since='Newton', - help=_(""" -Deploy the v1 API Registry service. - -When this option is set to ``True``, the Registry service -will be enabled in Glance for v1 API requests. - -NOTES: - * Use of Registry is mandatory in v1 API, so this option must - be set to ``True`` if the ``enable_v1_api`` option is enabled. - - * If deploying only the v2 OpenStack Images API, this option, - which is enabled by default, should be disabled. - -Possible values: - * True - * False - -Related options: - * enable_v1_api - -""")), - cfg.BoolOpt('enable_v2_registry', - default=True, - help=_(""" -Deploy the v2 API Registry service. - -When this option is set to ``True``, the Registry service -will be enabled in Glance for v2 API requests. - -NOTES: - * Use of Registry is optional in v2 API, so this option - must only be enabled if both ``enable_v2_api`` is set to - ``True`` and the ``data_api`` option is set to - ``glance.db.registry.api``. - - * If deploying only the v1 OpenStack Images API, this option, - which is enabled by default, should be disabled. - -Possible values: - * True - * False - -Related options: - * enable_v2_api - * data_api - -""")), - cfg.HostAddressOpt('pydev_worker_debug_host', - sample_default='localhost', - help=_(""" -Host address of the pydev server. - -Provide a string value representing the hostname or IP of the -pydev server to use for debugging. 
The pydev server listens for -debug connections on this address, facilitating remote debugging -in Glance. - -Possible values: - * Valid hostname - * Valid IP address - -Related options: - * None - -""")), - cfg.PortOpt('pydev_worker_debug_port', - default=5678, - help=_(""" -Port number that the pydev server will listen on. - -Provide a port number to bind the pydev server to. The pydev -process accepts debug connections on this port and facilitates -remote debugging in Glance. - -Possible values: - * A valid port number - -Related options: - * None - -""")), - cfg.StrOpt('metadata_encryption_key', - secret=True, - help=_(""" -AES key for encrypting store location metadata. - -Provide a string value representing the AES cipher to use for -encrypting Glance store metadata. - -NOTE: The AES key to use must be set to a random string of length -16, 24 or 32 bytes. - -Possible values: - * String value representing a valid AES key - -Related options: - * None - -""")), - cfg.StrOpt('digest_algorithm', - default='sha256', - help=_(""" -Digest algorithm to use for digital signature. - -Provide a string value representing the digest algorithm to -use for generating digital signatures. By default, ``sha256`` -is used. - -To get a list of the available algorithms supported by the version -of OpenSSL on your platform, run the command: -``openssl list-message-digest-algorithms``. -Examples are 'sha1', 'sha256', and 'sha512'. - -NOTE: ``digest_algorithm`` is not related to Glance's image signing -and verification. It is only used to sign the universally unique -identifier (UUID) as a part of the certificate file and key file -validation. - -Possible values: - * An OpenSSL message digest algorithm identifier - -Relation options: - * None - -""")), - cfg.StrOpt('node_staging_uri', - default='file:///tmp/staging/', - help=_(""" -The URL provides location where the temporary data will be stored - -This option is for Glance internal use only. 
Glance will save the -image data uploaded by the user to 'staging' endpoint during the -image import process. - -This option does not change the 'staging' API endpoint by any means. - -NOTE: It is discouraged to use same path as [TASKS]/work_dir - -NOTE: 'file://' is the only option -api_image_import flow will support for now. - -NOTE: The staging path must be on shared filesystem available to all -Glance API nodes. - -Possible values: - * String starting with 'file://' followed by absolute FS path - -Related options: - * [TASKS]/work_dir - * [DEFAULT]/enable_image_import (*deprecated*) - -""")), - cfg.BoolOpt('enable_image_import', - default=False, - deprecated_for_removal=True, - deprecated_reason=_(""" -This option is deprecated for removal in Rocky. - -It was introduced to make sure that the API is not enabled -before the '[DEFAULT]/node_staging_uri' is defined and is -long term redundant."""), - deprecated_since='Pike', - help=_(""" -Enables the Image Import workflow introduced in Pike - -As '[DEFAULT]/node_staging_uri' is required for the Image -Import, it's disabled per default in Pike, enabled per -default in Queens and removed in Rocky. This allows Glance to -operate with previous version configs upon upgrade. - -Setting this option to True will enable the endpoints related -to Image Import Refactoring work. 
- -Related options: - * [DEFUALT]/node_staging_uri""")), -] - -CONF = cfg.CONF -CONF.register_opts(paste_deploy_opts, group='paste_deploy') -CONF.register_opts(image_format_opts, group='image_format') -CONF.register_opts(task_opts, group='task') -CONF.register_opts(common_opts) -policy.Enforcer(CONF) - - -def parse_args(args=None, usage=None, default_config_files=None): - CONF(args=args, - project='glance', - version=version.cached_version_string(), - usage=usage, - default_config_files=default_config_files) - - -def parse_cache_args(args=None): - config_files = cfg.find_config_files(project='glance', prog='glance-cache') - parse_args(args=args, default_config_files=config_files) - - -def _get_deployment_flavor(flavor=None): - """ - Retrieve the paste_deploy.flavor config item, formatted appropriately - for appending to the application name. - - :param flavor: if specified, use this setting rather than the - paste_deploy.flavor configuration setting - """ - if not flavor: - flavor = CONF.paste_deploy.flavor - return '' if not flavor else ('-' + flavor) - - -def _get_paste_config_path(): - paste_suffix = '-paste.ini' - conf_suffix = '.conf' - if CONF.config_file: - # Assume paste config is in a paste.ini file corresponding - # to the last config file - path = CONF.config_file[-1].replace(conf_suffix, paste_suffix) - else: - path = CONF.prog + paste_suffix - return CONF.find_file(os.path.basename(path)) - - -def _get_deployment_config_file(): - """ - Retrieve the deployment_config_file config item, formatted as an - absolute pathname. - """ - path = CONF.paste_deploy.config_file - if not path: - path = _get_paste_config_path() - if not path: - msg = _("Unable to locate paste config file for %s.") % CONF.prog - raise RuntimeError(msg) - return os.path.abspath(path) - - -def load_paste_app(app_name, flavor=None, conf_file=None): - """ - Builds and returns a WSGI app from a paste config file. 
- - We assume the last config file specified in the supplied ConfigOpts - object is the paste config file, if conf_file is None. - - :param app_name: name of the application to load - :param flavor: name of the variant of the application to load - :param conf_file: path to the paste config file - - :raises RuntimeError: when config file cannot be located or application - cannot be loaded from config file - """ - # append the deployment flavor to the application name, - # in order to identify the appropriate paste pipeline - app_name += _get_deployment_flavor(flavor) - - if not conf_file: - conf_file = _get_deployment_config_file() - - try: - logger = logging.getLogger(__name__) - logger.debug("Loading %(app_name)s from %(conf_file)s", - {'conf_file': conf_file, 'app_name': app_name}) - - app = deploy.loadapp("config:%s" % conf_file, name=app_name) - - # Log the options used when starting if we're in debug mode... - if CONF.debug: - CONF.log_opt_values(logger, logging.DEBUG) - - return app - except (LookupError, ImportError) as e: - msg = (_("Unable to load %(app_name)s from " - "configuration file %(conf_file)s." 
- "\nGot: %(e)r") % {'app_name': app_name, - 'conf_file': conf_file, - 'e': e}) - logger.error(msg) - raise RuntimeError(msg) - - -def set_config_defaults(): - """This method updates all configuration default values.""" - set_cors_middleware_defaults() - - -def set_cors_middleware_defaults(): - """Update default configuration options for oslo.middleware.""" - cors.set_defaults( - allow_headers=['Content-MD5', - 'X-Image-Meta-Checksum', - 'X-Storage-Token', - 'Accept-Encoding', - 'X-Auth-Token', - 'X-Identity-Status', - 'X-Roles', - 'X-Service-Catalog', - 'X-User-Id', - 'X-Tenant-Id', - 'X-OpenStack-Request-ID'], - expose_headers=['X-Image-Meta-Checksum', - 'X-Auth-Token', - 'X-Subject-Token', - 'X-Service-Token', - 'X-OpenStack-Request-ID'], - allow_methods=['GET', - 'PUT', - 'POST', - 'DELETE', - 'PATCH'] - ) diff --git a/glance/common/crypt.py b/glance/common/crypt.py deleted file mode 100644 index f1ec6db4..00000000 --- a/glance/common/crypt.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Routines for URL-safe encrypting/decrypting -""" - -import base64 -import os -import random - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import algorithms -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers import modes -from oslo_utils import encodeutils -import six -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - - -def urlsafe_encrypt(key, plaintext, blocksize=16): - """ - Encrypts plaintext. Resulting ciphertext will contain URL-safe characters. - If plaintext is Unicode, encode it to UTF-8 before encryption. - - :param key: AES secret key - :param plaintext: Input text to be encrypted - :param blocksize: Non-zero integer multiple of AES blocksize in bytes (16) - - :returns: Resulting ciphertext - """ - def pad(text): - """ - Pads text to be encrypted - """ - pad_length = (blocksize - len(text) % blocksize) - # NOTE(rosmaita): I know this looks stupid, but we can't just - # use os.urandom() to get the bytes because we use char(0) as - # a delimiter - pad = b''.join(six.int2byte(random.SystemRandom().randint(1, 0xFF)) - for i in range(pad_length - 1)) - # We use chr(0) as a delimiter between text and padding - return text + b'\0' + pad - - plaintext = encodeutils.to_utf8(plaintext) - key = encodeutils.to_utf8(key) - # random initial 16 bytes for CBC - init_vector = os.urandom(16) - backend = default_backend() - cypher = Cipher(algorithms.AES(key), modes.CBC(init_vector), - backend=backend) - encryptor = cypher.encryptor() - padded = encryptor.update( - pad(six.binary_type(plaintext))) + encryptor.finalize() - encoded = base64.urlsafe_b64encode(init_vector + padded) - if six.PY3: - encoded = encoded.decode('ascii') - return encoded - - -def urlsafe_decrypt(key, ciphertext): - """ - Decrypts URL-safe base64 encoded ciphertext. - On Python 3, the result is decoded from UTF-8. 
- - :param key: AES secret key - :param ciphertext: The encrypted text to decrypt - - :returns: Resulting plaintext - """ - # Cast from unicode - ciphertext = encodeutils.to_utf8(ciphertext) - key = encodeutils.to_utf8(key) - ciphertext = base64.urlsafe_b64decode(ciphertext) - backend = default_backend() - cypher = Cipher(algorithms.AES(key), modes.CBC(ciphertext[:16]), - backend=backend) - decryptor = cypher.decryptor() - padded = decryptor.update(ciphertext[16:]) + decryptor.finalize() - text = padded[:padded.rfind(b'\0')] - if six.PY3: - text = text.decode('utf-8') - return text diff --git a/glance/common/exception.py b/glance/common/exception.py deleted file mode 100644 index 097957a7..00000000 --- a/glance/common/exception.py +++ /dev/null @@ -1,458 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Glance exception subclasses""" - -import six -import six.moves.urllib.parse as urlparse - -from glance.i18n import _ - -_FATAL_EXCEPTION_FORMAT_ERRORS = False - - -class RedirectException(Exception): - def __init__(self, url): - self.url = urlparse.urlparse(url) - - -class GlanceException(Exception): - """ - Base Glance Exception - - To correctly use this class, inherit from it and define - a 'message' property. That message will get printf'd - with the keyword arguments provided to the constructor. 
- """ - message = _("An unknown exception occurred") - - def __init__(self, message=None, *args, **kwargs): - if not message: - message = self.message - try: - if kwargs: - message = message % kwargs - except Exception: - if _FATAL_EXCEPTION_FORMAT_ERRORS: - raise - else: - # at least get the core message out if something happened - pass - self.msg = message - super(GlanceException, self).__init__(message) - - def __unicode__(self): - # NOTE(flwang): By default, self.msg is an instance of Message, which - # can't be converted by str(). Based on the definition of - # __unicode__, it should return unicode always. - return six.text_type(self.msg) - - -class MissingCredentialError(GlanceException): - message = _("Missing required credential: %(required)s") - - -class BadAuthStrategy(GlanceException): - message = _("Incorrect auth strategy, expected \"%(expected)s\" but " - "received \"%(received)s\"") - - -class NotFound(GlanceException): - message = _("An object with the specified identifier was not found.") - - -class BadStoreUri(GlanceException): - message = _("The Store URI was malformed.") - - -class Duplicate(GlanceException): - message = _("An object with the same identifier already exists.") - - -class Conflict(GlanceException): - message = _("An object with the same identifier is currently being " - "operated on.") - - -class StorageQuotaFull(GlanceException): - message = _("The size of the data %(image_size)s will exceed the limit. 
" - "%(remaining)s bytes remaining.") - - -class AuthBadRequest(GlanceException): - message = _("Connect error/bad request to Auth service at URL %(url)s.") - - -class AuthUrlNotFound(GlanceException): - message = _("Auth service at URL %(url)s not found.") - - -class AuthorizationFailure(GlanceException): - message = _("Authorization failed.") - - -class NotAuthenticated(GlanceException): - message = _("You are not authenticated.") - - -class UploadException(GlanceException): - message = _('Image upload problem: %s') - - -class Forbidden(GlanceException): - message = _("You are not authorized to complete %(action)s action.") - - -class ForbiddenPublicImage(Forbidden): - message = _("You are not authorized to complete this action.") - - -class ProtectedImageDelete(Forbidden): - message = _("Image %(image_id)s is protected and cannot be deleted.") - - -class ProtectedMetadefNamespaceDelete(Forbidden): - message = _("Metadata definition namespace %(namespace)s is protected" - " and cannot be deleted.") - - -class ProtectedMetadefNamespacePropDelete(Forbidden): - message = _("Metadata definition property %(property_name)s is protected" - " and cannot be deleted.") - - -class ProtectedMetadefObjectDelete(Forbidden): - message = _("Metadata definition object %(object_name)s is protected" - " and cannot be deleted.") - - -class ProtectedMetadefResourceTypeAssociationDelete(Forbidden): - message = _("Metadata definition resource-type-association" - " %(resource_type)s is protected and cannot be deleted.") - - -class ProtectedMetadefResourceTypeSystemDelete(Forbidden): - message = _("Metadata definition resource-type %(resource_type_name)s is" - " a seeded-system type and cannot be deleted.") - - -class ProtectedMetadefTagDelete(Forbidden): - message = _("Metadata definition tag %(tag_name)s is protected" - " and cannot be deleted.") - - -class Invalid(GlanceException): - message = _("Data supplied was not valid.") - - -class InvalidSortKey(Invalid): - message = _("Sort 
key supplied was not valid.") - - -class InvalidSortDir(Invalid): - message = _("Sort direction supplied was not valid.") - - -class InvalidPropertyProtectionConfiguration(Invalid): - message = _("Invalid configuration in property protection file.") - - -class InvalidSwiftStoreConfiguration(Invalid): - message = _("Invalid configuration in glance-swift conf file.") - - -class InvalidFilterOperatorValue(Invalid): - message = _("Unable to filter using the specified operator.") - - -class InvalidFilterRangeValue(Invalid): - message = _("Unable to filter using the specified range.") - - -class InvalidOptionValue(Invalid): - message = _("Invalid value for option %(option)s: %(value)s") - - -class ReadonlyProperty(Forbidden): - message = _("Attribute '%(property)s' is read-only.") - - -class ReservedProperty(Forbidden): - message = _("Attribute '%(property)s' is reserved.") - - -class AuthorizationRedirect(GlanceException): - message = _("Redirecting to %(uri)s for authorization.") - - -class ClientConnectionError(GlanceException): - message = _("There was an error connecting to a server") - - -class ClientConfigurationError(GlanceException): - message = _("There was an error configuring the client.") - - -class MultipleChoices(GlanceException): - message = _("The request returned a 302 Multiple Choices. This generally " - "means that you have not included a version indicator in a " - "request URI.\n\nThe body of response returned:\n%(body)s") - - -class LimitExceeded(GlanceException): - message = _("The request returned a 413 Request Entity Too Large. This " - "generally means that rate limiting or a quota threshold was " - "breached.\n\nThe response body:\n%(body)s") - - def __init__(self, *args, **kwargs): - self.retry_after = (int(kwargs['retry']) if kwargs.get('retry') - else None) - super(LimitExceeded, self).__init__(*args, **kwargs) - - -class ServiceUnavailable(GlanceException): - message = _("The request returned 503 Service Unavailable. 
This " - "generally occurs on service overload or other transient " - "outage.") - - def __init__(self, *args, **kwargs): - self.retry_after = (int(kwargs['retry']) if kwargs.get('retry') - else None) - super(ServiceUnavailable, self).__init__(*args, **kwargs) - - -class ServerError(GlanceException): - message = _("The request returned 500 Internal Server Error.") - - -class UnexpectedStatus(GlanceException): - message = _("The request returned an unexpected status: %(status)s." - "\n\nThe response body:\n%(body)s") - - -class InvalidContentType(GlanceException): - message = _("Invalid content type %(content_type)s") - - -class BadRegistryConnectionConfiguration(GlanceException): - message = _("Registry was not configured correctly on API server. " - "Reason: %(reason)s") - - -class BadDriverConfiguration(GlanceException): - message = _("Driver %(driver_name)s could not be configured correctly. " - "Reason: %(reason)s") - - -class MaxRedirectsExceeded(GlanceException): - message = _("Maximum redirects (%(redirects)s) was exceeded.") - - -class InvalidRedirect(GlanceException): - message = _("Received invalid HTTP redirect.") - - -class NoServiceEndpoint(GlanceException): - message = _("Response from Keystone does not contain a Glance endpoint.") - - -class RegionAmbiguity(GlanceException): - message = _("Multiple 'image' service matches for region %(region)s. 
This " - "generally means that a region is required and you have not " - "supplied one.") - - -class WorkerCreationFailure(GlanceException): - message = _("Server worker creation failed: %(reason)s.") - - -class SchemaLoadError(GlanceException): - message = _("Unable to load schema: %(reason)s") - - -class InvalidObject(GlanceException): - message = _("Provided object does not match schema " - "'%(schema)s': %(reason)s") - - -class ImageSizeLimitExceeded(GlanceException): - message = _("The provided image is too large.") - - -class FailedToGetScrubberJobs(GlanceException): - message = _("Scrubber encountered an error while trying to fetch " - "scrub jobs.") - - -class ImageMemberLimitExceeded(LimitExceeded): - message = _("The limit has been exceeded on the number of allowed image " - "members for this image. Attempted: %(attempted)s, " - "Maximum: %(maximum)s") - - -class ImagePropertyLimitExceeded(LimitExceeded): - message = _("The limit has been exceeded on the number of allowed image " - "properties. Attempted: %(attempted)s, Maximum: %(maximum)s") - - -class ImageTagLimitExceeded(LimitExceeded): - message = _("The limit has been exceeded on the number of allowed image " - "tags. Attempted: %(attempted)s, Maximum: %(maximum)s") - - -class ImageLocationLimitExceeded(LimitExceeded): - message = _("The limit has been exceeded on the number of allowed image " - "locations. 
Attempted: %(attempted)s, Maximum: %(maximum)s") - - -class SIGHUPInterrupt(GlanceException): - message = _("System SIGHUP signal received.") - - -class RPCError(GlanceException): - message = _("%(cls)s exception was raised in the last rpc call: %(val)s") - - -class TaskException(GlanceException): - message = _("An unknown task exception occurred") - - -class BadTaskConfiguration(GlanceException): - message = _("Task was not configured properly") - - -class ImageNotFound(NotFound): - message = _("Image with the given id %(image_id)s was not found") - - -class TaskNotFound(TaskException, NotFound): - message = _("Task with the given id %(task_id)s was not found") - - -class InvalidTaskStatus(TaskException, Invalid): - message = _("Provided status of task is unsupported: %(status)s") - - -class InvalidTaskType(TaskException, Invalid): - message = _("Provided type of task is unsupported: %(type)s") - - -class InvalidTaskStatusTransition(TaskException, Invalid): - message = _("Status transition from %(cur_status)s to" - " %(new_status)s is not allowed") - - -class ImportTaskError(TaskException, Invalid): - message = _("An import task exception occurred") - - -class DuplicateLocation(Duplicate): - message = _("The location %(location)s already exists") - - -class InvalidParameterValue(Invalid): - message = _("Invalid value '%(value)s' for parameter '%(param)s': " - "%(extra_msg)s") - - -class InvalidImageStatusTransition(Invalid): - message = _("Image status transition from %(cur_status)s to" - " %(new_status)s is not allowed") - - -class MetadefDuplicateNamespace(Duplicate): - message = _("The metadata definition namespace=%(namespace_name)s" - " already exists.") - - -class MetadefDuplicateObject(Duplicate): - message = _("A metadata definition object with name=%(object_name)s" - " already exists in namespace=%(namespace_name)s.") - - -class MetadefDuplicateProperty(Duplicate): - message = _("A metadata definition property with name=%(property_name)s" - " already 
exists in namespace=%(namespace_name)s.") - - -class MetadefDuplicateResourceType(Duplicate): - message = _("A metadata definition resource-type with" - " name=%(resource_type_name)s already exists.") - - -class MetadefDuplicateResourceTypeAssociation(Duplicate): - message = _("The metadata definition resource-type association of" - " resource-type=%(resource_type_name)s to" - " namespace=%(namespace_name)s" - " already exists.") - - -class MetadefDuplicateTag(Duplicate): - message = _("A metadata tag with name=%(name)s" - " already exists in namespace=%(namespace_name)s." - " (Please note that metadata tag names are" - " case insensitive).") - - -class MetadefForbidden(Forbidden): - message = _("You are not authorized to complete this action.") - - -class MetadefIntegrityError(Forbidden): - message = _("The metadata definition %(record_type)s with" - " name=%(record_name)s not deleted." - " Other records still refer to it.") - - -class MetadefNamespaceNotFound(NotFound): - message = _("Metadata definition namespace=%(namespace_name)s" - " was not found.") - - -class MetadefObjectNotFound(NotFound): - message = _("The metadata definition object with" - " name=%(object_name)s was not found in" - " namespace=%(namespace_name)s.") - - -class MetadefPropertyNotFound(NotFound): - message = _("The metadata definition property with" - " name=%(property_name)s was not found in" - " namespace=%(namespace_name)s.") - - -class MetadefResourceTypeNotFound(NotFound): - message = _("The metadata definition resource-type with" - " name=%(resource_type_name)s, was not found.") - - -class MetadefResourceTypeAssociationNotFound(NotFound): - message = _("The metadata definition resource-type association of" - " resource-type=%(resource_type_name)s to" - " namespace=%(namespace_name)s," - " was not found.") - - -class MetadefTagNotFound(NotFound): - message = _("The metadata definition tag with" - " name=%(name)s was not found in" - " namespace=%(namespace_name)s.") - - -class 
InvalidDataMigrationScript(GlanceException): - message = _("Invalid data migration script '%(script)s'. A valid data " - "migration script must implement functions 'has_migrations' " - "and 'migrate'.") diff --git a/glance/common/location_strategy/__init__.py b/glance/common/location_strategy/__init__.py deleted file mode 100644 index c96f5012..00000000 --- a/glance/common/location_strategy/__init__.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2014 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -from oslo_config import cfg -from oslo_log import log as logging -import stevedore - -from glance.i18n import _, _LE - -location_strategy_opts = [ - cfg.StrOpt('location_strategy', - default='location_order', - choices=('location_order', 'store_type'), - help=_(""" -Strategy to determine the preference order of image locations. - -This configuration option indicates the strategy to determine -the order in which an image's locations must be accessed to -serve the image's data. Glance then retrieves the image data -from the first responsive active location it finds in this list. - -This option takes one of two possible values ``location_order`` -and ``store_type``. The default value is ``location_order``, -which suggests that image data be served by using locations in -the order they are stored in Glance. 
The ``store_type`` value -sets the image location preference based on the order in which -the storage backends are listed as a comma separated list for -the configuration option ``store_type_preference``. - -Possible values: - * location_order - * store_type - -Related options: - * store_type_preference - -""")), -] - -CONF = cfg.CONF -CONF.register_opts(location_strategy_opts) - -LOG = logging.getLogger(__name__) - - -def _load_strategies(): - """Load all strategy modules.""" - modules = {} - namespace = "glance.common.image_location_strategy.modules" - ex = stevedore.extension.ExtensionManager(namespace) - for module_name in ex.names(): - try: - mgr = stevedore.driver.DriverManager( - namespace=namespace, - name=module_name, - invoke_on_load=False) - - # Obtain module name - strategy_name = str(mgr.driver.get_strategy_name()) - if strategy_name in modules: - msg = (_('%(strategy)s is registered as a module twice. ' - '%(module)s is not being used.') % - {'strategy': strategy_name, 'module': module_name}) - LOG.warn(msg) - else: - # Initialize strategy module - mgr.driver.init() - modules[strategy_name] = mgr.driver - except Exception as e: - LOG.error(_LE("Failed to load location strategy module " - "%(module)s: %(e)s") % {'module': module_name, - 'e': e}) - return modules - - -_available_strategies = _load_strategies() - - -# TODO(kadachi): Not used but don't remove this until glance_store -# development/migration stage. -def verify_location_strategy(conf=None, strategies=_available_strategies): - """Validate user configured 'location_strategy' option value.""" - if not conf: - conf = CONF.location_strategy - if conf not in strategies: - msg = (_('Invalid location_strategy option: %(name)s. 
' - 'The valid strategy option(s) is(are): %(strategies)s') % - {'name': conf, 'strategies': ", ".join(strategies.keys())}) - LOG.error(msg) - raise RuntimeError(msg) - - -def get_ordered_locations(locations, **kwargs): - """ - Order image location list by configured strategy. - - :param locations: The original image location list. - :param kwargs: Strategy-specific arguments for under layer strategy module. - :returns: The image location list with strategy-specific order. - """ - if not locations: - return [] - strategy_module = _available_strategies[CONF.location_strategy] - return strategy_module.get_ordered_locations(copy.deepcopy(locations), - **kwargs) - - -def choose_best_location(locations, **kwargs): - """ - Choose best location from image location list by configured strategy. - - :param locations: The original image location list. - :param kwargs: Strategy-specific arguments for under layer strategy module. - :returns: The best location from image location list. - """ - locations = get_ordered_locations(locations, **kwargs) - if locations: - return locations[0] - else: - return None diff --git a/glance/common/location_strategy/location_order.py b/glance/common/location_strategy/location_order.py deleted file mode 100644 index c044bc62..00000000 --- a/glance/common/location_strategy/location_order.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2014 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Image location order based location strategy module""" - - -def get_strategy_name(): - """Return strategy module name.""" - return 'location_order' - - -def init(): - """Initialize strategy module.""" - pass - - -def get_ordered_locations(locations, **kwargs): - """ - Order image location list. - - :param locations: The original image location list. - :returns: The image location list with original natural order. - """ - return locations diff --git a/glance/common/location_strategy/store_type.py b/glance/common/location_strategy/store_type.py deleted file mode 100644 index 104381e2..00000000 --- a/glance/common/location_strategy/store_type.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright 2014 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Storage preference based location strategy module""" - -from oslo_config import cfg -import six -import six.moves.urllib.parse as urlparse - -from glance.i18n import _ - -store_type_opts = [ - cfg.ListOpt('store_type_preference', - default=[], - help=_(""" -Preference order of storage backends. - -Provide a comma separated list of store names in the order in -which images should be retrieved from storage backends. -These store names must be registered with the ``stores`` -configuration option. - -NOTE: The ``store_type_preference`` configuration option is applied -only if ``store_type`` is chosen as a value for the -``location_strategy`` configuration option. 
An empty list will not -change the location order. - -Possible values: - * Empty list - * Comma separated list of registered store names. Legal values are: - * file - * http - * rbd - * swift - * sheepdog - * cinder - * vmware - -Related options: - * location_strategy - * stores - -""")) -] - -CONF = cfg.CONF -CONF.register_opts(store_type_opts, group='store_type_location_strategy') - -_STORE_TO_SCHEME_MAP = {} - - -def get_strategy_name(): - """Return strategy module name.""" - return 'store_type' - - -def init(): - """Initialize strategy module.""" - # NOTE(zhiyan): We have a plan to do a reusable glance client library for - # all clients like Nova and Cinder in near period, it would be able to - # contains common code to provide uniform image service interface for them, - # just like Brick in Cinder, this code can be moved to there and shared - # between Glance and client both side. So this implementation as far as - # possible to prevent make relationships with Glance(server)-specific code, - # for example: using functions within store module to validate - # 'store_type_preference' option. - mapping = {'file': ['file', 'filesystem'], - 'http': ['http', 'https'], - 'rbd': ['rbd'], - 'swift': ['swift', 'swift+https', 'swift+http'], - 'sheepdog': ['sheepdog'], - 'cinder': ['cinder'], - 'vmware': ['vsphere']} - _STORE_TO_SCHEME_MAP.clear() - _STORE_TO_SCHEME_MAP.update(mapping) - - -def get_ordered_locations(locations, uri_key='url', **kwargs): - """ - Order image location list. - - :param locations: The original image location list. - :param uri_key: The key name for location URI in image location dictionary. - :returns: The image location list with preferred store type order. 
- """ - def _foreach_store_type_preference(): - store_types = CONF.store_type_location_strategy.store_type_preference - for preferred_store in store_types: - preferred_store = str(preferred_store).strip() - if not preferred_store: - continue - yield preferred_store - - if not locations: - return locations - - preferences = {} - others = [] - for preferred_store in _foreach_store_type_preference(): - preferences[preferred_store] = [] - - for location in locations: - uri = location.get(uri_key) - if not uri: - continue - pieces = urlparse.urlparse(uri.strip()) - - store_name = None - for store, schemes in six.iteritems(_STORE_TO_SCHEME_MAP): - if pieces.scheme.strip() in schemes: - store_name = store - break - - if store_name in preferences: - preferences[store_name].append(location) - else: - others.append(location) - - ret = [] - # NOTE(zhiyan): While configuration again since py26 does not support - # ordereddict container. - for preferred_store in _foreach_store_type_preference(): - ret.extend(preferences[preferred_store]) - - ret.extend(others) - - return ret diff --git a/glance/common/property_utils.py b/glance/common/property_utils.py deleted file mode 100644 index 48aec70b..00000000 --- a/glance/common/property_utils.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright 2013 Rackspace -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import re -import sys - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_policy import policy -from six.moves import configparser - -import glance.api.policy -from glance.common import exception -from glance.i18n import _, _LE, _LW - -# SafeConfigParser was deprecated in Python 3.2 -if sys.version_info >= (3, 2): - CONFIG = configparser.ConfigParser() -else: - CONFIG = configparser.SafeConfigParser() - -LOG = logging.getLogger(__name__) - -property_opts = [ - cfg.StrOpt('property_protection_file', - help=_(""" -The location of the property protection file. - -Provide a valid path to the property protection file which contains -the rules for property protections and the roles/policies associated -with them. - -A property protection file, when set, restricts the Glance image -properties to be created, read, updated and/or deleted by a specific -set of users that are identified by either roles or policies. -If this configuration option is not set, by default, property -protections won't be enforced. If a value is specified and the file -is not found, the glance-api service will fail to start. -More information on property protections can be found at: -http://docs.openstack.org/developer/glance/property-protections.html - -Possible values: - * Empty string - * Valid path to the property protection configuration file - -Related options: - * property_protection_rule_format - -""")), - cfg.StrOpt('property_protection_rule_format', - default='roles', - choices=('roles', 'policies'), - help=_(""" -Rule format for property protection. - -Provide the desired way to set property protection on Glance -image properties. The two permissible values are ``roles`` -and ``policies``. The default value is ``roles``. - -If the value is ``roles``, the property protection file must -contain a comma separated list of user roles indicating -permissions for each of the CRUD operations on each property -being protected. 
If set to ``policies``, a policy defined in -policy.json is used to express property protections for each -of the CRUD operations. Examples of how property protections -are enforced based on ``roles`` or ``policies`` can be found at: -http://docs.openstack.org/developer/glance/property-protections.html#examples - -Possible values: - * roles - * policies - -Related options: - * property_protection_file - -""")), -] - -CONF = cfg.CONF -CONF.register_opts(property_opts) - -# NOTE (spredzy): Due to the particularly lengthy name of the exception -# and the number of occurrence it is raise in this file, a variable is -# created -InvalidPropProtectConf = exception.InvalidPropertyProtectionConfiguration - - -def is_property_protection_enabled(): - if CONF.property_protection_file: - return True - return False - - -class PropertyRules(object): - - def __init__(self, policy_enforcer=None): - self.rules = [] - self.prop_exp_mapping = {} - self.policies = [] - self.policy_enforcer = policy_enforcer or glance.api.policy.Enforcer() - self.prop_prot_rule_format = CONF.property_protection_rule_format - self.prop_prot_rule_format = self.prop_prot_rule_format.lower() - self._load_rules() - - def _load_rules(self): - try: - conf_file = CONF.find_file(CONF.property_protection_file) - CONFIG.read(conf_file) - except Exception as e: - msg = (_LE("Couldn't find property protection file %(file)s: " - "%(error)s.") % {'file': CONF.property_protection_file, - 'error': e}) - LOG.error(msg) - raise InvalidPropProtectConf() - - if self.prop_prot_rule_format not in ['policies', 'roles']: - msg = _LE("Invalid value '%s' for " - "'property_protection_rule_format'. 
" - "The permitted values are " - "'roles' and 'policies'") % self.prop_prot_rule_format - LOG.error(msg) - raise InvalidPropProtectConf() - - operations = ['create', 'read', 'update', 'delete'] - properties = CONFIG.sections() - for property_exp in properties: - property_dict = {} - compiled_rule = self._compile_rule(property_exp) - - for operation in operations: - permissions = CONFIG.get(property_exp, operation) - if permissions: - if self.prop_prot_rule_format == 'policies': - if ',' in permissions: - LOG.error( - _LE("Multiple policies '%s' not allowed " - "for a given operation. Policies can be " - "combined in the policy file"), - permissions) - raise InvalidPropProtectConf() - self.prop_exp_mapping[compiled_rule] = property_exp - self._add_policy_rules(property_exp, operation, - permissions) - permissions = [permissions] - else: - permissions = [permission.strip() for permission in - permissions.split(',')] - if '@' in permissions and '!' in permissions: - msg = (_LE( - "Malformed property protection rule in " - "[%(prop)s] %(op)s=%(perm)s: '@' and '!' " - "are mutually exclusive") % - dict(prop=property_exp, - op=operation, - perm=permissions)) - LOG.error(msg) - raise InvalidPropProtectConf() - property_dict[operation] = permissions - else: - property_dict[operation] = [] - LOG.warn( - _LW('Property protection on operation %(operation)s' - ' for rule %(rule)s is not found. No role will be' - ' allowed to perform this operation.') % - {'operation': operation, - 'rule': property_exp}) - - self.rules.append((compiled_rule, property_dict)) - - def _compile_rule(self, rule): - try: - return re.compile(rule) - except Exception as e: - msg = (_LE("Encountered a malformed property protection rule" - " %(rule)s: %(error)s.") % {'rule': rule, - 'error': e}) - LOG.error(msg) - raise InvalidPropProtectConf() - - def _add_policy_rules(self, property_exp, action, rule): - """Add policy rules to the policy enforcer. 
- - For example, if the file listed as property_protection_file has: - [prop_a] - create = glance_creator - then the corresponding policy rule would be: - "prop_a:create": "rule:glance_creator" - where glance_creator is defined in policy.json. For example: - "glance_creator": "role:admin or role:glance_create_user" - """ - rule = "rule:%s" % rule - rule_name = "%s:%s" % (property_exp, action) - rule_dict = policy.Rules.from_dict({ - rule_name: rule - }) - self.policy_enforcer.add_rules(rule_dict) - - def _check_policy(self, property_exp, action, context): - try: - action = ":".join([property_exp, action]) - self.policy_enforcer.enforce(context, action, {}) - except exception.Forbidden: - return False - return True - - def check_property_rules(self, property_name, action, context): - roles = context.roles - if not self.rules: - return True - - if action not in ['create', 'read', 'update', 'delete']: - return False - - for rule_exp, rule in self.rules: - if rule_exp.search(str(property_name)): - break - else: # no matching rules - return False - - rule_roles = rule.get(action) - if rule_roles: - if '!' in rule_roles: - return False - elif '@' in rule_roles: - return True - if self.prop_prot_rule_format == 'policies': - prop_exp_key = self.prop_exp_mapping[rule_exp] - return self._check_policy(prop_exp_key, action, - context) - if set(roles).intersection(set([role.lower() for role - in rule_roles])): - return True - return False diff --git a/glance/common/rpc.py b/glance/common/rpc.py deleted file mode 100644 index a60cb3c1..00000000 --- a/glance/common/rpc.py +++ /dev/null @@ -1,302 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -RPC Controller -""" -import datetime -import traceback - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -import oslo_utils.importutils as imp -import six -from webob import exc - -from glance.common import client -from glance.common import exception -from glance.common import timeutils -from glance.common import wsgi -from glance.i18n import _, _LE - -LOG = logging.getLogger(__name__) - - -rpc_opts = [ - cfg.ListOpt('allowed_rpc_exception_modules', - default=['glance.common.exception', - 'builtins', - 'exceptions', - ], - help=_(""" -List of allowed exception modules to handle RPC exceptions. - -Provide a comma separated list of modules whose exceptions are -permitted to be recreated upon receiving exception data via an RPC -call made to Glance. The default list includes -``glance.common.exception``, ``builtins``, and ``exceptions``. - -The RPC protocol permits interaction with Glance via calls across a -network or within the same system. Including a list of exception -namespaces with this option enables RPC to propagate the exceptions -back to the users. 
- -Possible values: - * A comma separated list of valid exception modules - -Related options: - * None -""")), -] - -CONF = cfg.CONF -CONF.register_opts(rpc_opts) - - -class RPCJSONSerializer(wsgi.JSONResponseSerializer): - - @staticmethod - def _to_primitive(_type, _value): - return {"_type": _type, "_value": _value} - - def _sanitizer(self, obj): - if isinstance(obj, datetime.datetime): - return self._to_primitive("datetime", - obj.isoformat()) - - return super(RPCJSONSerializer, self)._sanitizer(obj) - - -class RPCJSONDeserializer(wsgi.JSONRequestDeserializer): - - @staticmethod - def _to_datetime(obj): - return timeutils.normalize_time(timeutils.parse_isotime(obj)) - - def _sanitizer(self, obj): - try: - _type, _value = obj["_type"], obj["_value"] - return getattr(self, "_to_" + _type)(_value) - except (KeyError, AttributeError): - return obj - - -class Controller(object): - """ - Base RPCController. - - This is the base controller for RPC based APIs. Commands - handled by this controller respect the following form: - - :: - - [{ - 'command': 'method_name', - 'kwargs': {...} - }] - - The controller is capable of processing more than one command - per request and will always return a list of results. - - :param bool raise_exc: Specifies whether to raise - exceptions instead of "serializing" them. - - """ - - def __init__(self, raise_exc=False): - self._registered = {} - self.raise_exc = raise_exc - - def register(self, resource, filtered=None, excluded=None, refiner=None): - """ - Exports methods through the RPC Api. - - :param resource: Resource's instance to register. - :param filtered: List of methods that *can* be registered. Read - as "Method must be in this list". - :param excluded: List of methods to exclude. - :param refiner: Callable to use as filter for methods. - - :raises TypeError: If refiner is not callable. 
- - """ - - funcs = [x for x in dir(resource) if not x.startswith("_")] - - if filtered: - funcs = [f for f in funcs if f in filtered] - - if excluded: - funcs = [f for f in funcs if f not in excluded] - - if refiner: - funcs = filter(refiner, funcs) - - for name in funcs: - meth = getattr(resource, name) - - if not callable(meth): - continue - - self._registered[name] = meth - - def __call__(self, req, body): - """ - Executes the command - """ - - if not isinstance(body, list): - msg = _("Request must be a list of commands") - raise exc.HTTPBadRequest(explanation=msg) - - def validate(cmd): - if not isinstance(cmd, dict): - msg = _("Bad Command: %s") % str(cmd) - raise exc.HTTPBadRequest(explanation=msg) - - command, kwargs = cmd.get("command"), cmd.get("kwargs") - - if (not command or not isinstance(command, six.string_types) or - (kwargs and not isinstance(kwargs, dict))): - msg = _("Wrong command structure: %s") % (str(cmd)) - raise exc.HTTPBadRequest(explanation=msg) - - method = self._registered.get(command) - if not method: - # Just raise 404 if the user tries to - # access a private method. No need for - # 403 here since logically the command - # is not registered to the rpc dispatcher - raise exc.HTTPNotFound(explanation=_("Command not found")) - - return True - - # If more than one command were sent then they might - # be intended to be executed sequentially, that for, - # lets first verify they're all valid before executing - # them. 
- commands = filter(validate, body) - - results = [] - for cmd in commands: - # kwargs is not required - command, kwargs = cmd["command"], cmd.get("kwargs", {}) - method = self._registered[command] - try: - result = method(req.context, **kwargs) - except Exception as e: - if self.raise_exc: - raise - - cls, val = e.__class__, encodeutils.exception_to_unicode(e) - msg = (_LE("RPC Call Error: %(val)s\n%(tb)s") % - dict(val=val, tb=traceback.format_exc())) - LOG.error(msg) - - # NOTE(flaper87): Don't propagate all exceptions - # but the ones allowed by the user. - module = cls.__module__ - if module not in CONF.allowed_rpc_exception_modules: - cls = exception.RPCError - val = encodeutils.exception_to_unicode( - exception.RPCError(cls=cls, val=val)) - - cls_path = "%s.%s" % (cls.__module__, cls.__name__) - result = {"_error": {"cls": cls_path, "val": val}} - results.append(result) - return results - - -class RPCClient(client.BaseClient): - - def __init__(self, *args, **kwargs): - self._serializer = RPCJSONSerializer() - self._deserializer = RPCJSONDeserializer() - - self.raise_exc = kwargs.pop("raise_exc", True) - self.base_path = kwargs.pop("base_path", '/rpc') - super(RPCClient, self).__init__(*args, **kwargs) - - @client.handle_unauthenticated - def bulk_request(self, commands): - """ - Execute multiple commands in a single request. - - :param commands: List of commands to send. Commands - must respect the following form - - :: - - { - 'command': 'method_name', - 'kwargs': method_kwargs - } - - """ - body = self._serializer.to_json(commands) - response = super(RPCClient, self).do_request('POST', - self.base_path, - body) - return self._deserializer.from_json(response.read()) - - def do_request(self, method, **kwargs): - """ - Simple do_request override. This method serializes - the outgoing body and builds the command that will - be sent. 
- - :param method: The remote python method to call - :param kwargs: Dynamic parameters that will be - passed to the remote method. - """ - content = self.bulk_request([{'command': method, - 'kwargs': kwargs}]) - - # NOTE(flaper87): Return the first result if - # a single command was executed. - content = content[0] - - # NOTE(flaper87): Check if content is an error - # and re-raise it if raise_exc is True. Before - # checking if content contains the '_error' key, - # verify if it is an instance of dict - since the - # RPC call may have returned something different. - if self.raise_exc and (isinstance(content, dict) - and '_error' in content): - error = content['_error'] - try: - exc_cls = imp.import_class(error['cls']) - raise exc_cls(error['val']) - except ImportError: - # NOTE(flaper87): The exception - # class couldn't be imported, using - # a generic exception. - raise exception.RPCError(**error) - return content - - def __getattr__(self, item): - """ - This method returns a method_proxy that - will execute the rpc call in the registry - service. - """ - if item.startswith('_'): - raise AttributeError(item) - - def method_proxy(**kw): - return self.do_request(item, **kw) - - return method_proxy diff --git a/glance/common/scripts/__init__.py b/glance/common/scripts/__init__.py deleted file mode 100644 index 37a53a2f..00000000 --- a/glance/common/scripts/__init__.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging - -from glance.common.scripts.api_image_import import main as api_image_import -from glance.common.scripts.image_import import main as image_import -from glance.i18n import _LE, _LI - - -LOG = logging.getLogger(__name__) - - -def run_task(task_id, task_type, context, - task_repo=None, image_repo=None, image_factory=None): - # TODO(nikhil): if task_repo is None get new task repo - # TODO(nikhil): if image_repo is None get new image repo - # TODO(nikhil): if image_factory is None get new image factory - LOG.info(_LI("Loading known task scripts for task_id %(task_id)s " - "of type %(task_type)s"), {'task_id': task_id, - 'task_type': task_type}) - if task_type == 'import': - image_import.run(task_id, context, task_repo, - image_repo, image_factory) - - elif task_type == 'api_image_import': - api_image_import.run(task_id, - context, - task_repo, - image_repo, - image_factory) - - else: - msg = _LE("This task type %(task_type)s is not supported by the " - "current deployment of Glance. Please refer the " - "documentation provided by OpenStack or your operator " - "for more information.") % {'task_type': task_type} - LOG.error(msg) - task = task_repo.get(task_id) - task.fail(msg) - if task_repo: - task_repo.save(task) - else: - LOG.error(_LE("Failed to save task %(task_id)s in DB as task_repo " - "is %(task_repo)s"), {"task_id": task_id, - "task_repo": task_repo}) diff --git a/glance/common/scripts/api_image_import/__init__.py b/glance/common/scripts/api_image_import/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/common/scripts/api_image_import/main.py b/glance/common/scripts/api_image_import/main.py deleted file mode 100644 index 4c5bee12..00000000 --- a/glance/common/scripts/api_image_import/main.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -__all__ = [ - 'run', -] - -from oslo_concurrency import lockutils -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils -import six - -from glance.api.v2 import images as v2_api -from glance.common import exception -from glance.common.scripts import utils as script_utils -from glance.common import store_utils -from glance.i18n import _ - -LOG = logging.getLogger(__name__) - - -def run(t_id, context, task_repo, image_repo, image_factory): - LOG.info('Task %(task_id)s beginning image import ' - 'execution.', {'task_id': t_id}) - _execute(t_id, task_repo, image_repo, image_factory) - - -# NOTE(nikhil): This lock prevents more than N number of threads to be spawn -# simultaneously. The number N represents the number of threads in the -# executor pool. The value is set to 10 in the eventlet executor. -@lockutils.synchronized("glance_image_import") -def _execute(t_id, task_repo, image_repo, image_factory): - task = script_utils.get_task(task_repo, t_id) - - if task is None: - # NOTE: This happens if task is not found in the database. In - # such cases, there is no way to update the task status so, - # it's ignored here. 
- return - - try: - task_input = script_utils.unpack_task_input(task) - - image_id = task_input.get('image_id') - - task.succeed({'image_id': image_id}) - except Exception as e: - # Note: The message string contains Error in it to indicate - # in the task.message that it's a error message for the user. - - # TODO(nikhil): need to bring back save_and_reraise_exception when - # necessary - err_msg = ("Error: " + six.text_type(type(e)) + ': ' + - encodeutils.exception_to_unicode(e)) - log_msg = err_msg + ("Task ID %s" % task.task_id) - LOG.exception(log_msg) - - task.fail(_(err_msg)) # noqa - finally: - task_repo.save(task) - - -def import_image(image_repo, image_factory, task_input, task_id, uri): - original_image = v2_api.create_image(image_repo, - image_factory, - task_input.get('image_properties'), - task_id) - # NOTE: set image status to saving just before setting data - original_image.status = 'saving' - image_repo.save(original_image) - image_id = original_image.image_id - - # NOTE: Retrieving image from the database because the Image object - # returned from create_image method does not have appropriate factories - # wrapped around it. - new_image = image_repo.get(image_id) - set_image_data(new_image, uri, task_id) - - try: - # NOTE: Check if the Image is not deleted after setting the data - # before saving the active image. Here if image status is - # saving, then new_image is saved as it contains updated location, - # size, virtual_size and checksum information and the status of - # new_image is already set to active in set_image_data() call. 
- image = image_repo.get(image_id) - if image.status == 'saving': - image_repo.save(new_image) - return image_id - else: - msg = _("The Image %(image_id)s object being created by this task " - "%(task_id)s, is no longer in valid status for further " - "processing.") % {"image_id": image_id, - "task_id": task_id} - raise exception.Conflict(msg) - except (exception.Conflict, exception.NotFound, - exception.NotAuthenticated): - with excutils.save_and_reraise_exception(): - if new_image.locations: - for location in new_image.locations: - store_utils.delete_image_location_from_backend( - new_image.context, - image_id, - location) - - -def set_image_data(image, uri, task_id): - data_iter = None - try: - LOG.info("Task %(task_id)s: Got image data uri %(data_uri)s to be " - "imported", {"data_uri": uri, "task_id": task_id}) - data_iter = script_utils.get_image_data_iter(uri) - image.set_data(data_iter) - except Exception as e: - with excutils.save_and_reraise_exception(): - LOG.warn("Task %(task_id)s failed with exception %(error)s" % - {"error": encodeutils.exception_to_unicode(e), - "task_id": task_id}) - LOG.info("Task %(task_id)s: Could not import image file" - " %(image_data)s", {"image_data": uri, - "task_id": task_id}) - finally: - if hasattr(data_iter, 'close'): - data_iter.close() diff --git a/glance/common/scripts/image_import/__init__.py b/glance/common/scripts/image_import/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/common/scripts/image_import/main.py b/glance/common/scripts/image_import/main.py deleted file mode 100644 index 8906a0c4..00000000 --- a/glance/common/scripts/image_import/main.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -__all__ = [ - 'run', -] - -from oslo_concurrency import lockutils -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils -import six - -from glance.api.v2 import images as v2_api -from glance.common import exception -from glance.common.scripts import utils as script_utils -from glance.common import store_utils -from glance.i18n import _, _LE, _LI, _LW - -LOG = logging.getLogger(__name__) - - -def run(t_id, context, task_repo, image_repo, image_factory): - LOG.info(_LI('Task %(task_id)s beginning import ' - 'execution.'), {'task_id': t_id}) - _execute(t_id, task_repo, image_repo, image_factory) - - -# NOTE(nikhil): This lock prevents more than N number of threads to be spawn -# simultaneously. The number N represents the number of threads in the -# executor pool. The value is set to 10 in the eventlet executor. -@lockutils.synchronized("glance_import") -def _execute(t_id, task_repo, image_repo, image_factory): - task = script_utils.get_task(task_repo, t_id) - - if task is None: - # NOTE: This happens if task is not found in the database. In - # such cases, there is no way to update the task status so, - # it's ignored here. 
- return - - try: - task_input = script_utils.unpack_task_input(task) - - uri = script_utils.validate_location_uri(task_input.get('import_from')) - image_id = import_image(image_repo, image_factory, task_input, t_id, - uri) - - task.succeed({'image_id': image_id}) - except Exception as e: - # Note: The message string contains Error in it to indicate - # in the task.message that it's a error message for the user. - - # TODO(nikhil): need to bring back save_and_reraise_exception when - # necessary - err_msg = ("Error: " + six.text_type(type(e)) + ': ' + - encodeutils.exception_to_unicode(e)) - log_msg = _LE(err_msg + ("Task ID %s" % task.task_id)) # noqa - LOG.exception(log_msg) - - task.fail(_LE(err_msg)) # noqa - finally: - task_repo.save(task) - - -def import_image(image_repo, image_factory, task_input, task_id, uri): - original_image = create_image(image_repo, image_factory, - task_input.get('image_properties'), task_id) - # NOTE: set image status to saving just before setting data - original_image.status = 'saving' - image_repo.save(original_image) - image_id = original_image.image_id - - # NOTE: Retrieving image from the database because the Image object - # returned from create_image method does not have appropriate factories - # wrapped around it. - new_image = image_repo.get(image_id) - set_image_data(new_image, uri, task_id) - - try: - # NOTE: Check if the Image is not deleted after setting the data - # before saving the active image. Here if image status is - # saving, then new_image is saved as it contains updated location, - # size, virtual_size and checksum information and the status of - # new_image is already set to active in set_image_data() call. 
- image = image_repo.get(image_id) - if image.status == 'saving': - image_repo.save(new_image) - return image_id - else: - msg = _("The Image %(image_id)s object being created by this task " - "%(task_id)s, is no longer in valid status for further " - "processing.") % {"image_id": image_id, - "task_id": task_id} - raise exception.Conflict(msg) - except (exception.Conflict, exception.NotFound, - exception.NotAuthenticated): - with excutils.save_and_reraise_exception(): - if new_image.locations: - for location in new_image.locations: - store_utils.delete_image_location_from_backend( - new_image.context, - image_id, - location) - - -def create_image(image_repo, image_factory, image_properties, task_id): - properties = {} - # NOTE: get the base properties - for key in v2_api.get_base_properties(): - try: - properties[key] = image_properties.pop(key) - except KeyError: - LOG.debug("Task ID %(task_id)s: Ignoring property %(k)s for " - "setting base properties while creating " - "Image.", {'task_id': task_id, 'k': key}) - - # NOTE: get the rest of the properties and pass them as - # extra_properties for Image to be created with them. 
- properties['extra_properties'] = image_properties - script_utils.set_base_image_properties(properties=properties) - - image = image_factory.new_image(**properties) - image_repo.add(image) - return image - - -def set_image_data(image, uri, task_id): - data_iter = None - try: - LOG.info(_LI("Task %(task_id)s: Got image data uri %(data_uri)s to be " - "imported"), {"data_uri": uri, "task_id": task_id}) - data_iter = script_utils.get_image_data_iter(uri) - image.set_data(data_iter) - except Exception as e: - with excutils.save_and_reraise_exception(): - LOG.warn(_LW("Task %(task_id)s failed with exception %(error)s") % - {"error": encodeutils.exception_to_unicode(e), - "task_id": task_id}) - LOG.info(_LI("Task %(task_id)s: Could not import image file" - " %(image_data)s"), {"image_data": uri, - "task_id": task_id}) - finally: - if hasattr(data_iter, 'close'): - data_iter.close() diff --git a/glance/common/scripts/utils.py b/glance/common/scripts/utils.py deleted file mode 100644 index f88d2101..00000000 --- a/glance/common/scripts/utils.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -__all__ = [ - 'get_task', - 'unpack_task_input', - 'set_base_image_properties', - 'validate_location_uri', - 'get_image_data_iter', -] - - -from oslo_log import log as logging -from six.moves import urllib - -from glance.common import exception -from glance.i18n import _, _LE - -LOG = logging.getLogger(__name__) - - -def get_task(task_repo, task_id): - """Gets a TaskProxy object. - - :param task_repo: TaskRepo object used to perform DB operations - :param task_id: ID of the Task - """ - task = None - try: - task = task_repo.get(task_id) - except exception.NotFound: - msg = _LE('Task not found for task_id %s') % task_id - LOG.exception(msg) - - return task - - -def unpack_task_input(task): - """Verifies and returns valid task input dictionary. - - :param task: Task domain object - """ - task_type = task.type - task_input = task.task_input - - if task_type == 'api_image_import': - if 'import_method' not in task_input: - msg = _("Input does not contain 'import_method'") - raise exception.Invalid(msg) - else: - for key in ["import_from", "import_from_format", "image_properties"]: - if key not in task_input: - msg = (_("Input does not contain '%(key)s' field") % - {"key": key}) - raise exception.Invalid(msg) - - return task_input - - -def set_base_image_properties(properties=None): - """Sets optional base properties for creating Image. - - :param properties: Input dict to set some base properties - """ - if isinstance(properties, dict) and len(properties) == 0: - # TODO(nikhil): We can make these properties configurable while - # implementing the pipeline logic for the scripts. The below shown - # are placeholders to show that the scripts work on 'devstack' - # environment. - properties['disk_format'] = 'qcow2' - properties['container_format'] = 'bare' - - -def validate_location_uri(location): - """Validate location uri into acceptable format. 
- - :param location: Location uri to be validated - """ - if not location: - raise exception.BadStoreUri(_('Invalid location: %s') % location) - - elif location.startswith(('http://', 'https://')): - return location - - # NOTE: file type uri is being avoided for security reasons, - # see LP bug #942118 #1400966. - elif location.startswith(("file:///", "filesystem:///")): - msg = _("File based imports are not allowed. Please use a non-local " - "source of image data.") - # NOTE: raise BadStoreUri and let the encompassing block save the error - # msg in the task.message. - raise exception.BadStoreUri(msg) - - else: - # TODO(nikhil): add other supported uris - supported = ['http', ] - msg = _("The given uri is not valid. Please specify a " - "valid uri from the following list of supported uri " - "%(supported)s") % {'supported': supported} - raise urllib.error.URLError(msg) - - -def get_image_data_iter(uri): - """Returns iterable object either for local file or uri - - :param uri: uri (remote or local) to the datasource we want to iterate - - Validation/sanitization of the uri is expected to happen before we get - here. - """ - # NOTE(flaper87): This is safe because the input uri is already - # verified before the task is created. - if uri.startswith("file://"): - uri = uri.split("file://")[-1] - # NOTE(flaper87): The caller of this function expects to have - # an iterable object. FileObjects in python are iterable, therefore - # we are returning it as is. - # The file descriptor will be eventually cleaned up by the garbage - # collector once its ref-count is dropped to 0. That is, when there - # wont be any references pointing to this file. - # - # We're not using StringIO or other tools to avoid reading everything - # into memory. Some images may be quite heavy. 
- return open(uri, "r") - - return urllib.request.urlopen(uri) diff --git a/glance/common/store_utils.py b/glance/common/store_utils.py deleted file mode 100644 index 45509d31..00000000 --- a/glance/common/store_utils.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sys - -import glance_store as store_api -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -import six.moves.urllib.parse as urlparse - -import glance.db as db_api -from glance.i18n import _LE, _LW -from glance import scrubber - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - -RESTRICTED_URI_SCHEMAS = frozenset(['file', 'filesystem', 'swift+config']) - - -def safe_delete_from_backend(context, image_id, location): - """ - Given a location, delete an image from the store and - update location status to db. - - This function try to handle all known exceptions which might be raised - by those calls on store and DB modules in its implementation. 
- - :param context: The request context - :param image_id: The image identifier - :param location: The image location entry - """ - - try: - ret = store_api.delete_from_backend(location['url'], context=context) - location['status'] = 'deleted' - if 'id' in location: - db_api.get_api().image_location_delete(context, image_id, - location['id'], 'deleted') - return ret - except store_api.NotFound: - msg = _LW('Failed to delete image %s in store from URI') % image_id - LOG.warn(msg) - except store_api.StoreDeleteNotSupported as e: - LOG.warn(encodeutils.exception_to_unicode(e)) - except store_api.UnsupportedBackend: - exc_type = sys.exc_info()[0].__name__ - msg = (_LE('Failed to delete image %(image_id)s from store: %(exc)s') % - dict(image_id=image_id, exc=exc_type)) - LOG.error(msg) - - -def schedule_delayed_delete_from_backend(context, image_id, location): - """ - Given a location, schedule the deletion of an image location and - update location status to db. - - :param context: The request context - :param image_id: The image identifier - :param location: The image location entry - """ - - db_queue = scrubber.get_scrub_queue() - - if not CONF.use_user_token: - context = None - - ret = db_queue.add_location(image_id, location) - if ret: - location['status'] = 'pending_delete' - if 'id' in location: - # NOTE(zhiyan): New added image location entry will has no 'id' - # field since it has not been saved to DB. - db_api.get_api().image_location_delete(context, image_id, - location['id'], - 'pending_delete') - else: - db_api.get_api().image_location_add(context, image_id, location) - - return ret - - -def delete_image_location_from_backend(context, image_id, location): - """ - Given a location, immediately or schedule the deletion of an image - location and update location status to db. 
- - :param context: The request context - :param image_id: The image identifier - :param location: The image location entry - """ - - deleted = False - if CONF.delayed_delete: - deleted = schedule_delayed_delete_from_backend(context, - image_id, location) - if not deleted: - # NOTE(zhiyan) If image metadata has not been saved to DB - # such as uploading process failure then we can't use - # location status mechanism to support image pending delete. - safe_delete_from_backend(context, image_id, location) - - -def validate_external_location(uri): - """ - Validate if URI of external location are supported. - - Only over non-local store types are OK, i.e. Swift, - HTTP. Note the absence of 'file://' for security reasons, - see LP bug #942118, 1400966, 'swift+config://' is also - absent for security reasons, see LP bug #1334196. - - :param uri: The URI of external image location. - :returns: Whether given URI of external image location are OK. - """ - if not uri: - return False - - # TODO(zhiyan): This function could be moved to glance_store. - # TODO(gm): Use a whitelist of allowed schemes - scheme = urlparse.urlparse(uri).scheme - return (scheme in store_api.get_known_schemes() and - scheme not in RESTRICTED_URI_SCHEMAS) diff --git a/glance/common/swift_store_utils.py b/glance/common/swift_store_utils.py deleted file mode 100644 index 6981a182..00000000 --- a/glance/common/swift_store_utils.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright 2014 Rackspace -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -import sys - -from oslo_config import cfg -from oslo_log import log as logging -from six.moves import configparser - -from glance.common import exception -from glance.i18n import _, _LE - -swift_opts = [ - cfg.StrOpt('default_swift_reference', - default="ref1", - help=_(""" -Reference to default Swift account/backing store parameters. - -Provide a string value representing a reference to the default set -of parameters required for using swift account/backing store for -image storage. The default reference value for this configuration -option is 'ref1'. This configuration option dereferences the -parameters and facilitates image storage in Swift storage backend -every time a new image is added. - -Possible values: - * A valid string value - -Related options: - * None - -""")), - cfg.StrOpt('swift_store_auth_address', - deprecated_reason=(""" -The option auth_address in the Swift back-end configuration file is -used instead. -"""), - help=_('The address where the Swift authentication service ' - 'is listening.')), - cfg.StrOpt('swift_store_user', secret=True, - deprecated_reason=(""" -The option 'user' in the Swift back-end configuration file is set instead. -"""), - help=_('The user to authenticate against the Swift ' - 'authentication service.')), - cfg.StrOpt('swift_store_key', secret=True, - deprecated_reason=(""" -The option 'key' in the Swift back-end configuration file is used -to set the authentication key instead. -"""), - help=_('Auth key for the user authenticating against the ' - 'Swift authentication service.')), - cfg.StrOpt('swift_store_config_file', secret=True, - help=_(""" -File containing the swift account(s) configurations. - -Include a string value representing the path to a configuration -file that has references for each of the configured Swift -account(s)/backing stores. 
By default, no file path is specified -and customized Swift referencing is diabled. Configuring this option -is highly recommended while using Swift storage backend for image -storage as it helps avoid storage of credentials in the -database. - -Possible values: - * None - * String value representing a vaid configuration file path - -Related options: - * None - -""")), -] - -# SafeConfigParser was deprecated in Python 3.2 -if sys.version_info >= (3, 2): - CONFIG = configparser.ConfigParser() -else: - CONFIG = configparser.SafeConfigParser() - -LOG = logging.getLogger(__name__) - - -CONF = cfg.CONF -CONF.register_opts(swift_opts) - - -def is_multiple_swift_store_accounts_enabled(): - if CONF.swift_store_config_file is None: - return False - return True - - -class SwiftParams(object): - def __init__(self): - if is_multiple_swift_store_accounts_enabled(): - self.params = self._load_config() - else: - self.params = self._form_default_params() - - def _form_default_params(self): - default = {} - if (CONF.swift_store_user and CONF.swift_store_key - and CONF.swift_store_auth_address): - default['user'] = CONF.swift_store_user - default['key'] = CONF.swift_store_key - default['auth_address'] = CONF.swift_store_auth_address - return {CONF.default_swift_reference: default} - return {} - - def _load_config(self): - try: - conf_file = CONF.find_file(CONF.swift_store_config_file) - CONFIG.read(conf_file) - except Exception as e: - msg = (_LE("swift config file %(conf_file)s:%(exc)s not found") % - {'conf_file': CONF.swift_store_config_file, 'exc': e}) - LOG.error(msg) - raise exception.InvalidSwiftStoreConfiguration() - account_params = {} - account_references = CONFIG.sections() - for ref in account_references: - reference = {} - try: - reference['auth_address'] = CONFIG.get(ref, 'auth_address') - reference['user'] = CONFIG.get(ref, 'user') - reference['key'] = CONFIG.get(ref, 'key') - account_params[ref] = reference - except (ValueError, SyntaxError, 
configparser.NoOptionError) as e: - LOG.exception(_LE("Invalid format of swift store config " - "cfg")) - return account_params diff --git a/glance/common/timeutils.py b/glance/common/timeutils.py deleted file mode 100644 index ea64844a..00000000 --- a/glance/common/timeutils.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Time related utilities and helper functions. -""" - -import datetime - -import iso8601 -from monotonic import monotonic as now # noqa -from oslo_utils import encodeutils - -# ISO 8601 extended time format with microseconds -_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' -_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' -PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND - - -def isotime(at=None, subsecond=False): - """Stringify time in ISO 8601 format.""" - if not at: - at = utcnow() - st = at.strftime(_ISO8601_TIME_FORMAT - if not subsecond - else _ISO8601_TIME_FORMAT_SUBSECOND) - tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' - st += ('Z' if tz == 'UTC' else tz) - return st - - -def parse_isotime(timestr): - """Parse time from ISO 8601 format.""" - try: - return iso8601.parse_date(timestr) - except iso8601.ParseError as e: - raise ValueError(encodeutils.exception_to_unicode(e)) - except TypeError as e: - raise ValueError(encodeutils.exception_to_unicode(e)) - - -def utcnow(with_timezone=False): - """Overridable version of utils.utcnow 
that can return a TZ-aware datetime. - """ - if utcnow.override_time: - try: - return utcnow.override_time.pop(0) - except AttributeError: - return utcnow.override_time - if with_timezone: - return datetime.datetime.now(tz=iso8601.iso8601.UTC) - return datetime.datetime.utcnow() - - -def normalize_time(timestamp): - """Normalize time in arbitrary timezone to UTC naive object.""" - offset = timestamp.utcoffset() - if offset is None: - return timestamp - return timestamp.replace(tzinfo=None) - offset - - -def iso8601_from_timestamp(timestamp, microsecond=False): - """Returns an iso8601 formatted date from timestamp.""" - return isotime(datetime.datetime.utcfromtimestamp(timestamp), microsecond) - -utcnow.override_time = None - - -def delta_seconds(before, after): - """Return the difference between two timing objects. - - Compute the difference in seconds between two date, time, or - datetime objects (as a float, to microsecond resolution). - """ - delta = after - before - return datetime.timedelta.total_seconds(delta) diff --git a/glance/common/trust_auth.py b/glance/common/trust_auth.py deleted file mode 100644 index ff3cf3a1..00000000 --- a/glance/common/trust_auth.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from keystoneauth1 import exceptions as ka_exceptions -from keystoneauth1.identity import v3 -from keystoneauth1.loading import conf -from keystoneauth1.loading import session -from keystoneclient.v3 import client as ks_client -from oslo_config import cfg -from oslo_log import log as logging - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - - -class TokenRefresher(object): - """Class that responsible for token refreshing with trusts""" - - def __init__(self, user_plugin, user_project, user_roles): - """Prepare all parameters and clients required to refresh token""" - - # step 1: Prepare parameters required to connect to keystone - self.auth_url = CONF.keystone_authtoken.auth_uri - if not self.auth_url.endswith('/v3'): - self.auth_url += '/v3' - - self.ssl_settings = { - 'cacert': CONF.keystone_authtoken.cafile, - 'insecure': CONF.keystone_authtoken.insecure, - 'cert': CONF.keystone_authtoken.certfile, - 'key': CONF.keystone_authtoken.keyfile, - } - - # step 2: create trust to ensure that we can always update token - - # trustor = user who made the request - trustor_client = self._load_client(user_plugin, self.ssl_settings) - trustor_id = trustor_client.session.get_user_id() - - # get trustee user client that impersonates main user - trustee_user_auth = conf.load_from_conf_options(CONF, - 'keystone_authtoken') - # save service user client because we need new service token - # to refresh trust-scoped client later - self.trustee_user_client = self._load_client(trustee_user_auth, - self.ssl_settings) - trustee_id = self.trustee_user_client.session.get_user_id() - - self.trust_id = trustor_client.trusts.create(trustor_user=trustor_id, - trustee_user=trustee_id, - impersonation=True, - role_names=user_roles, - project=user_project).id - LOG.debug("Trust %s has been created.", self.trust_id) - - # step 3: postpone trust-scoped client initialization - # until we need to refresh the token - self.trustee_client = None - - def refresh_token(self): - """Receive new 
token if user need to update old token - - :return: new token that can be used for authentication - """ - LOG.debug("Requesting the new token with trust %s", self.trust_id) - if self.trustee_client is None: - self.trustee_client = self._refresh_trustee_client() - try: - return self.trustee_client.session.get_token() - except ka_exceptions.Unauthorized: - # in case of Unauthorized exceptions try to refresh client because - # service user token may expired - self.trustee_client = self._refresh_trustee_client() - return self.trustee_client.session.get_token() - - def release_resources(self): - """Release keystone resources required for refreshing""" - - try: - if self.trustee_client is None: - self._refresh_trustee_client().trusts.delete(self.trust_id) - else: - self.trustee_client.trusts.delete(self.trust_id) - except ka_exceptions.Unauthorized: - # service user token may expire when we are trying to delete token - # so need to update client to ensure that this is not the reason - # of failure - self.trustee_client = self._refresh_trustee_client() - self.trustee_client.trusts.delete(self.trust_id) - - def _refresh_trustee_client(self): - trustee_token = self.trustee_user_client.session.get_token() - trustee_auth = v3.Token( - trust_id=self.trust_id, - token=trustee_token, - auth_url=self.auth_url - ) - return self._load_client(trustee_auth, self.ssl_settings) - - @staticmethod - def _load_client(plugin, ssl_settings): - # load client from auth settings and user plugin - sess = session.Session().load_from_options( - auth=plugin, **ssl_settings) - return ks_client.Client(session=sess) diff --git a/glance/common/utils.py b/glance/common/utils.py deleted file mode 100644 index 2570f1bd..00000000 --- a/glance/common/utils.py +++ /dev/null @@ -1,674 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2014 SoftLayer Technologies, Inc. 
-# Copyright 2015 Mirantis, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -System-level utilities and helper functions. -""" - -import errno - -try: - from eventlet import sleep -except ImportError: - from time import sleep -from eventlet.green import socket - -import functools -import os -import re -import uuid - -from OpenSSL import crypto -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils -from oslo_utils import netutils -from oslo_utils import strutils -import six -from webob import exc - -from glance.common import exception -from glance.common import timeutils -from glance.i18n import _, _LE, _LW - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - -# Whitelist of v1 API headers of form x-image-meta-xxx -IMAGE_META_HEADERS = ['x-image-meta-location', 'x-image-meta-size', - 'x-image-meta-is_public', 'x-image-meta-disk_format', - 'x-image-meta-container_format', 'x-image-meta-name', - 'x-image-meta-status', 'x-image-meta-copy_from', - 'x-image-meta-uri', 'x-image-meta-checksum', - 'x-image-meta-created_at', 'x-image-meta-updated_at', - 'x-image-meta-deleted_at', 'x-image-meta-min_ram', - 'x-image-meta-min_disk', 'x-image-meta-owner', - 'x-image-meta-store', 'x-image-meta-id', - 'x-image-meta-protected', 'x-image-meta-deleted', - 'x-image-meta-virtual_size'] - -GLANCE_TEST_SOCKET_FD_STR = 'GLANCE_TEST_SOCKET_FD' - - -def chunkreadable(iter, 
chunk_size=65536): - """ - Wrap a readable iterator with a reader yielding chunks of - a preferred size, otherwise leave iterator unchanged. - - :param iter: an iter which may also be readable - :param chunk_size: maximum size of chunk - """ - return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter - - -def chunkiter(fp, chunk_size=65536): - """ - Return an iterator to a file-like obj which yields fixed size chunks - - :param fp: a file-like object - :param chunk_size: maximum size of chunk - """ - while True: - chunk = fp.read(chunk_size) - if chunk: - yield chunk - else: - break - - -def cooperative_iter(iter): - """ - Return an iterator which schedules after each - iteration. This can prevent eventlet thread starvation. - - :param iter: an iterator to wrap - """ - try: - for chunk in iter: - sleep(0) - yield chunk - except Exception as err: - with excutils.save_and_reraise_exception(): - msg = _LE("Error: cooperative_iter exception %s") % err - LOG.error(msg) - - -def cooperative_read(fd): - """ - Wrap a file descriptor's read with a partial function which schedules - after each read. This can prevent eventlet thread starvation. - - :param fd: a file descriptor to wrap - """ - def readfn(*args): - result = fd.read(*args) - sleep(0) - return result - return readfn - - -MAX_COOP_READER_BUFFER_SIZE = 134217728 # 128M seems like a sane buffer limit - - -class CooperativeReader(object): - """ - An eventlet thread friendly class for reading in image data. - - When accessing data either through the iterator or the read method - we perform a sleep to allow a co-operative yield. When there is more than - one image being uploaded/downloaded this prevents eventlet thread - starvation, ie allows all threads to be scheduled periodically rather than - having the same thread be continuously active. 
- """ - def __init__(self, fd): - """ - :param fd: Underlying image file object - """ - self.fd = fd - self.iterator = None - # NOTE(markwash): if the underlying supports read(), overwrite the - # default iterator-based implementation with cooperative_read which - # is more straightforward - if hasattr(fd, 'read'): - self.read = cooperative_read(fd) - else: - self.iterator = None - self.buffer = b'' - self.position = 0 - - def read(self, length=None): - """Return the requested amount of bytes, fetching the next chunk of - the underlying iterator when needed. - - This is replaced with cooperative_read in __init__ if the underlying - fd already supports read(). - """ - if length is None: - if len(self.buffer) - self.position > 0: - # if no length specified but some data exists in buffer, - # return that data and clear the buffer - result = self.buffer[self.position:] - self.buffer = b'' - self.position = 0 - return str(result) - else: - # otherwise read the next chunk from the underlying iterator - # and return it as a whole. Reset the buffer, as subsequent - # calls may specify the length - try: - if self.iterator is None: - self.iterator = self.__iter__() - return next(self.iterator) - except StopIteration: - return '' - finally: - self.buffer = b'' - self.position = 0 - else: - result = bytearray() - while len(result) < length: - if self.position < len(self.buffer): - to_read = length - len(result) - chunk = self.buffer[self.position:self.position + to_read] - result.extend(chunk) - - # This check is here to prevent potential OOM issues if - # this code is called with unreasonably high values of read - # size. Currently it is only called from the HTTP clients - # of Glance backend stores, which use httplib for data - # streaming, which has readsize hardcoded to 8K, so this - # check should never fire. Regardless it still worths to - # make the check, as the code may be reused somewhere else. 
- if len(result) >= MAX_COOP_READER_BUFFER_SIZE: - raise exception.LimitExceeded() - self.position += len(chunk) - else: - try: - if self.iterator is None: - self.iterator = self.__iter__() - self.buffer = next(self.iterator) - self.position = 0 - except StopIteration: - self.buffer = b'' - self.position = 0 - return bytes(result) - return bytes(result) - - def __iter__(self): - return cooperative_iter(self.fd.__iter__()) - - -class LimitingReader(object): - """ - Reader designed to fail when reading image data past the configured - allowable amount. - """ - def __init__(self, data, limit): - """ - :param data: Underlying image data object - :param limit: maximum number of bytes the reader should allow - """ - self.data = data - self.limit = limit - self.bytes_read = 0 - - def __iter__(self): - for chunk in self.data: - self.bytes_read += len(chunk) - if self.bytes_read > self.limit: - raise exception.ImageSizeLimitExceeded() - else: - yield chunk - - def read(self, i): - result = self.data.read(i) - self.bytes_read += len(result) - if self.bytes_read > self.limit: - raise exception.ImageSizeLimitExceeded() - return result - - -def image_meta_to_http_headers(image_meta): - """ - Returns a set of image metadata into a dict - of HTTP headers that can be fed to either a Webob - Request object or an httplib.HTTP(S)Connection object - - :param image_meta: Mapping of image metadata - """ - headers = {} - for k, v in image_meta.items(): - if v is not None: - if k == 'properties': - for pk, pv in v.items(): - if pv is not None: - headers["x-image-meta-property-%s" - % pk.lower()] = six.text_type(pv) - else: - headers["x-image-meta-%s" % k.lower()] = six.text_type(v) - return headers - - -def get_image_meta_from_headers(response): - """ - Processes HTTP headers from a supplied response that - match the x-image-meta and x-image-meta-property and - returns a mapping of image metadata and properties - - :param response: Response to process - """ - result = {} - properties = {} 
- - if hasattr(response, 'getheaders'): # httplib.HTTPResponse - headers = response.getheaders() - else: # webob.Response - headers = response.headers.items() - - for key, value in headers: - key = str(key.lower()) - if key.startswith('x-image-meta-property-'): - field_name = key[len('x-image-meta-property-'):].replace('-', '_') - properties[field_name] = value or None - elif key.startswith('x-image-meta-'): - field_name = key[len('x-image-meta-'):].replace('-', '_') - if 'x-image-meta-' + field_name not in IMAGE_META_HEADERS: - msg = _("Bad header: %(header_name)s") % {'header_name': key} - raise exc.HTTPBadRequest(msg, content_type="text/plain") - result[field_name] = value or None - result['properties'] = properties - - for key, nullable in [('size', False), ('min_disk', False), - ('min_ram', False), ('virtual_size', True)]: - if key in result: - try: - result[key] = int(result[key]) - except ValueError: - if nullable and result[key] == str(None): - result[key] = None - else: - extra = (_("Cannot convert image %(key)s '%(value)s' " - "to an integer.") - % {'key': key, 'value': result[key]}) - raise exception.InvalidParameterValue(value=result[key], - param=key, - extra_msg=extra) - if result[key] is not None and result[key] < 0: - extra = _('Cannot be a negative value.') - raise exception.InvalidParameterValue(value=result[key], - param=key, - extra_msg=extra) - - for key in ('is_public', 'deleted', 'protected'): - if key in result: - result[key] = strutils.bool_from_string(result[key]) - return result - - -def create_mashup_dict(image_meta): - """ - Returns a dictionary-like mashup of the image core properties - and the image custom properties from given image metadata. 
- - :param image_meta: metadata of image with core and custom properties - """ - - d = {} - for key, value in six.iteritems(image_meta): - if isinstance(value, dict): - for subkey, subvalue in six.iteritems( - create_mashup_dict(value)): - if subkey not in image_meta: - d[subkey] = subvalue - else: - d[key] = value - - return d - - -def safe_mkdirs(path): - try: - os.makedirs(path) - except OSError as e: - if e.errno != errno.EEXIST: - raise - - -def mutating(func): - """Decorator to enforce read-only logic""" - @functools.wraps(func) - def wrapped(self, req, *args, **kwargs): - if req.context.read_only: - msg = "Read-only access" - LOG.debug(msg) - raise exc.HTTPForbidden(msg, request=req, - content_type="text/plain") - return func(self, req, *args, **kwargs) - return wrapped - - -def setup_remote_pydev_debug(host, port): - error_msg = _LE('Error setting up the debug environment. Verify that the' - ' option pydev_worker_debug_host is pointing to a valid ' - 'hostname or IP on which a pydev server is listening on' - ' the port indicated by pydev_worker_debug_port.') - - try: - try: - from pydev import pydevd - except ImportError: - import pydevd - - pydevd.settrace(host, - port=port, - stdoutToServer=True, - stderrToServer=True) - return True - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(error_msg) - - -def validate_key_cert(key_file, cert_file): - try: - error_key_name = "private key" - error_filename = key_file - with open(key_file, 'r') as keyfile: - key_str = keyfile.read() - key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str) - - error_key_name = "certificate" - error_filename = cert_file - with open(cert_file, 'r') as certfile: - cert_str = certfile.read() - cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str) - except IOError as ioe: - raise RuntimeError(_("There is a problem with your %(error_key_name)s " - "%(error_filename)s. Please verify it." 
- " Error: %(ioe)s") % - {'error_key_name': error_key_name, - 'error_filename': error_filename, - 'ioe': ioe}) - except crypto.Error as ce: - raise RuntimeError(_("There is a problem with your %(error_key_name)s " - "%(error_filename)s. Please verify it. OpenSSL" - " error: %(ce)s") % - {'error_key_name': error_key_name, - 'error_filename': error_filename, - 'ce': ce}) - - try: - data = str(uuid.uuid4()) - # On Python 3, explicitly encode to UTF-8 to call crypto.sign() which - # requires bytes. Otherwise, it raises a deprecation warning (and - # will raise an error later). - data = encodeutils.to_utf8(data) - digest = CONF.digest_algorithm - if digest == 'sha1': - LOG.warn( - _LW('The FIPS (FEDERAL INFORMATION PROCESSING STANDARDS)' - ' state that the SHA-1 is not suitable for' - ' general-purpose digital signature applications (as' - ' specified in FIPS 186-3) that require 112 bits of' - ' security. The default value is sha1 in Kilo for a' - ' smooth upgrade process, and it will be updated' - ' with sha256 in next release(L).')) - out = crypto.sign(key, data, digest) - crypto.verify(cert, out, data, digest) - except crypto.Error as ce: - raise RuntimeError(_("There is a problem with your key pair. " - "Please verify that cert %(cert_file)s and " - "key %(key_file)s belong together. 
OpenSSL " - "error %(ce)s") % {'cert_file': cert_file, - 'key_file': key_file, - 'ce': ce}) - - -def get_test_suite_socket(): - global GLANCE_TEST_SOCKET_FD_STR - if GLANCE_TEST_SOCKET_FD_STR in os.environ: - fd = int(os.environ[GLANCE_TEST_SOCKET_FD_STR]) - sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) - if six.PY2: - sock = socket.SocketType(_sock=sock) - sock.listen(CONF.backlog) - del os.environ[GLANCE_TEST_SOCKET_FD_STR] - os.close(fd) - return sock - return None - - -def is_valid_hostname(hostname): - """Verify whether a hostname (not an FQDN) is valid.""" - return re.match('^[a-zA-Z0-9-]+$', hostname) is not None - - -def is_valid_fqdn(fqdn): - """Verify whether a host is a valid FQDN.""" - return re.match('^[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$', fqdn) is not None - - -def parse_valid_host_port(host_port): - """ - Given a "host:port" string, attempts to parse it as intelligently as - possible to determine if it is valid. This includes IPv6 [host]:port form, - IPv4 ip:port form, and hostname:port or fqdn:port form. - - Invalid inputs will raise a ValueError, while valid inputs will return - a (host, port) tuple where the port will always be of type int. - """ - - try: - try: - host, port = netutils.parse_host_port(host_port) - except Exception: - raise ValueError(_('Host and port "%s" is not valid.') % host_port) - - if not netutils.is_valid_port(port): - raise ValueError(_('Port "%s" is not valid.') % port) - - # First check for valid IPv6 and IPv4 addresses, then a generic - # hostname. Failing those, if the host includes a period, then this - # should pass a very generic FQDN check. The FQDN check for letters at - # the tail end will weed out any hilariously absurd IPv4 addresses. 
- - if not (netutils.is_valid_ipv6(host) or netutils.is_valid_ipv4(host) or - is_valid_hostname(host) or is_valid_fqdn(host)): - raise ValueError(_('Host "%s" is not valid.') % host) - - except Exception as ex: - raise ValueError(_('%s ' - 'Please specify a host:port pair, where host is an ' - 'IPv4 address, IPv6 address, hostname, or FQDN. If ' - 'using an IPv6 address, enclose it in brackets ' - 'separately from the port (i.e., ' - '"[fe80::a:b:c]:9876").') % ex) - - return (host, int(port)) - - -try: - REGEX_4BYTE_UNICODE = re.compile(u'[\U00010000-\U0010ffff]') -except re.error: - # UCS-2 build case - REGEX_4BYTE_UNICODE = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]') - - -def no_4byte_params(f): - """ - Checks that no 4 byte unicode characters are allowed - in dicts' keys/values and string's parameters - """ - def wrapper(*args, **kwargs): - - def _is_match(some_str): - return (isinstance(some_str, six.text_type) and - REGEX_4BYTE_UNICODE.findall(some_str) != []) - - def _check_dict(data_dict): - # a dict of dicts has to be checked recursively - for key, value in six.iteritems(data_dict): - if isinstance(value, dict): - _check_dict(value) - else: - if _is_match(key): - msg = _("Property names can't contain 4 byte unicode.") - raise exception.Invalid(msg) - if _is_match(value): - msg = (_("%s can't contain 4 byte unicode characters.") - % key.title()) - raise exception.Invalid(msg) - - for data_dict in [arg for arg in args if isinstance(arg, dict)]: - _check_dict(data_dict) - # now check args for str values - for arg in args: - if _is_match(arg): - msg = _("Param values can't contain 4 byte unicode.") - raise exception.Invalid(msg) - # check kwargs as well, as params are passed as kwargs via - # registry calls - _check_dict(kwargs) - return f(*args, **kwargs) - return wrapper - - -def stash_conf_values(): - """ - Make a copy of some of the current global CONF's settings. - Allows determining if any of these values have changed - when the config is reloaded. 
- """ - conf = { - 'bind_host': CONF.bind_host, - 'bind_port': CONF.bind_port, - 'tcp_keepidle': CONF.cert_file, - 'backlog': CONF.backlog, - 'key_file': CONF.key_file, - 'cert_file': CONF.cert_file - } - - return conf - - -def split_filter_op(expression): - """Split operator from threshold in an expression. - Designed for use on a comparative-filtering query field. - When no operator is found, default to an equality comparison. - - :param expression: the expression to parse - - :returns: a tuple (operator, threshold) parsed from expression - """ - left, sep, right = expression.partition(':') - if sep: - # If the expression is a date of the format ISO 8601 like - # CCYY-MM-DDThh:mm:ss+hh:mm and has no operator, it should - # not be partitioned, and a default operator of eq should be - # assumed. - try: - timeutils.parse_isotime(expression) - op = 'eq' - threshold = expression - except ValueError: - op = left - threshold = right - else: - op = 'eq' # default operator - threshold = left - - # NOTE stevelle decoding escaped values may be needed later - return op, threshold - - -def validate_quotes(value): - """Validate filter values - - Validation opening/closing quotes in the expression. - """ - open_quotes = True - for i in range(len(value)): - if value[i] == '"': - if i and value[i - 1] == '\\': - continue - if open_quotes: - if i and value[i - 1] != ',': - msg = _("Invalid filter value %s. There is no comma " - "before opening quotation mark.") % value - raise exception.InvalidParameterValue(message=msg) - else: - if i + 1 != len(value) and value[i + 1] != ",": - msg = _("Invalid filter value %s. There is no comma " - "after closing quotation mark.") % value - raise exception.InvalidParameterValue(message=msg) - open_quotes = not open_quotes - if not open_quotes: - msg = _("Invalid filter value %s. 
The quote is not closed.") % value - raise exception.InvalidParameterValue(message=msg) - - -def split_filter_value_for_quotes(value): - """Split filter values - - Split values by commas and quotes for 'in' operator, according api-wg. - """ - validate_quotes(value) - tmp = re.compile(r''' - "( # if found a double-quote - [^\"\\]* # take characters either non-quotes or backslashes - (?:\\. # take backslashes and character after it - [^\"\\]*)* # take characters either non-quotes or backslashes - ) # before double-quote - ",? # a double-quote with comma maybe - | ([^,]+),? # if not found double-quote take any non-comma - # characters with comma maybe - | , # if we have only comma take empty string - ''', re.VERBOSE) - return [val[0] or val[1] for val in re.findall(tmp, value)] - - -def evaluate_filter_op(value, operator, threshold): - """Evaluate a comparison operator. - Designed for use on a comparative-filtering query field. - - :param value: evaluated against the operator, as left side of expression - :param operator: any supported filter operation - :param threshold: to compare value against, as right side of expression - - :raises InvalidFilterOperatorValue: if an unknown operator is provided - - :returns: boolean result of applied comparison - - """ - if operator == 'gt': - return value > threshold - elif operator == 'gte': - return value >= threshold - elif operator == 'lt': - return value < threshold - elif operator == 'lte': - return value <= threshold - elif operator == 'neq': - return value != threshold - elif operator == 'eq': - return value == threshold - - msg = _("Unable to filter on a unknown operator.") - raise exception.InvalidFilterOperatorValue(msg) diff --git a/glance/common/wsgi.py b/glance/common/wsgi.py deleted file mode 100644 index e09ef328..00000000 --- a/glance/common/wsgi.py +++ /dev/null @@ -1,1213 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space 
Administration. -# Copyright 2010 OpenStack Foundation -# Copyright 2014 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Utility methods for working with WSGI servers -""" -from __future__ import print_function - -import errno -import functools -import os -import signal -import sys -import time - -from eventlet.green import socket -from eventlet.green import ssl -import eventlet.greenio -import eventlet.wsgi -import glance_store -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -from oslo_utils import strutils -from osprofiler import opts as profiler_opts -import routes.middleware -import six -import webob.dec -import webob.exc -from webob import multidict - -from glance.common import config -from glance.common import exception -from glance.common import utils -from glance import i18n -from glance.i18n import _, _LE, _LI, _LW - - -bind_opts = [ - cfg.HostAddressOpt('bind_host', - default='0.0.0.0', - help=_(""" -IP address to bind the glance servers to. - -Provide an IP address to bind the glance server to. The default -value is ``0.0.0.0``. - -Edit this option to enable the server to listen on one particular -IP address on the network card. This facilitates selection of a -particular network interface for the server. 
- -Possible values: - * A valid IPv4 address - * A valid IPv6 address - -Related options: - * None - -""")), - - cfg.PortOpt('bind_port', - help=_(""" -Port number on which the server will listen. - -Provide a valid port number to bind the server's socket to. This -port is then set to identify processes and forward network messages -that arrive at the server. The default bind_port value for the API -server is 9292 and for the registry server is 9191. - -Possible values: - * A valid port number (0 to 65535) - -Related options: - * None - -""")), -] - -socket_opts = [ - cfg.IntOpt('backlog', - default=4096, - min=1, - help=_(""" -Set the number of incoming connection requests. - -Provide a positive integer value to limit the number of requests in -the backlog queue. The default queue size is 4096. - -An incoming connection to a TCP listener socket is queued before a -connection can be established with the server. Setting the backlog -for a TCP socket ensures a limited queue size for incoming traffic. - -Possible values: - * Positive integer - -Related options: - * None - -""")), - - cfg.IntOpt('tcp_keepidle', - default=600, - min=1, - help=_(""" -Set the wait time before a connection recheck. - -Provide a positive integer value representing time in seconds which -is set as the idle wait time before a TCP keep alive packet can be -sent to the host. The default value is 600 seconds. - -Setting ``tcp_keepidle`` helps verify at regular intervals that a -connection is intact and prevents frequent TCP connection -reestablishment. - -Possible values: - * Positive integer value representing time in seconds - -Related options: - * None - -""")), - - cfg.StrOpt('ca_file', - sample_default='/etc/ssl/cafile', - help=_(""" -Absolute path to the CA file. - -Provide a string value representing a valid absolute path to -the Certificate Authority file to use for client authentication. 
- -A CA file typically contains necessary trusted certificates to -use for the client authentication. This is essential to ensure -that a secure connection is established to the server via the -internet. - -Possible values: - * Valid absolute path to the CA file - -Related options: - * None - -""")), - - cfg.StrOpt('cert_file', - sample_default='/etc/ssl/certs', - help=_(""" -Absolute path to the certificate file. - -Provide a string value representing a valid absolute path to the -certificate file which is required to start the API service -securely. - -A certificate file typically is a public key container and includes -the server's public key, server name, server information and the -signature which was a result of the verification process using the -CA certificate. This is required for a secure connection -establishment. - -Possible values: - * Valid absolute path to the certificate file - -Related options: - * None - -""")), - - cfg.StrOpt('key_file', - sample_default='/etc/ssl/key/key-file.pem', - help=_(""" -Absolute path to a private key file. - -Provide a string value representing a valid absolute path to a -private key file which is required to establish the client-server -connection. - -Possible values: - * Absolute path to the private key file - -Related options: - * None - -""")), -] - -eventlet_opts = [ - cfg.IntOpt('workers', - min=0, - help=_(""" -Number of Glance worker processes to start. - -Provide a non-negative integer value to set the number of child -process workers to service requests. By default, the number of CPUs -available is set as the value for ``workers``. - -Each worker process is made to listen on the port set in the -configuration file and contains a greenthread pool of size 1000. - -NOTE: Setting the number of workers to zero, triggers the creation -of a single API process with a greenthread pool of size 1000. 
- -Possible values: - * 0 - * Positive integer value (typically equal to the number of CPUs) - -Related options: - * None - -""")), - - cfg.IntOpt('max_header_line', - default=16384, - min=0, - help=_(""" -Maximum line size of message headers. - -Provide an integer value representing a length to limit the size of -message headers. The default value is 16384. - -NOTE: ``max_header_line`` may need to be increased when using large -tokens (typically those generated by the Keystone v3 API with big -service catalogs). However, it is to be kept in mind that larger -values for ``max_header_line`` would flood the logs. - -Setting ``max_header_line`` to 0 sets no limit for the line size of -message headers. - -Possible values: - * 0 - * Positive integer - -Related options: - * None - -""")), - - cfg.BoolOpt('http_keepalive', - default=True, - help=_(""" -Set keep alive option for HTTP over TCP. - -Provide a boolean value to determine sending of keep alive packets. -If set to ``False``, the server returns the header -"Connection: close". If set to ``True``, the server returns a -"Connection: Keep-Alive" in its responses. This enables retention of -the same TCP connection for HTTP conversations instead of opening a -new one with each new request. - -This option must be set to ``False`` if the client socket connection -needs to be closed explicitly after the response is received and -read successfully by the client. - -Possible values: - * True - * False - -Related options: - * None - -""")), - - cfg.IntOpt('client_socket_timeout', - default=900, - min=0, - help=_(""" -Timeout for client connections' socket operations. - -Provide a valid integer value representing time in seconds to set -the period of wait before an incoming connection can be closed. The -default value is 900 seconds. - -The value zero implies wait forever. 
- -Possible values: - * Zero - * Positive integer - -Related options: - * None - -""")), -] - -wsgi_opts = [ - cfg.StrOpt('secure_proxy_ssl_header', - deprecated_for_removal=True, - deprecated_reason=_('Use the http_proxy_to_wsgi middleware ' - 'instead.'), - help=_('The HTTP header used to determine the scheme for the ' - 'original request, even if it was removed by an SSL ' - 'terminating proxy. Typical value is ' - '"HTTP_X_FORWARDED_PROTO".')), -] - - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF -CONF.register_opts(bind_opts) -CONF.register_opts(socket_opts) -CONF.register_opts(eventlet_opts) -CONF.register_opts(wsgi_opts) -profiler_opts.set_defaults(CONF) - -ASYNC_EVENTLET_THREAD_POOL_LIST = [] - - -def get_num_workers(): - """Return the configured number of workers.""" - if CONF.workers is None: - # None implies the number of CPUs - return processutils.get_worker_count() - return CONF.workers - - -def get_bind_addr(default_port=None): - """Return the host and port to bind to.""" - return (CONF.bind_host, CONF.bind_port or default_port) - - -def ssl_wrap_socket(sock): - """ - Wrap an existing socket in SSL - - :param sock: non-SSL socket to wrap - - :returns: An SSL wrapped socket - """ - utils.validate_key_cert(CONF.key_file, CONF.cert_file) - - ssl_kwargs = { - 'server_side': True, - 'certfile': CONF.cert_file, - 'keyfile': CONF.key_file, - 'cert_reqs': ssl.CERT_NONE, - } - - if CONF.ca_file: - ssl_kwargs['ca_certs'] = CONF.ca_file - ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED - - return ssl.wrap_socket(sock, **ssl_kwargs) - - -def get_socket(default_port): - """ - Bind socket to bind ip:port in conf - - note: Mostly comes from Swift with a few small changes... 
- - :param default_port: port to bind to if none is specified in conf - - :returns: a socket object as returned from socket.listen or - ssl.wrap_socket if conf specifies cert_file - """ - bind_addr = get_bind_addr(default_port) - - # TODO(jaypipes): eventlet's greened socket module does not actually - # support IPv6 in getaddrinfo(). We need to get around this in the - # future or monitor upstream for a fix - address_family = [ - addr[0] for addr in socket.getaddrinfo(bind_addr[0], - bind_addr[1], - socket.AF_UNSPEC, - socket.SOCK_STREAM) - if addr[0] in (socket.AF_INET, socket.AF_INET6) - ][0] - - use_ssl = CONF.key_file or CONF.cert_file - if use_ssl and (not CONF.key_file or not CONF.cert_file): - raise RuntimeError(_("When running server in SSL mode, you must " - "specify both a cert_file and key_file " - "option value in your configuration file")) - - sock = utils.get_test_suite_socket() - retry_until = time.time() + 30 - - while not sock and time.time() < retry_until: - try: - sock = eventlet.listen(bind_addr, - backlog=CONF.backlog, - family=address_family) - except socket.error as err: - if err.args[0] != errno.EADDRINUSE: - raise - eventlet.sleep(0.1) - if not sock: - raise RuntimeError(_("Could not bind to %(host)s:%(port)s after" - " trying for 30 seconds") % - {'host': bind_addr[0], - 'port': bind_addr[1]}) - - return sock - - -def set_eventlet_hub(): - try: - eventlet.hubs.use_hub('poll') - except Exception: - try: - eventlet.hubs.use_hub('selects') - except Exception: - msg = _("eventlet 'poll' nor 'selects' hubs are available " - "on this platform") - raise exception.WorkerCreationFailure( - reason=msg) - - -def initialize_glance_store(): - """Initialize glance store.""" - glance_store.register_opts(CONF) - glance_store.create_stores(CONF) - glance_store.verify_default_store() - - -def get_asynchronous_eventlet_pool(size=1000): - """Return eventlet pool to caller. 
- - Also store pools created in global list, to wait on - it after getting signal for graceful shutdown. - - :param size: eventlet pool size - :returns: eventlet pool - """ - global ASYNC_EVENTLET_THREAD_POOL_LIST - - pool = eventlet.GreenPool(size=size) - # Add pool to global ASYNC_EVENTLET_THREAD_POOL_LIST - ASYNC_EVENTLET_THREAD_POOL_LIST.append(pool) - - return pool - - -class Server(object): - """Server class to manage multiple WSGI sockets and applications. - - This class requires initialize_glance_store set to True if - glance store needs to be initialized. - """ - def __init__(self, threads=1000, initialize_glance_store=False): - os.umask(0o27) # ensure files are created with the correct privileges - self._logger = logging.getLogger("eventlet.wsgi.server") - self.threads = threads - self.children = set() - self.stale_children = set() - self.running = True - # NOTE(abhishek): Allows us to only re-initialize glance_store when - # the API's configuration reloads. - self.initialize_glance_store = initialize_glance_store - self.pgid = os.getpid() - try: - # NOTE(flaper87): Make sure this process - # runs in its own process group. - os.setpgid(self.pgid, self.pgid) - except OSError: - # NOTE(flaper87): When running glance-control, - # (glance's functional tests, for example) - # setpgid fails with EPERM as glance-control - # creates a fresh session, of which the newly - # launched service becomes the leader (session - # leaders may not change process groups) - # - # Running glance-(api|registry) is safe and - # shouldn't raise any error here. 
- self.pgid = 0 - - def hup(self, *args): - """ - Reloads configuration files with zero down time - """ - signal.signal(signal.SIGHUP, signal.SIG_IGN) - raise exception.SIGHUPInterrupt - - def kill_children(self, *args): - """Kills the entire process group.""" - signal.signal(signal.SIGTERM, signal.SIG_IGN) - signal.signal(signal.SIGINT, signal.SIG_IGN) - self.running = False - os.killpg(self.pgid, signal.SIGTERM) - - def start(self, application, default_port): - """ - Run a WSGI server with the given application. - - :param application: The application to be run in the WSGI server - :param default_port: Port to bind to if none is specified in conf - """ - self.application = application - self.default_port = default_port - self.configure() - self.start_wsgi() - - def start_wsgi(self): - workers = get_num_workers() - if workers == 0: - # Useful for profiling, test, debug etc. - self.pool = self.create_pool() - self.pool.spawn_n(self._single_run, self.application, self.sock) - return - else: - LOG.info(_LI("Starting %d workers"), workers) - signal.signal(signal.SIGTERM, self.kill_children) - signal.signal(signal.SIGINT, self.kill_children) - signal.signal(signal.SIGHUP, self.hup) - while len(self.children) < workers: - self.run_child() - - def create_pool(self): - return get_asynchronous_eventlet_pool(size=self.threads) - - def _remove_children(self, pid): - if pid in self.children: - self.children.remove(pid) - LOG.info(_LI('Removed dead child %s'), pid) - elif pid in self.stale_children: - self.stale_children.remove(pid) - LOG.info(_LI('Removed stale child %s'), pid) - else: - LOG.warn(_LW('Unrecognised child %s') % pid) - - def _verify_and_respawn_children(self, pid, status): - if len(self.stale_children) == 0: - LOG.debug('No stale children') - if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0: - LOG.error(_LE('Not respawning child %d, cannot ' - 'recover from termination') % pid) - if not self.children and not self.stale_children: - LOG.info( - _LI('All 
workers have terminated. Exiting')) - self.running = False - else: - if len(self.children) < get_num_workers(): - self.run_child() - - def wait_on_children(self): - while self.running: - try: - pid, status = os.wait() - if os.WIFEXITED(status) or os.WIFSIGNALED(status): - self._remove_children(pid) - self._verify_and_respawn_children(pid, status) - except OSError as err: - if err.errno not in (errno.EINTR, errno.ECHILD): - raise - except KeyboardInterrupt: - LOG.info(_LI('Caught keyboard interrupt. Exiting.')) - break - except exception.SIGHUPInterrupt: - self.reload() - continue - eventlet.greenio.shutdown_safe(self.sock) - self.sock.close() - LOG.debug('Exited') - - def configure(self, old_conf=None, has_changed=None): - """ - Apply configuration settings - - :param old_conf: Cached old configuration settings (if any) - :param has changed: callable to determine if a parameter has changed - """ - eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line - self.client_socket_timeout = CONF.client_socket_timeout or None - self.configure_socket(old_conf, has_changed) - if self.initialize_glance_store: - initialize_glance_store() - - def reload(self): - """ - Reload and re-apply configuration settings - - Existing child processes are sent a SIGHUP signal - and will exit after completing existing requests. - New child processes, which will have the updated - configuration, are spawned. This allows preventing - interruption to the service. 
- """ - def _has_changed(old, new, param): - old = old.get(param) - new = getattr(new, param) - return (new != old) - - old_conf = utils.stash_conf_values() - has_changed = functools.partial(_has_changed, old_conf, CONF) - CONF.reload_config_files() - os.killpg(self.pgid, signal.SIGHUP) - self.stale_children = self.children - self.children = set() - - # Ensure any logging config changes are picked up - logging.setup(CONF, 'glance') - config.set_config_defaults() - - self.configure(old_conf, has_changed) - self.start_wsgi() - - def wait(self): - """Wait until all servers have completed running.""" - try: - if self.children: - self.wait_on_children() - else: - self.pool.waitall() - except KeyboardInterrupt: - pass - - def run_child(self): - def child_hup(*args): - """Shuts down child processes, existing requests are handled.""" - signal.signal(signal.SIGHUP, signal.SIG_IGN) - eventlet.wsgi.is_accepting = False - self.sock.close() - - pid = os.fork() - if pid == 0: - signal.signal(signal.SIGHUP, child_hup) - signal.signal(signal.SIGTERM, signal.SIG_DFL) - # ignore the interrupt signal to avoid a race whereby - # a child worker receives the signal before the parent - # and is respawned unnecessarily as a result - signal.signal(signal.SIGINT, signal.SIG_IGN) - # The child has no need to stash the unwrapped - # socket, and the reference prevents a clean - # exit on sighup - self._sock = None - self.run_server() - LOG.info(_LI('Child %d exiting normally'), os.getpid()) - # self.pool.waitall() is now called in wsgi's server so - # it's safe to exit here - sys.exit(0) - else: - LOG.info(_LI('Started child %s'), pid) - self.children.add(pid) - - def run_server(self): - """Run a WSGI server.""" - if cfg.CONF.pydev_worker_debug_host: - utils.setup_remote_pydev_debug(cfg.CONF.pydev_worker_debug_host, - cfg.CONF.pydev_worker_debug_port) - - eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0" - self.pool = self.create_pool() - try: - eventlet.wsgi.server(self.sock, - 
self.application, - log=self._logger, - custom_pool=self.pool, - debug=False, - keepalive=CONF.http_keepalive, - socket_timeout=self.client_socket_timeout) - except socket.error as err: - if err[0] != errno.EINVAL: - raise - - # waiting on async pools - if ASYNC_EVENTLET_THREAD_POOL_LIST: - for pool in ASYNC_EVENTLET_THREAD_POOL_LIST: - pool.waitall() - - def _single_run(self, application, sock): - """Start a WSGI server in a new green thread.""" - LOG.info(_LI("Starting single process server")) - eventlet.wsgi.server(sock, application, custom_pool=self.pool, - log=self._logger, - debug=False, - keepalive=CONF.http_keepalive, - socket_timeout=self.client_socket_timeout) - - def configure_socket(self, old_conf=None, has_changed=None): - """ - Ensure a socket exists and is appropriately configured. - - This function is called on start up, and can also be - called in the event of a configuration reload. - - When called for the first time a new socket is created. - If reloading and either bind_host or bind port have been - changed the existing socket must be closed and a new - socket opened (laws of physics). - - In all other cases (bind_host/bind_port have not changed) - the existing socket is reused. - - :param old_conf: Cached old configuration settings (if any) - :param has changed: callable to determine if a parameter has changed - """ - # Do we need a fresh socket? - new_sock = (old_conf is None or ( - has_changed('bind_host') or - has_changed('bind_port'))) - # Will we be using https? - use_ssl = not (not CONF.cert_file or not CONF.key_file) - # Were we using https before? - old_use_ssl = (old_conf is not None and not ( - not old_conf.get('key_file') or - not old_conf.get('cert_file'))) - # Do we now need to perform an SSL wrap on the socket? - wrap_sock = use_ssl is True and (old_use_ssl is False or new_sock) - # Do we now need to perform an SSL unwrap on the socket? 
- unwrap_sock = use_ssl is False and old_use_ssl is True - - if new_sock: - self._sock = None - if old_conf is not None: - self.sock.close() - _sock = get_socket(self.default_port) - _sock.setsockopt(socket.SOL_SOCKET, - socket.SO_REUSEADDR, 1) - # sockets can hang around forever without keepalive - _sock.setsockopt(socket.SOL_SOCKET, - socket.SO_KEEPALIVE, 1) - self._sock = _sock - - if wrap_sock: - self.sock = ssl_wrap_socket(self._sock) - - if unwrap_sock: - self.sock = self._sock - - if new_sock and not use_ssl: - self.sock = self._sock - - # Pick up newly deployed certs - if old_conf is not None and use_ssl is True and old_use_ssl is True: - if has_changed('cert_file') or has_changed('key_file'): - utils.validate_key_cert(CONF.key_file, CONF.cert_file) - if has_changed('cert_file'): - self.sock.certfile = CONF.cert_file - if has_changed('key_file'): - self.sock.keyfile = CONF.key_file - - if new_sock or (old_conf is not None and has_changed('tcp_keepidle')): - # This option isn't available in the OS X version of eventlet - if hasattr(socket, 'TCP_KEEPIDLE'): - self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, - CONF.tcp_keepidle) - - if old_conf is not None and has_changed('backlog'): - self.sock.listen(CONF.backlog) - - -class Middleware(object): - """ - Base WSGI middleware wrapper. These classes require an application to be - initialized that will be called next. By default the middleware will - simply call its wrapped app, or you can override __call__ to customize its - behavior. - """ - - def __init__(self, application): - self.application = application - - @classmethod - def factory(cls, global_conf, **local_conf): - def filter(app): - return cls(app) - return filter - - def process_request(self, req): - """ - Called on each request. - - If this returns None, the next application down the stack will be - executed. If it returns a response then that response will be returned - and execution will stop here. 
- - """ - return None - - def process_response(self, response): - """Do whatever you'd like to the response.""" - return response - - @webob.dec.wsgify - def __call__(self, req): - response = self.process_request(req) - if response: - return response - response = req.get_response(self.application) - response.request = req - try: - return self.process_response(response) - except webob.exc.HTTPException as e: - return e - - -class Debug(Middleware): - """ - Helper class that can be inserted into any WSGI application chain - to get information about the request and response. - """ - - @webob.dec.wsgify - def __call__(self, req): - print(("*" * 40) + " REQUEST ENVIRON") - for key, value in req.environ.items(): - print(key, "=", value) - print('') - resp = req.get_response(self.application) - - print(("*" * 40) + " RESPONSE HEADERS") - for (key, value) in six.iteritems(resp.headers): - print(key, "=", value) - print('') - - resp.app_iter = self.print_generator(resp.app_iter) - - return resp - - @staticmethod - def print_generator(app_iter): - """ - Iterator that prints the contents of a wrapper string iterator - when iterated. - """ - print(("*" * 40) + " BODY") - for part in app_iter: - sys.stdout.write(part) - sys.stdout.flush() - yield part - print() - - -class APIMapper(routes.Mapper): - """ - Handle route matching when url is '' because routes.Mapper returns - an error in this case. - """ - - def routematch(self, url=None, environ=None): - if url is "": - result = self._match("", environ) - return result[0], result[1] - return routes.Mapper.routematch(self, url, environ) - - -class RejectMethodController(object): - def reject(self, req, allowed_methods, *args, **kwargs): - LOG.debug("The method %s is not allowed for this resource", - req.environ['REQUEST_METHOD']) - raise webob.exc.HTTPMethodNotAllowed( - headers=[('Allow', allowed_methods)]) - - -class Router(object): - """ - WSGI middleware that maps incoming requests to WSGI apps. 
- """ - - def __init__(self, mapper): - """ - Create a router for the given routes.Mapper. - - Each route in `mapper` must specify a 'controller', which is a - WSGI app to call. You'll probably want to specify an 'action' as - well and have your controller be a wsgi.Controller, who will route - the request to the action method. - - Examples: - mapper = routes.Mapper() - sc = ServerController() - - # Explicit mapping of one route to a controller+action - mapper.connect(None, "/svrlist", controller=sc, action="list") - - # Actions are all implicitly defined - mapper.resource("server", "servers", controller=sc) - - # Pointing to an arbitrary WSGI app. You can specify the - # {path_info:.*} parameter so the target app can be handed just that - # section of the URL. - mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) - """ - mapper.redirect("", "/") - self.map = mapper - self._router = routes.middleware.RoutesMiddleware(self._dispatch, - self.map) - - @classmethod - def factory(cls, global_conf, **local_conf): - return cls(APIMapper()) - - @webob.dec.wsgify - def __call__(self, req): - """ - Route the incoming request to a controller based on self.map. - If no match, return either a 404(Not Found) or 501(Not Implemented). - """ - return self._router - - @staticmethod - @webob.dec.wsgify - def _dispatch(req): - """ - Called by self._router after matching the incoming request to a route - and putting the information into req.environ. Either returns 404, - 501, or the routed WSGI app's response. 
- """ - match = req.environ['wsgiorg.routing_args'][1] - if not match: - implemented_http_methods = ['GET', 'HEAD', 'POST', 'PUT', - 'DELETE', 'PATCH'] - if req.environ['REQUEST_METHOD'] not in implemented_http_methods: - return webob.exc.HTTPNotImplemented() - else: - return webob.exc.HTTPNotFound() - app = match['controller'] - return app - - -class Request(webob.Request): - """Add some OpenStack API-specific logic to the base webob.Request.""" - - def __init__(self, environ, *args, **kwargs): - if CONF.secure_proxy_ssl_header: - scheme = environ.get(CONF.secure_proxy_ssl_header) - if scheme: - environ['wsgi.url_scheme'] = scheme - super(Request, self).__init__(environ, *args, **kwargs) - - def best_match_content_type(self): - """Determine the requested response content-type.""" - supported = ('application/json',) - bm = self.accept.best_match(supported) - return bm or 'application/json' - - def get_content_type(self, allowed_content_types): - """Determine content type of the request body.""" - if "Content-Type" not in self.headers: - raise exception.InvalidContentType(content_type=None) - - content_type = self.content_type - - if content_type not in allowed_content_types: - raise exception.InvalidContentType(content_type=content_type) - else: - return content_type - - def best_match_language(self): - """Determines best available locale from the Accept-Language header. - - :returns: the best language match or None if the 'Accept-Language' - header was not available in the request. - """ - if not self.accept_language: - return None - langs = i18n.get_available_languages('glance') - return self.accept_language.best_match(langs) - - def get_range_from_request(self, image_size): - """Return the `Range` in a request.""" - - range_str = self.headers.get('Range') - if range_str is not None: - - # NOTE(dharinic): We do not support multi range requests. - if ',' in range_str: - msg = ("Requests with multiple ranges are not supported in " - "Glance. 
You may make multiple single-range requests " - "instead.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - range_ = webob.byterange.Range.parse(range_str) - if range_ is None: - msg = ("Invalid Range header.") - raise webob.exc.HTTPRequestRangeNotSatisfiable(msg) - # NOTE(dharinic): Ensure that a range like bytes=4- for an image - # size of 3 is invalidated as per rfc7233. - if range_.start >= image_size: - msg = ("Invalid start position in Range header. " - "Start position MUST be in the inclusive range [0, %s]." - % (image_size - 1)) - raise webob.exc.HTTPRequestRangeNotSatisfiable(msg) - return range_ - - # NOTE(dharinic): For backward compatibility reasons, we maintain - # support for 'Content-Range' in requests even though it's not - # correct to use it in requests.. - c_range_str = self.headers.get('Content-Range') - if c_range_str is not None: - content_range = webob.byterange.ContentRange.parse(c_range_str) - # NOTE(dharinic): Ensure that a content range like 1-4/* for an - # image size of 3 is invalidated. - if content_range is None: - msg = ("Invalid Content-Range header.") - raise webob.exc.HTTPRequestRangeNotSatisfiable(msg) - if (content_range.length is None and - content_range.stop > image_size): - msg = ("Invalid stop position in Content-Range header. " - "The stop position MUST be in the inclusive range " - "[0, %s]." % (image_size - 1)) - raise webob.exc.HTTPRequestRangeNotSatisfiable(msg) - if content_range.start >= image_size: - msg = ("Invalid start position in Content-Range header. " - "Start position MUST be in the inclusive range [0, %s]." 
- % (image_size - 1)) - raise webob.exc.HTTPRequestRangeNotSatisfiable(msg) - return content_range - - -class JSONRequestDeserializer(object): - valid_transfer_encoding = frozenset(['chunked', 'compress', 'deflate', - 'gzip', 'identity']) - httpverb_may_have_body = frozenset({'POST', 'PUT', 'PATCH'}) - - @classmethod - def is_valid_encoding(cls, request): - request_encoding = request.headers.get('transfer-encoding', '').lower() - return request_encoding in cls.valid_transfer_encoding - - @classmethod - def is_valid_method(cls, request): - return request.method.upper() in cls.httpverb_may_have_body - - def has_body(self, request): - """ - Returns whether a Webob.Request object will possess an entity body. - - :param request: Webob.Request object - """ - - if self.is_valid_encoding(request) and self.is_valid_method(request): - request.is_body_readable = True - return True - - if request.content_length is not None and request.content_length > 0: - return True - - return False - - @staticmethod - def _sanitizer(obj): - """Sanitizer method that will be passed to jsonutils.loads.""" - return obj - - def from_json(self, datastring): - try: - jsondata = jsonutils.loads(datastring, object_hook=self._sanitizer) - if not isinstance(jsondata, (dict, list)): - msg = _('Unexpected body type. 
Expected list/dict.') - raise webob.exc.HTTPBadRequest(explanation=msg) - return jsondata - except ValueError: - msg = _('Malformed JSON in request body.') - raise webob.exc.HTTPBadRequest(explanation=msg) - - def default(self, request): - if self.has_body(request): - return {'body': self.from_json(request.body)} - else: - return {} - - -class JSONResponseSerializer(object): - - def _sanitizer(self, obj): - """Sanitizer method that will be passed to jsonutils.dumps.""" - if hasattr(obj, "to_dict"): - return obj.to_dict() - if isinstance(obj, multidict.MultiDict): - return obj.mixed() - return jsonutils.to_primitive(obj) - - def to_json(self, data): - return jsonutils.dump_as_bytes(data, default=self._sanitizer) - - def default(self, response, result): - response.content_type = 'application/json' - body = self.to_json(result) - body = encodeutils.to_utf8(body) - response.body = body - - -def translate_exception(req, e): - """Translates all translatable elements of the given exception.""" - - # The RequestClass attribute in the webob.dec.wsgify decorator - # does not guarantee that the request object will be a particular - # type; this check is therefore necessary. - if not hasattr(req, "best_match_language"): - return e - - locale = req.best_match_language() - - if isinstance(e, webob.exc.HTTPError): - e.explanation = i18n.translate(e.explanation, locale) - e.detail = i18n.translate(e.detail, locale) - if getattr(e, 'body_template', None): - e.body_template = i18n.translate(e.body_template, locale) - return e - - -class Resource(object): - """ - WSGI app that handles (de)serialization and controller dispatch. - - Reads routing information supplied by RoutesMiddleware and calls - the requested action method upon its deserializer, controller, - and serializer. Those three objects may implement any of the basic - controller action methods (create, update, show, index, delete) - along with any that may be specified in the api router. 
A 'default' - method may also be implemented to be used in place of any - non-implemented actions. Deserializer methods must accept a request - argument and return a dictionary. Controller methods must accept a - request argument. Additionally, they must also accept keyword - arguments that represent the keys returned by the Deserializer. They - may raise a webob.exc exception or return a dict, which will be - serialized by requested content type. - """ - - def __init__(self, controller, deserializer=None, serializer=None): - """ - :param controller: object that implement methods created by routes lib - :param deserializer: object that supports webob request deserialization - through controller-like actions - :param serializer: object that supports webob response serialization - through controller-like actions - """ - self.controller = controller - self.serializer = serializer or JSONResponseSerializer() - self.deserializer = deserializer or JSONRequestDeserializer() - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, request): - """WSGI method that controls (de)serialization and method dispatch.""" - action_args = self.get_action_args(request.environ) - action = action_args.pop('action', None) - body_reject = strutils.bool_from_string( - action_args.pop('body_reject', None)) - - try: - if body_reject and self.deserializer.has_body(request): - msg = _('A body is not expected with this request.') - raise webob.exc.HTTPBadRequest(explanation=msg) - deserialized_request = self.dispatch(self.deserializer, - action, request) - action_args.update(deserialized_request) - action_result = self.dispatch(self.controller, action, - request, **action_args) - except webob.exc.WSGIHTTPException as e: - exc_info = sys.exc_info() - e = translate_exception(request, e) - six.reraise(type(e), e, exc_info[2]) - except UnicodeDecodeError: - msg = _("Error decoding your request. 
Either the URL or the " - "request body contained characters that could not be " - "decoded by Glance") - raise webob.exc.HTTPBadRequest(explanation=msg) - except Exception as e: - LOG.exception(_LE("Caught error: %s"), - encodeutils.exception_to_unicode(e)) - response = webob.exc.HTTPInternalServerError() - return response - - try: - response = webob.Response(request=request) - self.dispatch(self.serializer, action, response, action_result) - # encode all headers in response to utf-8 to prevent unicode errors - for name, value in list(response.headers.items()): - if six.PY2 and isinstance(value, six.text_type): - response.headers[name] = encodeutils.safe_encode(value) - return response - except webob.exc.WSGIHTTPException as e: - return translate_exception(request, e) - except webob.exc.HTTPException as e: - return e - # return unserializable result (typically a webob exc) - except Exception: - return action_result - - def dispatch(self, obj, action, *args, **kwargs): - """Find action-specific method on self and call it.""" - try: - method = getattr(obj, action) - except AttributeError: - method = getattr(obj, 'default') - - return method(*args, **kwargs) - - def get_action_args(self, request_environment): - """Parse dictionary created by routes library.""" - try: - args = request_environment['wsgiorg.routing_args'][1].copy() - except Exception: - return {} - - try: - del args['controller'] - except KeyError: - pass - - try: - del args['format'] - except KeyError: - pass - - return args diff --git a/glance/common/wsgi_app.py b/glance/common/wsgi_app.py deleted file mode 100644 index 7b621c51..00000000 --- a/glance/common/wsgi_app.py +++ /dev/null @@ -1,55 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import glance_store -from oslo_config import cfg -from oslo_log import log as logging -import osprofiler.initializer - -from glance.common import config -from glance import notifier - -CONF = cfg.CONF -CONF.import_group("profiler", "glance.common.wsgi") -logging.register_options(CONF) - -CONFIG_FILES = ['glance-api-paste.ini', 'glance-api.conf'] - - -def _get_config_files(env=None): - if env is None: - env = os.environ - dirname = env.get('OS_GLANCE_CONFIG_DIR', '/etc/glance').strip() - return [os.path.join(dirname, config_file) for config_file in CONFIG_FILES] - - -def _setup_os_profiler(): - notifier.set_defaults() - if CONF.profiler.enabled: - osprofiler.initializer.init_from_conf(conf=CONF, - context={}, - project='glance', - service='api', - host=CONF.bind_host) - - -def init_app(): - config_files = _get_config_files() - CONF([], project='glance', default_config_files=config_files) - logging.setup(CONF, "glance") - glance_store.register_opts(CONF) - glance_store.create_stores(CONF) - glance_store.verify_default_store() - _setup_os_profiler() - return config.load_paste_app('glance-api') diff --git a/glance/common/wsme_utils.py b/glance/common/wsme_utils.py deleted file mode 100644 index 27ad6688..00000000 --- a/glance/common/wsme_utils.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from datetime import datetime - -from wsme import types as wsme_types - -from glance.common import timeutils - - -class WSMEModelTransformer(object): - - def to_dict(self): - # Return the wsme_attributes names:values as a dict - my_dict = {} - for attribute in self._wsme_attributes: - value = getattr(self, attribute.name) - if value is not wsme_types.Unset: - my_dict.update({attribute.name: value}) - return my_dict - - @classmethod - def to_wsme_model(model, db_entity, self_link=None, schema=None): - # Return the wsme_attributes names:values as a dict - names = [] - for attribute in model._wsme_attributes: - names.append(attribute.name) - - values = {} - for name in names: - value = getattr(db_entity, name, None) - if value is not None: - if type(value) == datetime: - iso_datetime_value = timeutils.isotime(value) - values.update({name: iso_datetime_value}) - else: - values.update({name: value}) - - if schema: - values['schema'] = schema - - model_object = model(**values) - - # 'self' kwarg is used in wsme.types.Base.__init__(self, ..) and - # conflicts during initialization. self_link is a proxy field to self. 
- if self_link: - model_object.self = self_link - - return model_object - - @classmethod - def get_mandatory_attrs(cls): - return [attr.name for attr in cls._wsme_attributes if attr.mandatory] - - -def _get_value(obj): - if obj is not wsme_types.Unset: - return obj - else: - return None diff --git a/glance/context.py b/glance/context.py deleted file mode 100644 index d38e02e5..00000000 --- a/glance/context.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2011-2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_context import context - -from glance.api import policy - - -class RequestContext(context.RequestContext): - """Stores information about the security context. - - Stores how the user accesses the system, as well as additional request - information. 
- - """ - - def __init__(self, owner_is_tenant=True, service_catalog=None, - policy_enforcer=None, **kwargs): - super(RequestContext, self).__init__(**kwargs) - self.owner_is_tenant = owner_is_tenant - self.service_catalog = service_catalog - self.policy_enforcer = policy_enforcer or policy.Enforcer() - if not self.is_admin: - self.is_admin = self.policy_enforcer.check_is_admin(self) - - def to_dict(self): - d = super(RequestContext, self).to_dict() - d.update({ - 'roles': self.roles, - 'service_catalog': self.service_catalog, - }) - return d - - def to_policy_values(self): - pdict = super(RequestContext, self).to_policy_values() - pdict['user'] = self.user - pdict['tenant'] = self.tenant - return pdict - - @classmethod - def from_dict(cls, values): - return cls(**values) - - @property - def owner(self): - """Return the owner to correlate with an image.""" - return self.tenant if self.owner_is_tenant else self.user - - @property - def can_see_deleted(self): - """Admins can see deleted by default""" - return self.show_deleted or self.is_admin - - -def get_admin_context(show_deleted=False): - """Create an administrator context.""" - return RequestContext(auth_token=None, - tenant=None, - is_admin=True, - show_deleted=show_deleted, - overwrite=False) diff --git a/glance/db/__init__.py b/glance/db/__init__.py deleted file mode 100644 index e2120dd3..00000000 --- a/glance/db/__init__.py +++ /dev/null @@ -1,872 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2010-2012 OpenStack Foundation -# Copyright 2013 IBM Corp. -# Copyright 2015 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_utils import importutils -from wsme.rest import json - -from glance.api.v2.model.metadef_property_type import PropertyType -from glance.common import crypt -from glance.common import exception -from glance.common import location_strategy -import glance.domain -import glance.domain.proxy -from glance.i18n import _ - -CONF = cfg.CONF -CONF.import_opt('image_size_cap', 'glance.common.config') -CONF.import_opt('metadata_encryption_key', 'glance.common.config') - - -def get_api(): - api = importutils.import_module(CONF.data_api) - if hasattr(api, 'configure'): - api.configure() - return api - - -def unwrap(db_api): - return db_api - - -# attributes common to all models -BASE_MODEL_ATTRS = set(['id', 'created_at', 'updated_at', 'deleted_at', - 'deleted']) - - -IMAGE_ATTRS = BASE_MODEL_ATTRS | set(['name', 'status', 'size', 'virtual_size', - 'disk_format', 'container_format', - 'min_disk', 'min_ram', 'is_public', - 'locations', 'checksum', 'owner', - 'protected']) - - -class ImageRepo(object): - - def __init__(self, context, db_api): - self.context = context - self.db_api = db_api - - def get(self, image_id): - try: - db_api_image = dict(self.db_api.image_get(self.context, image_id)) - if db_api_image['deleted']: - raise exception.ImageNotFound() - except (exception.ImageNotFound, exception.Forbidden): - msg = _("No image found with ID %s") % image_id - raise exception.ImageNotFound(msg) - tags = self.db_api.image_tag_get_all(self.context, image_id) - image = self._format_image_from_db(db_api_image, tags) - return 
ImageProxy(image, self.context, self.db_api) - - def list(self, marker=None, limit=None, sort_key=None, - sort_dir=None, filters=None, member_status='accepted'): - sort_key = ['created_at'] if not sort_key else sort_key - sort_dir = ['desc'] if not sort_dir else sort_dir - db_api_images = self.db_api.image_get_all( - self.context, filters=filters, marker=marker, limit=limit, - sort_key=sort_key, sort_dir=sort_dir, - member_status=member_status, return_tag=True) - images = [] - for db_api_image in db_api_images: - db_image = dict(db_api_image) - image = self._format_image_from_db(db_image, db_image['tags']) - images.append(image) - return images - - def _format_image_from_db(self, db_image, db_tags): - properties = {} - for prop in db_image.pop('properties'): - # NOTE(markwash) db api requires us to filter deleted - if not prop['deleted']: - properties[prop['name']] = prop['value'] - locations = [loc for loc in db_image['locations'] - if loc['status'] == 'active'] - if CONF.metadata_encryption_key: - key = CONF.metadata_encryption_key - for l in locations: - l['url'] = crypt.urlsafe_decrypt(key, l['url']) - return glance.domain.Image( - image_id=db_image['id'], - name=db_image['name'], - status=db_image['status'], - created_at=db_image['created_at'], - updated_at=db_image['updated_at'], - visibility=db_image['visibility'], - min_disk=db_image['min_disk'], - min_ram=db_image['min_ram'], - protected=db_image['protected'], - locations=location_strategy.get_ordered_locations(locations), - checksum=db_image['checksum'], - owner=db_image['owner'], - disk_format=db_image['disk_format'], - container_format=db_image['container_format'], - size=db_image['size'], - virtual_size=db_image['virtual_size'], - extra_properties=properties, - tags=db_tags - ) - - def _format_image_to_db(self, image): - locations = image.locations - if CONF.metadata_encryption_key: - key = CONF.metadata_encryption_key - ld = [] - for loc in locations: - url = crypt.urlsafe_encrypt(key, loc['url']) - 
ld.append({'url': url, 'metadata': loc['metadata'], - 'status': loc['status'], - # NOTE(zhiyan): New location has no ID field. - 'id': loc.get('id')}) - locations = ld - return { - 'id': image.image_id, - 'name': image.name, - 'status': image.status, - 'created_at': image.created_at, - 'min_disk': image.min_disk, - 'min_ram': image.min_ram, - 'protected': image.protected, - 'locations': locations, - 'checksum': image.checksum, - 'owner': image.owner, - 'disk_format': image.disk_format, - 'container_format': image.container_format, - 'size': image.size, - 'virtual_size': image.virtual_size, - 'visibility': image.visibility, - 'properties': dict(image.extra_properties), - } - - def add(self, image): - image_values = self._format_image_to_db(image) - if (image_values['size'] is not None - and image_values['size'] > CONF.image_size_cap): - raise exception.ImageSizeLimitExceeded - # the updated_at value is not set in the _format_image_to_db - # function since it is specific to image create - image_values['updated_at'] = image.updated_at - new_values = self.db_api.image_create(self.context, image_values) - self.db_api.image_tag_set_all(self.context, - image.image_id, image.tags) - image.created_at = new_values['created_at'] - image.updated_at = new_values['updated_at'] - - def save(self, image, from_state=None): - image_values = self._format_image_to_db(image) - if (image_values['size'] is not None - and image_values['size'] > CONF.image_size_cap): - raise exception.ImageSizeLimitExceeded - try: - new_values = self.db_api.image_update(self.context, - image.image_id, - image_values, - purge_props=True, - from_state=from_state) - except (exception.ImageNotFound, exception.Forbidden): - msg = _("No image found with ID %s") % image.image_id - raise exception.ImageNotFound(msg) - self.db_api.image_tag_set_all(self.context, image.image_id, - image.tags) - image.updated_at = new_values['updated_at'] - - def remove(self, image): - try: - self.db_api.image_update(self.context, 
image.image_id, - {'status': image.status}, - purge_props=True) - except (exception.ImageNotFound, exception.Forbidden): - msg = _("No image found with ID %s") % image.image_id - raise exception.ImageNotFound(msg) - # NOTE(markwash): don't update tags? - new_values = self.db_api.image_destroy(self.context, image.image_id) - image.updated_at = new_values['updated_at'] - - -class ImageProxy(glance.domain.proxy.Image): - - def __init__(self, image, context, db_api): - self.context = context - self.db_api = db_api - self.image = image - super(ImageProxy, self).__init__(image) - - -class ImageMemberRepo(object): - - def __init__(self, context, db_api, image): - self.context = context - self.db_api = db_api - self.image = image - - def _format_image_member_from_db(self, db_image_member): - return glance.domain.ImageMembership( - id=db_image_member['id'], - image_id=db_image_member['image_id'], - member_id=db_image_member['member'], - status=db_image_member['status'], - created_at=db_image_member['created_at'], - updated_at=db_image_member['updated_at'] - ) - - def _format_image_member_to_db(self, image_member): - image_member = {'image_id': self.image.image_id, - 'member': image_member.member_id, - 'status': image_member.status, - 'created_at': image_member.created_at} - return image_member - - def list(self): - db_members = self.db_api.image_member_find( - self.context, image_id=self.image.image_id) - image_members = [] - for db_member in db_members: - image_members.append(self._format_image_member_from_db(db_member)) - return image_members - - def add(self, image_member): - try: - self.get(image_member.member_id) - except exception.NotFound: - pass - else: - msg = _('The target member %(member_id)s is already ' - 'associated with image %(image_id)s.') % { - 'member_id': image_member.member_id, - 'image_id': self.image.image_id} - raise exception.Duplicate(msg) - - image_member_values = self._format_image_member_to_db(image_member) - # Note(shalq): find the image member 
including the member marked with - # deleted. We will use only one record to represent membership between - # the same image and member. The record of the deleted image member - # will be reused, if it exists, update its properties instead of - # creating a new one. - members = self.db_api.image_member_find(self.context, - image_id=self.image.image_id, - member=image_member.member_id, - include_deleted=True) - if members: - new_values = self.db_api.image_member_update(self.context, - members[0]['id'], - image_member_values) - else: - new_values = self.db_api.image_member_create(self.context, - image_member_values) - image_member.created_at = new_values['created_at'] - image_member.updated_at = new_values['updated_at'] - image_member.id = new_values['id'] - - def remove(self, image_member): - try: - self.db_api.image_member_delete(self.context, image_member.id) - except (exception.NotFound, exception.Forbidden): - msg = _("The specified member %s could not be found") - raise exception.NotFound(msg % image_member.id) - - def save(self, image_member, from_state=None): - image_member_values = self._format_image_member_to_db(image_member) - try: - new_values = self.db_api.image_member_update(self.context, - image_member.id, - image_member_values) - except (exception.NotFound, exception.Forbidden): - raise exception.NotFound() - image_member.updated_at = new_values['updated_at'] - - def get(self, member_id): - try: - db_api_image_member = self.db_api.image_member_find( - self.context, - self.image.image_id, - member_id) - if not db_api_image_member: - raise exception.NotFound() - except (exception.NotFound, exception.Forbidden): - raise exception.NotFound() - - image_member = self._format_image_member_from_db( - db_api_image_member[0]) - return image_member - - -class TaskRepo(object): - - def __init__(self, context, db_api): - self.context = context - self.db_api = db_api - - def _format_task_from_db(self, db_task): - return glance.domain.Task( - task_id=db_task['id'], 
- task_type=db_task['type'], - status=db_task['status'], - owner=db_task['owner'], - expires_at=db_task['expires_at'], - created_at=db_task['created_at'], - updated_at=db_task['updated_at'], - task_input=db_task['input'], - result=db_task['result'], - message=db_task['message'], - ) - - def _format_task_stub_from_db(self, db_task): - return glance.domain.TaskStub( - task_id=db_task['id'], - task_type=db_task['type'], - status=db_task['status'], - owner=db_task['owner'], - expires_at=db_task['expires_at'], - created_at=db_task['created_at'], - updated_at=db_task['updated_at'], - ) - - def _format_task_to_db(self, task): - task = {'id': task.task_id, - 'type': task.type, - 'status': task.status, - 'input': task.task_input, - 'result': task.result, - 'owner': task.owner, - 'message': task.message, - 'expires_at': task.expires_at, - 'created_at': task.created_at, - 'updated_at': task.updated_at, - } - return task - - def get(self, task_id): - try: - db_api_task = self.db_api.task_get(self.context, task_id) - except (exception.NotFound, exception.Forbidden): - msg = _('Could not find task %s') % task_id - raise exception.NotFound(msg) - return self._format_task_from_db(db_api_task) - - def list(self, marker=None, limit=None, sort_key='created_at', - sort_dir='desc', filters=None): - db_api_tasks = self.db_api.task_get_all(self.context, - filters=filters, - marker=marker, - limit=limit, - sort_key=sort_key, - sort_dir=sort_dir) - return [self._format_task_stub_from_db(task) for task in db_api_tasks] - - def save(self, task): - task_values = self._format_task_to_db(task) - try: - updated_values = self.db_api.task_update(self.context, - task.task_id, - task_values) - except (exception.NotFound, exception.Forbidden): - msg = _('Could not find task %s') % task.task_id - raise exception.NotFound(msg) - task.updated_at = updated_values['updated_at'] - - def add(self, task): - task_values = self._format_task_to_db(task) - updated_values = self.db_api.task_create(self.context, 
task_values) - task.created_at = updated_values['created_at'] - task.updated_at = updated_values['updated_at'] - - def remove(self, task): - task_values = self._format_task_to_db(task) - try: - self.db_api.task_update(self.context, task.task_id, task_values) - updated_values = self.db_api.task_delete(self.context, - task.task_id) - except (exception.NotFound, exception.Forbidden): - msg = _('Could not find task %s') % task.task_id - raise exception.NotFound(msg) - task.updated_at = updated_values['updated_at'] - task.deleted_at = updated_values['deleted_at'] - - -class MetadefNamespaceRepo(object): - - def __init__(self, context, db_api): - self.context = context - self.db_api = db_api - - def _format_namespace_from_db(self, namespace_obj): - return glance.domain.MetadefNamespace( - namespace_id=namespace_obj['id'], - namespace=namespace_obj['namespace'], - display_name=namespace_obj['display_name'], - description=namespace_obj['description'], - owner=namespace_obj['owner'], - visibility=namespace_obj['visibility'], - protected=namespace_obj['protected'], - created_at=namespace_obj['created_at'], - updated_at=namespace_obj['updated_at'] - ) - - def _format_namespace_to_db(self, namespace_obj): - namespace = { - 'namespace': namespace_obj.namespace, - 'display_name': namespace_obj.display_name, - 'description': namespace_obj.description, - 'visibility': namespace_obj.visibility, - 'protected': namespace_obj.protected, - 'owner': namespace_obj.owner - } - return namespace - - def add(self, namespace): - self.db_api.metadef_namespace_create( - self.context, - self._format_namespace_to_db(namespace) - ) - - def get(self, namespace): - try: - db_api_namespace = self.db_api.metadef_namespace_get( - self.context, namespace) - except (exception.NotFound, exception.Forbidden): - msg = _('Could not find namespace %s') % namespace - raise exception.NotFound(msg) - return self._format_namespace_from_db(db_api_namespace) - - def list(self, marker=None, limit=None, 
sort_key='created_at', - sort_dir='desc', filters=None): - db_namespaces = self.db_api.metadef_namespace_get_all( - self.context, - marker=marker, - limit=limit, - sort_key=sort_key, - sort_dir=sort_dir, - filters=filters - ) - return [self._format_namespace_from_db(namespace_obj) - for namespace_obj in db_namespaces] - - def remove(self, namespace): - try: - self.db_api.metadef_namespace_delete(self.context, - namespace.namespace) - except (exception.NotFound, exception.Forbidden): - msg = _("The specified namespace %s could not be found") - raise exception.NotFound(msg % namespace.namespace) - - def remove_objects(self, namespace): - try: - self.db_api.metadef_object_delete_namespace_content( - self.context, - namespace.namespace - ) - except (exception.NotFound, exception.Forbidden): - msg = _("The specified namespace %s could not be found") - raise exception.NotFound(msg % namespace.namespace) - - def remove_properties(self, namespace): - try: - self.db_api.metadef_property_delete_namespace_content( - self.context, - namespace.namespace - ) - except (exception.NotFound, exception.Forbidden): - msg = _("The specified namespace %s could not be found") - raise exception.NotFound(msg % namespace.namespace) - - def remove_tags(self, namespace): - try: - self.db_api.metadef_tag_delete_namespace_content( - self.context, - namespace.namespace - ) - except (exception.NotFound, exception.Forbidden): - msg = _("The specified namespace %s could not be found") - raise exception.NotFound(msg % namespace.namespace) - - def object_count(self, namespace_name): - return self.db_api.metadef_object_count( - self.context, - namespace_name - ) - - def property_count(self, namespace_name): - return self.db_api.metadef_property_count( - self.context, - namespace_name - ) - - def save(self, namespace): - try: - self.db_api.metadef_namespace_update( - self.context, namespace.namespace_id, - self._format_namespace_to_db(namespace) - ) - except exception.NotFound as e: - raise 
exception.NotFound(explanation=e.msg) - return namespace - - -class MetadefObjectRepo(object): - - def __init__(self, context, db_api): - self.context = context - self.db_api = db_api - self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) - - def _format_metadef_object_from_db(self, metadata_object, - namespace_entity): - required_str = metadata_object['required'] - required_list = required_str.split(",") if required_str else [] - - # Convert the persisted json schema to a dict of PropertyTypes - property_types = {} - json_props = metadata_object['json_schema'] - for id in json_props: - property_types[id] = json.fromjson(PropertyType, json_props[id]) - - return glance.domain.MetadefObject( - namespace=namespace_entity, - object_id=metadata_object['id'], - name=metadata_object['name'], - required=required_list, - description=metadata_object['description'], - properties=property_types, - created_at=metadata_object['created_at'], - updated_at=metadata_object['updated_at'] - ) - - def _format_metadef_object_to_db(self, metadata_object): - - required_str = (",".join(metadata_object.required) if - metadata_object.required else None) - - # Convert the model PropertyTypes dict to a JSON string - properties = metadata_object.properties - db_schema = {} - if properties: - for k, v in properties.items(): - json_data = json.tojson(PropertyType, v) - db_schema[k] = json_data - - db_metadata_object = { - 'name': metadata_object.name, - 'required': required_str, - 'description': metadata_object.description, - 'json_schema': db_schema - } - return db_metadata_object - - def add(self, metadata_object): - self.db_api.metadef_object_create( - self.context, - metadata_object.namespace, - self._format_metadef_object_to_db(metadata_object) - ) - - def get(self, namespace, object_name): - try: - namespace_entity = self.meta_namespace_repo.get(namespace) - db_metadata_object = self.db_api.metadef_object_get( - self.context, - namespace, - object_name) - except 
(exception.NotFound, exception.Forbidden): - msg = _('Could not find metadata object %s') % object_name - raise exception.NotFound(msg) - return self._format_metadef_object_from_db(db_metadata_object, - namespace_entity) - - def list(self, marker=None, limit=None, sort_key='created_at', - sort_dir='desc', filters=None): - namespace = filters['namespace'] - namespace_entity = self.meta_namespace_repo.get(namespace) - db_metadata_objects = self.db_api.metadef_object_get_all( - self.context, namespace) - return [self._format_metadef_object_from_db(metadata_object, - namespace_entity) - for metadata_object in db_metadata_objects] - - def remove(self, metadata_object): - try: - self.db_api.metadef_object_delete( - self.context, - metadata_object.namespace.namespace, - metadata_object.name - ) - except (exception.NotFound, exception.Forbidden): - msg = _("The specified metadata object %s could not be found") - raise exception.NotFound(msg % metadata_object.name) - - def save(self, metadata_object): - try: - self.db_api.metadef_object_update( - self.context, metadata_object.namespace.namespace, - metadata_object.object_id, - self._format_metadef_object_to_db(metadata_object)) - except exception.NotFound as e: - raise exception.NotFound(explanation=e.msg) - return metadata_object - - -class MetadefResourceTypeRepo(object): - - def __init__(self, context, db_api): - self.context = context - self.db_api = db_api - self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) - - def _format_resource_type_from_db(self, resource_type, namespace): - return glance.domain.MetadefResourceType( - namespace=namespace, - name=resource_type['name'], - prefix=resource_type['prefix'], - properties_target=resource_type['properties_target'], - created_at=resource_type['created_at'], - updated_at=resource_type['updated_at'] - ) - - def _format_resource_type_to_db(self, resource_type): - db_resource_type = { - 'name': resource_type.name, - 'prefix': resource_type.prefix, - 
'properties_target': resource_type.properties_target - } - return db_resource_type - - def add(self, resource_type): - self.db_api.metadef_resource_type_association_create( - self.context, resource_type.namespace, - self._format_resource_type_to_db(resource_type) - ) - - def get(self, resource_type, namespace): - namespace_entity = self.meta_namespace_repo.get(namespace) - db_resource_type = ( - self.db_api. - metadef_resource_type_association_get( - self.context, - namespace, - resource_type - ) - ) - return self._format_resource_type_from_db(db_resource_type, - namespace_entity) - - def list(self, filters=None): - namespace = filters['namespace'] - if namespace: - namespace_entity = self.meta_namespace_repo.get(namespace) - db_resource_types = ( - self.db_api. - metadef_resource_type_association_get_all_by_namespace( - self.context, - namespace - ) - ) - return [self._format_resource_type_from_db(resource_type, - namespace_entity) - for resource_type in db_resource_types] - else: - db_resource_types = ( - self.db_api. 
- metadef_resource_type_get_all(self.context) - ) - return [glance.domain.MetadefResourceType( - namespace=None, - name=resource_type['name'], - prefix=None, - properties_target=None, - created_at=resource_type['created_at'], - updated_at=resource_type['updated_at'] - ) for resource_type in db_resource_types] - - def remove(self, resource_type): - try: - self.db_api.metadef_resource_type_association_delete( - self.context, resource_type.namespace.namespace, - resource_type.name) - - except (exception.NotFound, exception.Forbidden): - msg = _("The specified resource type %s could not be found ") - raise exception.NotFound(msg % resource_type.name) - - -class MetadefPropertyRepo(object): - - def __init__(self, context, db_api): - self.context = context - self.db_api = db_api - self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) - - def _format_metadef_property_from_db( - self, - property, - namespace_entity): - - return glance.domain.MetadefProperty( - namespace=namespace_entity, - property_id=property['id'], - name=property['name'], - schema=property['json_schema'] - ) - - def _format_metadef_property_to_db(self, property): - - db_metadata_object = { - 'name': property.name, - 'json_schema': property.schema - } - return db_metadata_object - - def add(self, property): - self.db_api.metadef_property_create( - self.context, - property.namespace, - self._format_metadef_property_to_db(property) - ) - - def get(self, namespace, property_name): - try: - namespace_entity = self.meta_namespace_repo.get(namespace) - db_property_type = self.db_api.metadef_property_get( - self.context, - namespace, - property_name - ) - except (exception.NotFound, exception.Forbidden): - msg = _('Could not find property %s') % property_name - raise exception.NotFound(msg) - return self._format_metadef_property_from_db( - db_property_type, namespace_entity) - - def list(self, marker=None, limit=None, sort_key='created_at', - sort_dir='desc', filters=None): - namespace = 
filters['namespace'] - namespace_entity = self.meta_namespace_repo.get(namespace) - - db_properties = self.db_api.metadef_property_get_all( - self.context, namespace) - return ( - [self._format_metadef_property_from_db( - property, namespace_entity) for property in db_properties] - ) - - def remove(self, property): - try: - self.db_api.metadef_property_delete( - self.context, property.namespace.namespace, property.name) - except (exception.NotFound, exception.Forbidden): - msg = _("The specified property %s could not be found") - raise exception.NotFound(msg % property.name) - - def save(self, property): - try: - self.db_api.metadef_property_update( - self.context, property.namespace.namespace, - property.property_id, - self._format_metadef_property_to_db(property) - ) - except exception.NotFound as e: - raise exception.NotFound(explanation=e.msg) - return property - - -class MetadefTagRepo(object): - - def __init__(self, context, db_api): - self.context = context - self.db_api = db_api - self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) - - def _format_metadef_tag_from_db(self, metadata_tag, - namespace_entity): - return glance.domain.MetadefTag( - namespace=namespace_entity, - tag_id=metadata_tag['id'], - name=metadata_tag['name'], - created_at=metadata_tag['created_at'], - updated_at=metadata_tag['updated_at'] - ) - - def _format_metadef_tag_to_db(self, metadata_tag): - db_metadata_tag = { - 'name': metadata_tag.name - } - return db_metadata_tag - - def add(self, metadata_tag): - self.db_api.metadef_tag_create( - self.context, - metadata_tag.namespace, - self._format_metadef_tag_to_db(metadata_tag) - ) - - def add_tags(self, metadata_tags): - tag_list = [] - namespace = None - for metadata_tag in metadata_tags: - tag_list.append(self._format_metadef_tag_to_db(metadata_tag)) - if namespace is None: - namespace = metadata_tag.namespace - - self.db_api.metadef_tag_create_tags( - self.context, namespace, tag_list) - - def get(self, namespace, name): - 
try: - namespace_entity = self.meta_namespace_repo.get(namespace) - db_metadata_tag = self.db_api.metadef_tag_get( - self.context, - namespace, - name) - except (exception.NotFound, exception.Forbidden): - msg = _('Could not find metadata tag %s') % name - raise exception.NotFound(msg) - return self._format_metadef_tag_from_db(db_metadata_tag, - namespace_entity) - - def list(self, marker=None, limit=None, sort_key='created_at', - sort_dir='desc', filters=None): - namespace = filters['namespace'] - namespace_entity = self.meta_namespace_repo.get(namespace) - - db_metadata_tag = self.db_api.metadef_tag_get_all( - self.context, namespace, filters, marker, limit, sort_key, - sort_dir) - - return [self._format_metadef_tag_from_db(metadata_tag, - namespace_entity) - for metadata_tag in db_metadata_tag] - - def remove(self, metadata_tag): - try: - self.db_api.metadef_tag_delete( - self.context, - metadata_tag.namespace.namespace, - metadata_tag.name - ) - except (exception.NotFound, exception.Forbidden): - msg = _("The specified metadata tag %s could not be found") - raise exception.NotFound(msg % metadata_tag.name) - - def save(self, metadata_tag): - try: - self.db_api.metadef_tag_update( - self.context, metadata_tag.namespace.namespace, - metadata_tag.tag_id, - self._format_metadef_tag_to_db(metadata_tag)) - except exception.NotFound as e: - raise exception.NotFound(explanation=e.msg) - return metadata_tag diff --git a/glance/db/metadata.py b/glance/db/metadata.py deleted file mode 100644 index d8115d37..00000000 --- a/glance/db/metadata.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Copyright 2013 OpenStack Foundation -# Copyright 2013 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Metadata setup commands.""" - -import threading - -from oslo_config import cfg -from oslo_db import options as db_options -from stevedore import driver - -from glance.db.sqlalchemy import api as db_api - - -_IMPL = None -_LOCK = threading.Lock() - -db_options.set_defaults(cfg.CONF) - - -def get_backend(): - global _IMPL - if _IMPL is None: - with _LOCK: - if _IMPL is None: - _IMPL = driver.DriverManager( - "glance.database.metadata_backend", - cfg.CONF.database.backend).driver - return _IMPL - - -def load_metadefs(): - """Read metadefinition files and insert data into the database""" - return get_backend().db_load_metadefs(engine=db_api.get_engine(), - metadata_path=None, - merge=False, - prefer_new=False, - overwrite=False) - - -def unload_metadefs(): - """Unload metadefinitions from database""" - return get_backend().db_unload_metadefs(engine=db_api.get_engine()) - - -def export_metadefs(): - """Export metadefinitions from database to files""" - return get_backend().db_export_metadefs(engine=db_api.get_engine(), - metadata_path=None) diff --git a/glance/db/migration.py b/glance/db/migration.py deleted file mode 100644 index 638894b2..00000000 --- a/glance/db/migration.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Copyright 2013 OpenStack Foundation -# Copyright 2013 IBM Corp. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Database setup and migration commands.""" - -import os -import threading - -from oslo_config import cfg -from oslo_db import options as db_options -from stevedore import driver - - -_IMPL = None -_LOCK = threading.Lock() - -db_options.set_defaults(cfg.CONF) - - -def get_backend(): - global _IMPL - if _IMPL is None: - with _LOCK: - if _IMPL is None: - _IMPL = driver.DriverManager( - "glance.database.migration_backend", - cfg.CONF.database.backend).driver - return _IMPL - - -# Migration-related constants -EXPAND_BRANCH = 'expand' -CONTRACT_BRANCH = 'contract' -CURRENT_RELEASE = 'pike' -ALEMBIC_INIT_VERSION = 'liberty' -LATEST_REVISION = 'pike01' -INIT_VERSION = 0 - -MIGRATE_REPO_PATH = os.path.join( - os.path.abspath(os.path.dirname(__file__)), - 'sqlalchemy', - 'migrate_repo', -) diff --git a/glance/db/registry/__init__.py b/glance/db/registry/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/db/registry/api.py b/glance/db/registry/api.py deleted file mode 100644 index f4f7aef2..00000000 --- a/glance/db/registry/api.py +++ /dev/null @@ -1,546 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# Copyright 2015 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -This is the Registry's Driver API. - -This API relies on the registry RPC client (version >= 2). The functions bellow -work as a proxy for the database back-end configured in the registry service, -which means that everything returned by that back-end will be also returned by -this API. - - -This API exists for supporting deployments not willing to put database -credentials in glance-api. Those deployments can rely on this registry driver -that will talk to a remote registry service, which will then access the -database back-end. -""" - -import functools - -from glance.db import utils as db_utils -from glance.registry.client.v2 import api - - -def configure(): - api.configure_registry_client() - - -def _get_client(func): - """Injects a client instance to the each function - - This decorator creates an instance of the Registry - client and passes it as an argument to each function - in this API. - """ - @functools.wraps(func) - def wrapper(context, *args, **kwargs): - client = api.get_registry_client(context) - return func(client, *args, **kwargs) - return wrapper - - -@_get_client -def image_create(client, values, v1_mode=False): - """Create an image from the values dictionary.""" - return client.image_create(values=values, v1_mode=v1_mode) - - -@_get_client -def image_update(client, image_id, values, purge_props=False, from_state=None, - v1_mode=False): - """ - Set the given properties on an image and update it. - - :raises ImageNotFound: if image does not exist. 
- """ - return client.image_update(values=values, - image_id=image_id, - purge_props=purge_props, - from_state=from_state, - v1_mode=v1_mode) - - -@_get_client -def image_destroy(client, image_id): - """Destroy the image or raise if it does not exist.""" - return client.image_destroy(image_id=image_id) - - -@_get_client -def image_get(client, image_id, force_show_deleted=False, v1_mode=False): - return client.image_get(image_id=image_id, - force_show_deleted=force_show_deleted, - v1_mode=v1_mode) - - -def is_image_visible(context, image, status=None): - """Return True if the image is visible in this context.""" - return db_utils.is_image_visible(context, image, image_member_find, status) - - -@_get_client -def image_get_all(client, filters=None, marker=None, limit=None, - sort_key=None, sort_dir=None, - member_status='accepted', is_public=None, - admin_as_user=False, return_tag=False, v1_mode=False): - """ - Get all images that match zero or more filters. - - :param filters: dict of filter keys and values. If a 'properties' - key is present, it is treated as a dict of key/value - filters on the image properties attribute - :param marker: image id after which to start page - :param limit: maximum number of images to return - :param sort_key: image attribute by which results should be sorted - :param sort_dir: direction in which results should be sorted (asc, desc) - :param member_status: only return shared images that have this membership - status - :param is_public: If true, return only public images. If false, return - only private and shared images. - :param admin_as_user: For backwards compatibility. If true, then return to - an admin the equivalent set of images which it would see - if it were a regular user - :param return_tag: To indicates whether image entry in result includes it - relevant tag entries. 
This could improve upper-layer - query performance, to prevent using separated calls - :param v1_mode: If true, mutates the 'visibility' value of each image - into the v1-compatible field 'is_public' - """ - sort_key = ['created_at'] if not sort_key else sort_key - sort_dir = ['desc'] if not sort_dir else sort_dir - return client.image_get_all(filters=filters, marker=marker, limit=limit, - sort_key=sort_key, sort_dir=sort_dir, - member_status=member_status, - is_public=is_public, - admin_as_user=admin_as_user, - return_tag=return_tag, - v1_mode=v1_mode) - - -@_get_client -def image_property_create(client, values, session=None): - """Create an ImageProperty object""" - return client.image_property_create(values=values) - - -@_get_client -def image_property_delete(client, prop_ref, image_ref, session=None): - """ - Used internally by _image_property_create and image_property_update - """ - return client.image_property_delete(prop_ref=prop_ref, image_ref=image_ref) - - -@_get_client -def image_member_create(client, values, session=None): - """Create an ImageMember object""" - return client.image_member_create(values=values) - - -@_get_client -def image_member_update(client, memb_id, values): - """Update an ImageMember object""" - return client.image_member_update(memb_id=memb_id, values=values) - - -@_get_client -def image_member_delete(client, memb_id, session=None): - """Delete an ImageMember object""" - client.image_member_delete(memb_id=memb_id) - - -@_get_client -def image_member_find(client, image_id=None, member=None, status=None, - include_deleted=False): - """Find all members that meet the given criteria. - - Note, currently include_deleted should be true only when create a new - image membership, as there may be a deleted image membership between - the same image and tenant, the membership will be reused in this case. - It should be false in other cases. 
- - :param image_id: identifier of image entity - :param member: tenant to which membership has been granted - :include_deleted: A boolean indicating whether the result should include - the deleted record of image member - """ - return client.image_member_find(image_id=image_id, - member=member, - status=status, - include_deleted=include_deleted) - - -@_get_client -def image_member_count(client, image_id): - """Return the number of image members for this image - - :param image_id: identifier of image entity - """ - return client.image_member_count(image_id=image_id) - - -@_get_client -def image_tag_set_all(client, image_id, tags): - client.image_tag_set_all(image_id=image_id, tags=tags) - - -@_get_client -def image_tag_create(client, image_id, value, session=None): - """Create an image tag.""" - return client.image_tag_create(image_id=image_id, value=value) - - -@_get_client -def image_tag_delete(client, image_id, value, session=None): - """Delete an image tag.""" - client.image_tag_delete(image_id=image_id, value=value) - - -@_get_client -def image_tag_get_all(client, image_id, session=None): - """Get a list of tags for a specific image.""" - return client.image_tag_get_all(image_id=image_id) - - -@_get_client -def image_location_delete(client, image_id, location_id, status, session=None): - """Delete an image location.""" - client.image_location_delete(image_id=image_id, location_id=location_id, - status=status) - - -@_get_client -def image_location_update(client, image_id, location, session=None): - """Update image location.""" - client.image_location_update(image_id=image_id, location=location) - - -@_get_client -def user_get_storage_usage(client, owner_id, image_id=None, session=None): - return client.user_get_storage_usage(owner_id=owner_id, image_id=image_id) - - -@_get_client -def task_get(client, task_id, session=None, force_show_deleted=False): - """Get a single task object - :returns: task dictionary - """ - return client.task_get(task_id=task_id, 
session=session, - force_show_deleted=force_show_deleted) - - -@_get_client -def task_get_all(client, filters=None, marker=None, limit=None, - sort_key='created_at', sort_dir='desc', admin_as_user=False): - """Get all tasks that match zero or more filters. - - :param filters: dict of filter keys and values. - :param marker: task id after which to start page - :param limit: maximum number of tasks to return - :param sort_key: task attribute by which results should be sorted - :param sort_dir: direction in which results should be sorted (asc, desc) - :param admin_as_user: For backwards compatibility. If true, then return to - an admin the equivalent set of tasks which it would see - if it were a regular user - :returns: tasks set - """ - return client.task_get_all(filters=filters, marker=marker, limit=limit, - sort_key=sort_key, sort_dir=sort_dir, - admin_as_user=admin_as_user) - - -@_get_client -def task_create(client, values, session=None): - """Create a task object""" - return client.task_create(values=values, session=session) - - -@_get_client -def task_delete(client, task_id, session=None): - """Delete a task object""" - return client.task_delete(task_id=task_id, session=session) - - -@_get_client -def task_update(client, task_id, values, session=None): - return client.task_update(task_id=task_id, values=values, session=session) - - -# Metadef -@_get_client -def metadef_namespace_get_all( - client, marker=None, limit=None, sort_key='created_at', - sort_dir=None, filters=None, session=None): - return client.metadef_namespace_get_all( - marker=marker, limit=limit, - sort_key=sort_key, sort_dir=sort_dir, filters=filters) - - -@_get_client -def metadef_namespace_get(client, namespace_name, session=None): - return client.metadef_namespace_get(namespace_name=namespace_name) - - -@_get_client -def metadef_namespace_create(client, values, session=None): - return client.metadef_namespace_create(values=values) - - -@_get_client -def metadef_namespace_update( - client, 
namespace_id, namespace_dict, - session=None): - return client.metadef_namespace_update( - namespace_id=namespace_id, namespace_dict=namespace_dict) - - -@_get_client -def metadef_namespace_delete(client, namespace_name, session=None): - return client.metadef_namespace_delete( - namespace_name=namespace_name) - - -@_get_client -def metadef_object_get_all(client, namespace_name, session=None): - return client.metadef_object_get_all( - namespace_name=namespace_name) - - -@_get_client -def metadef_object_get( - client, - namespace_name, object_name, session=None): - return client.metadef_object_get( - namespace_name=namespace_name, object_name=object_name) - - -@_get_client -def metadef_object_create( - client, - namespace_name, object_dict, session=None): - return client.metadef_object_create( - namespace_name=namespace_name, object_dict=object_dict) - - -@_get_client -def metadef_object_update( - client, - namespace_name, object_id, - object_dict, session=None): - return client.metadef_object_update( - namespace_name=namespace_name, object_id=object_id, - object_dict=object_dict) - - -@_get_client -def metadef_object_delete( - client, - namespace_name, object_name, - session=None): - return client.metadef_object_delete( - namespace_name=namespace_name, object_name=object_name) - - -@_get_client -def metadef_object_delete_namespace_content( - client, - namespace_name, session=None): - return client.metadef_object_delete_namespace_content( - namespace_name=namespace_name) - - -@_get_client -def metadef_object_count( - client, - namespace_name, session=None): - return client.metadef_object_count( - namespace_name=namespace_name) - - -@_get_client -def metadef_property_get_all( - client, - namespace_name, session=None): - return client.metadef_property_get_all( - namespace_name=namespace_name) - - -@_get_client -def metadef_property_get( - client, - namespace_name, property_name, - session=None): - return client.metadef_property_get( - namespace_name=namespace_name, 
property_name=property_name) - - -@_get_client -def metadef_property_create( - client, - namespace_name, property_dict, - session=None): - return client.metadef_property_create( - namespace_name=namespace_name, property_dict=property_dict) - - -@_get_client -def metadef_property_update( - client, - namespace_name, property_id, - property_dict, session=None): - return client.metadef_property_update( - namespace_name=namespace_name, property_id=property_id, - property_dict=property_dict) - - -@_get_client -def metadef_property_delete( - client, - namespace_name, property_name, - session=None): - return client.metadef_property_delete( - namespace_name=namespace_name, property_name=property_name) - - -@_get_client -def metadef_property_delete_namespace_content( - client, - namespace_name, session=None): - return client.metadef_property_delete_namespace_content( - namespace_name=namespace_name) - - -@_get_client -def metadef_property_count( - client, - namespace_name, session=None): - return client.metadef_property_count( - namespace_name=namespace_name) - - -@_get_client -def metadef_resource_type_create(client, values, session=None): - return client.metadef_resource_type_create(values=values) - - -@_get_client -def metadef_resource_type_get( - client, - resource_type_name, session=None): - return client.metadef_resource_type_get( - resource_type_name=resource_type_name) - - -@_get_client -def metadef_resource_type_get_all(client, session=None): - return client.metadef_resource_type_get_all() - - -@_get_client -def metadef_resource_type_delete( - client, - resource_type_name, session=None): - return client.metadef_resource_type_delete( - resource_type_name=resource_type_name) - - -@_get_client -def metadef_resource_type_association_get( - client, - namespace_name, resource_type_name, - session=None): - return client.metadef_resource_type_association_get( - namespace_name=namespace_name, resource_type_name=resource_type_name) - - -@_get_client -def 
metadef_resource_type_association_create( - client, - namespace_name, values, session=None): - return client.metadef_resource_type_association_create( - namespace_name=namespace_name, values=values) - - -@_get_client -def metadef_resource_type_association_delete( - client, - namespace_name, resource_type_name, session=None): - return client.metadef_resource_type_association_delete( - namespace_name=namespace_name, resource_type_name=resource_type_name) - - -@_get_client -def metadef_resource_type_association_get_all_by_namespace( - client, - namespace_name, session=None): - return client.metadef_resource_type_association_get_all_by_namespace( - namespace_name=namespace_name) - - -@_get_client -def metadef_tag_get_all(client, namespace_name, filters=None, marker=None, - limit=None, sort_key='created_at', sort_dir=None, - session=None): - return client.metadef_tag_get_all( - namespace_name=namespace_name, filters=filters, marker=marker, - limit=limit, sort_key=sort_key, sort_dir=sort_dir, session=session) - - -@_get_client -def metadef_tag_get(client, namespace_name, name, session=None): - return client.metadef_tag_get( - namespace_name=namespace_name, name=name) - - -@_get_client -def metadef_tag_create( - client, namespace_name, tag_dict, session=None): - return client.metadef_tag_create( - namespace_name=namespace_name, tag_dict=tag_dict) - - -@_get_client -def metadef_tag_create_tags( - client, namespace_name, tag_list, session=None): - return client.metadef_tag_create_tags( - namespace_name=namespace_name, tag_list=tag_list) - - -@_get_client -def metadef_tag_update( - client, namespace_name, id, tag_dict, session=None): - return client.metadef_tag_update( - namespace_name=namespace_name, id=id, tag_dict=tag_dict) - - -@_get_client -def metadef_tag_delete( - client, namespace_name, name, session=None): - return client.metadef_tag_delete( - namespace_name=namespace_name, name=name) - - -@_get_client -def metadef_tag_delete_namespace_content( - client, 
namespace_name, session=None): - return client.metadef_tag_delete_namespace_content( - namespace_name=namespace_name) - - -@_get_client -def metadef_tag_count(client, namespace_name, session=None): - return client.metadef_tag_count(namespace_name=namespace_name) diff --git a/glance/db/simple/__init__.py b/glance/db/simple/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/db/simple/api.py b/glance/db/simple/api.py deleted file mode 100644 index d8133d13..00000000 --- a/glance/db/simple/api.py +++ /dev/null @@ -1,2059 +0,0 @@ -# Copyright 2012 OpenStack, Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy -import functools -import uuid - -from oslo_config import cfg -from oslo_log import log as logging -import six - -from glance.common import exception -from glance.common import timeutils -from glance.common import utils -from glance.db import utils as db_utils -from glance.i18n import _, _LI, _LW - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -DATA = { - 'images': {}, - 'members': {}, - 'metadef_namespace_resource_types': [], - 'metadef_namespaces': [], - 'metadef_objects': [], - 'metadef_properties': [], - 'metadef_resource_types': [], - 'metadef_tags': [], - 'tags': {}, - 'locations': [], - 'tasks': {}, - 'task_info': {}, -} - -INDEX = 0 - - -def log_call(func): - @functools.wraps(func) - def wrapped(*args, **kwargs): - LOG.info(_LI('Calling %(funcname)s: args=%(args)s, ' - 'kwargs=%(kwargs)s'), - {"funcname": func.__name__, - "args": args, - "kwargs": kwargs}) - output = func(*args, **kwargs) - LOG.info(_LI('Returning %(funcname)s: %(output)s'), - {"funcname": func.__name__, - "output": output}) - return output - return wrapped - - -def configure(): - if CONF.workers not in [0, 1]: - msg = _('CONF.workers should be set to 0 or 1 when using the ' - 'db.simple.api backend. Fore more info, see ' - 'https://bugs.launchpad.net/glance/+bug/1619508') - LOG.critical(msg) - raise SystemExit(msg) - - -def reset(): - global DATA - DATA = { - 'images': {}, - 'members': [], - 'metadef_namespace_resource_types': [], - 'metadef_namespaces': [], - 'metadef_objects': [], - 'metadef_properties': [], - 'metadef_resource_types': [], - 'metadef_tags': [], - 'tags': {}, - 'locations': [], - 'tasks': {}, - 'task_info': {}, - } - - -def clear_db_env(*args, **kwargs): - """ - Setup global environment configuration variables. - - We have no connection-oriented environment variables, so this is a NOOP. 
- """ - pass - - -def _get_session(): - return DATA - - -@utils.no_4byte_params -def _image_location_format(image_id, value, meta_data, status, deleted=False): - dt = timeutils.utcnow() - return { - 'id': str(uuid.uuid4()), - 'image_id': image_id, - 'created_at': dt, - 'updated_at': dt, - 'deleted_at': dt if deleted else None, - 'deleted': deleted, - 'url': value, - 'metadata': meta_data, - 'status': status, - } - - -def _image_property_format(image_id, name, value): - return { - 'image_id': image_id, - 'name': name, - 'value': value, - 'deleted': False, - 'deleted_at': None, - } - - -def _image_member_format(image_id, tenant_id, can_share, status='pending', - deleted=False): - dt = timeutils.utcnow() - return { - 'id': str(uuid.uuid4()), - 'image_id': image_id, - 'member': tenant_id, - 'can_share': can_share, - 'status': status, - 'created_at': dt, - 'updated_at': dt, - 'deleted': deleted, - } - - -def _pop_task_info_values(values): - task_info_values = {} - for k, v in list(values.items()): - if k in ['input', 'result', 'message']: - values.pop(k) - task_info_values[k] = v - - return task_info_values - - -def _format_task_from_db(task_ref, task_info_ref): - task = copy.deepcopy(task_ref) - if task_info_ref: - task_info = copy.deepcopy(task_info_ref) - task_info_values = _pop_task_info_values(task_info) - task.update(task_info_values) - return task - - -def _task_format(task_id, **values): - dt = timeutils.utcnow() - task = { - 'id': task_id, - 'type': 'import', - 'status': 'pending', - 'owner': None, - 'expires_at': None, - 'created_at': dt, - 'updated_at': dt, - 'deleted_at': None, - 'deleted': False, - } - task.update(values) - return task - - -def _task_info_format(task_id, **values): - task_info = { - 'task_id': task_id, - 'input': None, - 'result': None, - 'message': None, - } - task_info.update(values) - return task_info - - -@utils.no_4byte_params -def _image_update(image, values, properties): - # NOTE(bcwaldon): store properties as a list to match 
sqlalchemy driver - properties = [{'name': k, - 'value': v, - 'image_id': image['id'], - 'deleted': False} for k, v in properties.items()] - if 'properties' not in image.keys(): - image['properties'] = [] - image['properties'].extend(properties) - values = db_utils.ensure_image_dict_v2_compliant(values) - image.update(values) - return image - - -def _image_format(image_id, **values): - dt = timeutils.utcnow() - image = { - 'id': image_id, - 'name': None, - 'owner': None, - 'locations': [], - 'status': 'queued', - 'protected': False, - 'visibility': 'shared', - 'container_format': None, - 'disk_format': None, - 'min_ram': 0, - 'min_disk': 0, - 'size': None, - 'virtual_size': None, - 'checksum': None, - 'tags': [], - 'created_at': dt, - 'updated_at': dt, - 'deleted_at': None, - 'deleted': False, - } - - locations = values.pop('locations', None) - if locations is not None: - image['locations'] = [] - for location in locations: - location_ref = _image_location_format(image_id, - location['url'], - location['metadata'], - location['status']) - image['locations'].append(location_ref) - DATA['locations'].append(location_ref) - - return _image_update(image, values, values.pop('properties', {})) - - -def _filter_images(images, filters, context, - status='accepted', is_public=None, - admin_as_user=False): - filtered_images = [] - if 'properties' in filters: - prop_filter = filters.pop('properties') - filters.update(prop_filter) - - if status == 'all': - status = None - - visibility = filters.pop('visibility', None) - - for image in images: - member = image_member_find(context, image_id=image['id'], - member=context.owner, status=status) - is_member = len(member) > 0 - has_ownership = context.owner and image['owner'] == context.owner - image_is_public = image['visibility'] == 'public' - image_is_community = image['visibility'] == 'community' - image_is_shared = image['visibility'] == 'shared' - acts_as_admin = context.is_admin and not admin_as_user - can_see = 
(image_is_public - or image_is_community - or has_ownership - or (is_member and image_is_shared) - or acts_as_admin) - if not can_see: - continue - - if visibility: - if visibility == 'public': - if not image_is_public: - continue - elif visibility == 'private': - if not (image['visibility'] == 'private'): - continue - if not (has_ownership or acts_as_admin): - continue - elif visibility == 'shared': - if not image_is_shared: - continue - elif visibility == 'community': - if not image_is_community: - continue - else: - if (not has_ownership) and image_is_community: - continue - - if is_public is not None: - if not image_is_public == is_public: - continue - - to_add = True - for k, value in six.iteritems(filters): - key = k - if k.endswith('_min') or k.endswith('_max'): - key = key[0:-4] - try: - value = int(value) - except ValueError: - msg = _("Unable to filter on a range " - "with a non-numeric value.") - raise exception.InvalidFilterRangeValue(msg) - if k.endswith('_min'): - to_add = image.get(key) >= value - elif k.endswith('_max'): - to_add = image.get(key) <= value - elif k in ['created_at', 'updated_at']: - attr_value = image.get(key) - operator, isotime = utils.split_filter_op(value) - parsed_time = timeutils.parse_isotime(isotime) - threshold = timeutils.normalize_time(parsed_time) - to_add = utils.evaluate_filter_op(attr_value, operator, - threshold) - elif k in ['name', 'id', 'status', - 'container_format', 'disk_format']: - attr_value = image.get(key) - operator, list_value = utils.split_filter_op(value) - if operator == 'in': - threshold = utils.split_filter_value_for_quotes(list_value) - to_add = attr_value in threshold - elif operator == 'eq': - to_add = (attr_value == list_value) - else: - msg = (_("Unable to filter by unknown operator '%s'.") - % operator) - raise exception.InvalidFilterOperatorValue(msg) - - elif k != 'is_public' and image.get(k) is not None: - to_add = image.get(key) == value - elif k == 'tags': - filter_tags = value - image_tags 
= image_tag_get_all(context, image['id']) - for tag in filter_tags: - if tag not in image_tags: - to_add = False - break - else: - to_add = False - for p in image['properties']: - properties = {p['name']: p['value'], - 'deleted': p['deleted']} - to_add |= (properties.get(key) == value and - properties.get('deleted') is False) - - if not to_add: - break - - if to_add: - filtered_images.append(image) - - return filtered_images - - -def _do_pagination(context, images, marker, limit, show_deleted, - status='accepted'): - start = 0 - end = -1 - if marker is None: - start = 0 - else: - # Check that the image is accessible - _image_get(context, marker, force_show_deleted=show_deleted, - status=status) - - for i, image in enumerate(images): - if image['id'] == marker: - start = i + 1 - break - else: - raise exception.ImageNotFound() - - end = start + limit if limit is not None else None - return images[start:end] - - -def _sort_images(images, sort_key, sort_dir): - sort_key = ['created_at'] if not sort_key else sort_key - - default_sort_dir = 'desc' - - if not sort_dir: - sort_dir = [default_sort_dir] * len(sort_key) - elif len(sort_dir) == 1: - default_sort_dir = sort_dir[0] - sort_dir *= len(sort_key) - - for key in ['created_at', 'id']: - if key not in sort_key: - sort_key.append(key) - sort_dir.append(default_sort_dir) - - for key in sort_key: - if images and not (key in images[0]): - raise exception.InvalidSortKey() - - if any(dir for dir in sort_dir if dir not in ['asc', 'desc']): - raise exception.InvalidSortDir() - - if len(sort_key) != len(sort_dir): - raise exception.Invalid(message='Number of sort dirs does not match ' - 'the number of sort keys') - - for key, dir in reversed(list(zip(sort_key, sort_dir))): - reverse = dir == 'desc' - images.sort(key=lambda x: x[key] or '', reverse=reverse) - - return images - - -def _image_get(context, image_id, force_show_deleted=False, status=None): - try: - image = DATA['images'][image_id] - except KeyError: - 
LOG.warn(_LW('Could not find image %s') % image_id) - raise exception.ImageNotFound() - - if image['deleted'] and not (force_show_deleted - or context.can_see_deleted): - LOG.warn(_LW('Unable to get deleted image')) - raise exception.ImageNotFound() - - if not is_image_visible(context, image): - LOG.warn(_LW('Unable to get unowned image')) - raise exception.Forbidden("Image not visible to you") - - return image - - -@log_call -def image_get(context, image_id, session=None, force_show_deleted=False, - v1_mode=False): - image = copy.deepcopy(_image_get(context, image_id, force_show_deleted)) - image = _normalize_locations(context, image, - force_show_deleted=force_show_deleted) - if v1_mode: - image = db_utils.mutate_image_dict_to_v1(image) - return image - - -@log_call -def image_get_all(context, filters=None, marker=None, limit=None, - sort_key=None, sort_dir=None, - member_status='accepted', is_public=None, - admin_as_user=False, return_tag=False, v1_mode=False): - filters = filters or {} - images = DATA['images'].values() - images = _filter_images(images, filters, context, member_status, - is_public, admin_as_user) - images = _sort_images(images, sort_key, sort_dir) - images = _do_pagination(context, images, marker, limit, - filters.get('deleted')) - - force_show_deleted = True if filters.get('deleted') else False - res = [] - for image in images: - img = _normalize_locations(context, copy.deepcopy(image), - force_show_deleted=force_show_deleted) - if return_tag: - img['tags'] = image_tag_get_all(context, img['id']) - - if v1_mode: - img = db_utils.mutate_image_dict_to_v1(img) - res.append(img) - return res - - -@log_call -def image_property_create(context, values): - image = _image_get(context, values['image_id']) - prop = _image_property_format(values['image_id'], - values['name'], - values['value']) - image['properties'].append(prop) - return prop - - -@log_call -def image_property_delete(context, prop_ref, image_ref): - prop = None - for p in 
DATA['images'][image_ref]['properties']: - if p['name'] == prop_ref: - prop = p - if not prop: - raise exception.NotFound() - prop['deleted_at'] = timeutils.utcnow() - prop['deleted'] = True - return prop - - -@log_call -def image_member_find(context, image_id=None, member=None, - status=None, include_deleted=False): - filters = [] - images = DATA['images'] - members = DATA['members'] - - def is_visible(member): - return (member['member'] == context.owner or - images[member['image_id']]['owner'] == context.owner) - - if not context.is_admin: - filters.append(is_visible) - - if image_id is not None: - filters.append(lambda m: m['image_id'] == image_id) - if member is not None: - filters.append(lambda m: m['member'] == member) - if status is not None: - filters.append(lambda m: m['status'] == status) - - for f in filters: - members = filter(f, members) - return [copy.deepcopy(m) for m in members] - - -@log_call -def image_member_count(context, image_id): - """Return the number of image members for this image - - :param image_id: identifier of image entity - """ - if not image_id: - msg = _("Image id is required.") - raise exception.Invalid(msg) - - members = DATA['members'] - return len([x for x in members if x['image_id'] == image_id]) - - -@log_call -def image_member_create(context, values): - member = _image_member_format(values['image_id'], - values['member'], - values.get('can_share', False), - values.get('status', 'pending'), - values.get('deleted', False)) - global DATA - DATA['members'].append(member) - return copy.deepcopy(member) - - -@log_call -def image_member_update(context, member_id, values): - global DATA - for member in DATA['members']: - if member['id'] == member_id: - member.update(values) - member['updated_at'] = timeutils.utcnow() - return copy.deepcopy(member) - else: - raise exception.NotFound() - - -@log_call -def image_member_delete(context, member_id): - global DATA - for i, member in enumerate(DATA['members']): - if member['id'] == 
member_id: - del DATA['members'][i] - break - else: - raise exception.NotFound() - - -@log_call -@utils.no_4byte_params -def image_location_add(context, image_id, location): - deleted = location['status'] in ('deleted', 'pending_delete') - location_ref = _image_location_format(image_id, - value=location['url'], - meta_data=location['metadata'], - status=location['status'], - deleted=deleted) - DATA['locations'].append(location_ref) - image = DATA['images'][image_id] - image.setdefault('locations', []).append(location_ref) - - -@log_call -@utils.no_4byte_params -def image_location_update(context, image_id, location): - loc_id = location.get('id') - if loc_id is None: - msg = _("The location data has an invalid ID: %d") % loc_id - raise exception.Invalid(msg) - - deleted = location['status'] in ('deleted', 'pending_delete') - updated_time = timeutils.utcnow() - delete_time = updated_time if deleted else None - - updated = False - for loc in DATA['locations']: - if loc['id'] == loc_id and loc['image_id'] == image_id: - loc.update({"value": location['url'], - "meta_data": location['metadata'], - "status": location['status'], - "deleted": deleted, - "updated_at": updated_time, - "deleted_at": delete_time}) - updated = True - break - - if not updated: - msg = (_("No location found with ID %(loc)s from image %(img)s") % - dict(loc=loc_id, img=image_id)) - LOG.warn(msg) - raise exception.NotFound(msg) - - -@log_call -def image_location_delete(context, image_id, location_id, status, - delete_time=None): - if status not in ('deleted', 'pending_delete'): - msg = _("The status of deleted image location can only be set to " - "'pending_delete' or 'deleted'.") - raise exception.Invalid(msg) - - deleted = False - for loc in DATA['locations']: - if loc['id'] == location_id and loc['image_id'] == image_id: - deleted = True - delete_time = delete_time or timeutils.utcnow() - loc.update({"deleted": deleted, - "status": status, - "updated_at": delete_time, - "deleted_at": 
delete_time}) - break - - if not deleted: - msg = (_("No location found with ID %(loc)s from image %(img)s") % - dict(loc=location_id, img=image_id)) - LOG.warn(msg) - raise exception.NotFound(msg) - - -def _image_locations_set(context, image_id, locations): - # NOTE(zhiyan): 1. Remove records from DB for deleted locations - used_loc_ids = [loc['id'] for loc in locations if loc.get('id')] - image = DATA['images'][image_id] - for loc in image['locations']: - if loc['id'] not in used_loc_ids and not loc['deleted']: - image_location_delete(context, image_id, loc['id'], 'deleted') - for i, loc in enumerate(DATA['locations']): - if (loc['image_id'] == image_id and loc['id'] not in used_loc_ids and - not loc['deleted']): - del DATA['locations'][i] - - # NOTE(zhiyan): 2. Adding or update locations - for loc in locations: - if loc.get('id') is None: - image_location_add(context, image_id, loc) - else: - image_location_update(context, image_id, loc) - - -def _image_locations_delete_all(context, image_id, delete_time=None): - image = DATA['images'][image_id] - for loc in image['locations']: - if not loc['deleted']: - image_location_delete(context, image_id, loc['id'], 'deleted', - delete_time=delete_time) - - for i, loc in enumerate(DATA['locations']): - if image_id == loc['image_id'] and loc['deleted'] == False: - del DATA['locations'][i] - - -def _normalize_locations(context, image, force_show_deleted=False): - """ - Generate suitable dictionary list for locations field of image. - - We don't need to set other data fields of location record which return - from image query. 
- """ - - if image['status'] == 'deactivated' and not context.is_admin: - # Locations are not returned for a deactivated image for non-admin user - image['locations'] = [] - return image - - if force_show_deleted: - locations = image['locations'] - else: - locations = [x for x in image['locations'] if not x['deleted']] - image['locations'] = [{'id': loc['id'], - 'url': loc['url'], - 'metadata': loc['metadata'], - 'status': loc['status']} - for loc in locations] - return image - - -@log_call -def image_create(context, image_values, v1_mode=False): - global DATA - image_id = image_values.get('id', str(uuid.uuid4())) - - if image_id in DATA['images']: - raise exception.Duplicate() - - if 'status' not in image_values: - raise exception.Invalid('status is a required attribute') - - allowed_keys = set(['id', 'name', 'status', 'min_ram', 'min_disk', 'size', - 'virtual_size', 'checksum', 'locations', 'owner', - 'protected', 'is_public', 'container_format', - 'disk_format', 'created_at', 'updated_at', 'deleted', - 'deleted_at', 'properties', 'tags', 'visibility']) - - incorrect_keys = set(image_values.keys()) - allowed_keys - if incorrect_keys: - raise exception.Invalid( - 'The keys %s are not valid' % str(incorrect_keys)) - - image = _image_format(image_id, **image_values) - DATA['images'][image_id] = image - DATA['tags'][image_id] = image.pop('tags', []) - - image = _normalize_locations(context, copy.deepcopy(image)) - if v1_mode: - image = db_utils.mutate_image_dict_to_v1(image) - return image - - -@log_call -def image_update(context, image_id, image_values, purge_props=False, - from_state=None, v1_mode=False): - global DATA - try: - image = DATA['images'][image_id] - except KeyError: - raise exception.ImageNotFound() - - location_data = image_values.pop('locations', None) - if location_data is not None: - _image_locations_set(context, image_id, location_data) - - # replace values for properties that already exist - new_properties = image_values.pop('properties', {}) - 
for prop in image['properties']: - if prop['name'] in new_properties: - prop['value'] = new_properties.pop(prop['name']) - elif purge_props: - # this matches weirdness in the sqlalchemy api - prop['deleted'] = True - - image['updated_at'] = timeutils.utcnow() - _image_update(image, image_values, new_properties) - DATA['images'][image_id] = image - - image = _normalize_locations(context, copy.deepcopy(image)) - if v1_mode: - image = db_utils.mutate_image_dict_to_v1(image) - return image - - -@log_call -def image_destroy(context, image_id): - global DATA - try: - delete_time = timeutils.utcnow() - DATA['images'][image_id]['deleted'] = True - DATA['images'][image_id]['deleted_at'] = delete_time - - # NOTE(flaper87): Move the image to one of the deleted statuses - # if it hasn't been done yet. - if (DATA['images'][image_id]['status'] not in - ['deleted', 'pending_delete']): - DATA['images'][image_id]['status'] = 'deleted' - - _image_locations_delete_all(context, image_id, - delete_time=delete_time) - - for prop in DATA['images'][image_id]['properties']: - image_property_delete(context, prop['name'], image_id) - - members = image_member_find(context, image_id=image_id) - for member in members: - image_member_delete(context, member['id']) - - tags = image_tag_get_all(context, image_id) - for tag in tags: - image_tag_delete(context, image_id, tag) - - return _normalize_locations(context, - copy.deepcopy(DATA['images'][image_id])) - except KeyError: - raise exception.ImageNotFound() - - -@log_call -def image_tag_get_all(context, image_id): - return DATA['tags'].get(image_id, []) - - -@log_call -def image_tag_get(context, image_id, value): - tags = image_tag_get_all(context, image_id) - if value in tags: - return value - else: - raise exception.NotFound() - - -@log_call -def image_tag_set_all(context, image_id, values): - global DATA - DATA['tags'][image_id] = list(values) - - -@log_call -@utils.no_4byte_params -def image_tag_create(context, image_id, value): - global DATA 
- DATA['tags'][image_id].append(value) - return value - - -@log_call -def image_tag_delete(context, image_id, value): - global DATA - try: - DATA['tags'][image_id].remove(value) - except ValueError: - raise exception.NotFound() - - -def is_image_visible(context, image, status=None): - if status == 'all': - status = None - return db_utils.is_image_visible(context, image, image_member_find, status) - - -def user_get_storage_usage(context, owner_id, image_id=None, session=None): - images = image_get_all(context, filters={'owner': owner_id}) - total = 0 - for image in images: - if image['status'] in ['killed', 'deleted']: - continue - - if image['id'] != image_id: - locations = [loc for loc in image['locations'] - if loc.get('status') != 'deleted'] - total += (image['size'] * len(locations)) - return total - - -@log_call -def task_create(context, values): - """Create a task object""" - global DATA - - task_values = copy.deepcopy(values) - task_id = task_values.get('id', str(uuid.uuid4())) - required_attributes = ['type', 'status', 'input'] - allowed_attributes = ['id', 'type', 'status', 'input', 'result', 'owner', - 'message', 'expires_at', 'created_at', - 'updated_at', 'deleted_at', 'deleted'] - - if task_id in DATA['tasks']: - raise exception.Duplicate() - - for key in required_attributes: - if key not in task_values: - raise exception.Invalid('%s is a required attribute' % key) - - incorrect_keys = set(task_values.keys()) - set(allowed_attributes) - if incorrect_keys: - raise exception.Invalid( - 'The keys %s are not valid' % str(incorrect_keys)) - - task_info_values = _pop_task_info_values(task_values) - task = _task_format(task_id, **task_values) - DATA['tasks'][task_id] = task - task_info = _task_info_create(task['id'], task_info_values) - - return _format_task_from_db(task, task_info) - - -@log_call -def task_update(context, task_id, values): - """Update a task object""" - global DATA - task_values = copy.deepcopy(values) - task_info_values = 
_pop_task_info_values(task_values) - try: - task = DATA['tasks'][task_id] - except KeyError: - LOG.debug("No task found with ID %s", task_id) - raise exception.TaskNotFound(task_id=task_id) - - task.update(task_values) - task['updated_at'] = timeutils.utcnow() - DATA['tasks'][task_id] = task - task_info = _task_info_update(task['id'], task_info_values) - - return _format_task_from_db(task, task_info) - - -@log_call -def task_get(context, task_id, force_show_deleted=False): - task, task_info = _task_get(context, task_id, force_show_deleted) - return _format_task_from_db(task, task_info) - - -def _task_get(context, task_id, force_show_deleted=False): - try: - task = DATA['tasks'][task_id] - except KeyError: - msg = _LW('Could not find task %s') % task_id - LOG.warn(msg) - raise exception.TaskNotFound(task_id=task_id) - - if task['deleted'] and not (force_show_deleted or context.can_see_deleted): - msg = _LW('Unable to get deleted task %s') % task_id - LOG.warn(msg) - raise exception.TaskNotFound(task_id=task_id) - - if not _is_task_visible(context, task): - LOG.debug("Forbidding request, task %s is not visible", task_id) - msg = _("Forbidding request, task %s is not visible") % task_id - raise exception.Forbidden(msg) - - task_info = _task_info_get(task_id) - - return task, task_info - - -@log_call -def task_delete(context, task_id): - global DATA - try: - DATA['tasks'][task_id]['deleted'] = True - DATA['tasks'][task_id]['deleted_at'] = timeutils.utcnow() - DATA['tasks'][task_id]['updated_at'] = timeutils.utcnow() - return copy.deepcopy(DATA['tasks'][task_id]) - except KeyError: - LOG.debug("No task found with ID %s", task_id) - raise exception.TaskNotFound(task_id=task_id) - - -def _task_soft_delete(context): - """Scrub task entities which are expired """ - global DATA - now = timeutils.utcnow() - tasks = DATA['tasks'].values() - - for task in tasks: - if(task['owner'] == context.owner and task['deleted'] == False - and task['expires_at'] <= now): - - 
task['deleted'] = True - task['deleted_at'] = timeutils.utcnow() - - -@log_call -def task_get_all(context, filters=None, marker=None, limit=None, - sort_key='created_at', sort_dir='desc'): - """ - Get all tasks that match zero or more filters. - - :param filters: dict of filter keys and values. - :param marker: task id after which to start page - :param limit: maximum number of tasks to return - :param sort_key: task attribute by which results should be sorted - :param sort_dir: direction in which results should be sorted (asc, desc) - :returns: tasks set - """ - _task_soft_delete(context) - filters = filters or {} - tasks = DATA['tasks'].values() - tasks = _filter_tasks(tasks, filters, context) - tasks = _sort_tasks(tasks, sort_key, sort_dir) - tasks = _paginate_tasks(context, tasks, marker, limit, - filters.get('deleted')) - - filtered_tasks = [] - for task in tasks: - filtered_tasks.append(_format_task_from_db(task, task_info_ref=None)) - - return filtered_tasks - - -def _is_task_visible(context, task): - """Return True if the task is visible in this context.""" - # Is admin == task visible - if context.is_admin: - return True - - # No owner == task visible - if task['owner'] is None: - return True - - # Perform tests based on whether we have an owner - if context.owner is not None: - if context.owner == task['owner']: - return True - - return False - - -def _filter_tasks(tasks, filters, context, admin_as_user=False): - filtered_tasks = [] - - for task in tasks: - has_ownership = context.owner and task['owner'] == context.owner - can_see = (has_ownership or (context.is_admin and not admin_as_user)) - if not can_see: - continue - - add = True - for k, value in six.iteritems(filters): - add = task[k] == value and task['deleted'] is False - if not add: - break - - if add: - filtered_tasks.append(task) - - return filtered_tasks - - -def _sort_tasks(tasks, sort_key, sort_dir): - reverse = False - if tasks and not (sort_key in tasks[0]): - raise 
exception.InvalidSortKey() - keyfn = lambda x: (x[sort_key] if x[sort_key] is not None else '', - x['created_at'], x['id']) - reverse = sort_dir == 'desc' - tasks.sort(key=keyfn, reverse=reverse) - return tasks - - -def _paginate_tasks(context, tasks, marker, limit, show_deleted): - start = 0 - end = -1 - if marker is None: - start = 0 - else: - # Check that the task is accessible - _task_get(context, marker, force_show_deleted=show_deleted) - - for i, task in enumerate(tasks): - if task['id'] == marker: - start = i + 1 - break - else: - if task: - raise exception.TaskNotFound(task_id=task['id']) - else: - msg = _("Task does not exist") - raise exception.NotFound(message=msg) - - end = start + limit if limit is not None else None - return tasks[start:end] - - -def _task_info_create(task_id, values): - """Create a Task Info for Task with given task ID""" - global DATA - task_info = _task_info_format(task_id, **values) - DATA['task_info'][task_id] = task_info - - return task_info - - -def _task_info_update(task_id, values): - """Update Task Info for Task with given task ID and updated values""" - global DATA - try: - task_info = DATA['task_info'][task_id] - except KeyError: - LOG.debug("No task info found with task id %s", task_id) - raise exception.TaskNotFound(task_id=task_id) - - task_info.update(values) - DATA['task_info'][task_id] = task_info - - return task_info - - -def _task_info_get(task_id): - """Get Task Info for Task with given task ID""" - global DATA - try: - task_info = DATA['task_info'][task_id] - except KeyError: - msg = _LW('Could not find task info %s') % task_id - LOG.warn(msg) - raise exception.TaskNotFound(task_id=task_id) - - return task_info - - -def _metadef_delete_namespace_content(get_func, key, context, namespace_name): - global DATA - metadefs = get_func(context, namespace_name) - data = DATA[key] - for metadef in metadefs: - data.remove(metadef) - return metadefs - - -@log_call -@utils.no_4byte_params -def 
metadef_namespace_create(context, values): - """Create a namespace object""" - global DATA - - namespace_values = copy.deepcopy(values) - namespace_name = namespace_values.get('namespace') - required_attributes = ['namespace', 'owner'] - allowed_attributes = ['namespace', 'owner', 'display_name', 'description', - 'visibility', 'protected'] - - for namespace in DATA['metadef_namespaces']: - if namespace['namespace'] == namespace_name: - LOG.debug("Can not create the metadata definition namespace. " - "Namespace=%s already exists.", namespace_name) - raise exception.MetadefDuplicateNamespace( - namespace_name=namespace_name) - - for key in required_attributes: - if key not in namespace_values: - raise exception.Invalid('%s is a required attribute' % key) - - incorrect_keys = set(namespace_values.keys()) - set(allowed_attributes) - if incorrect_keys: - raise exception.Invalid( - 'The keys %s are not valid' % str(incorrect_keys)) - - namespace = _format_namespace(namespace_values) - DATA['metadef_namespaces'].append(namespace) - - return namespace - - -@log_call -@utils.no_4byte_params -def metadef_namespace_update(context, namespace_id, values): - """Update a namespace object""" - global DATA - namespace_values = copy.deepcopy(values) - - namespace = metadef_namespace_get_by_id(context, namespace_id) - if namespace['namespace'] != values['namespace']: - for db_namespace in DATA['metadef_namespaces']: - if db_namespace['namespace'] == values['namespace']: - LOG.debug("Invalid update. It would result in a duplicate " - "metadata definition namespace with the same " - "name of %s", values['namespace']) - emsg = (_("Invalid update. 
It would result in a duplicate" - " metadata definition namespace with the same" - " name of %s") - % values['namespace']) - raise exception.MetadefDuplicateNamespace(emsg) - DATA['metadef_namespaces'].remove(namespace) - - namespace.update(namespace_values) - namespace['updated_at'] = timeutils.utcnow() - DATA['metadef_namespaces'].append(namespace) - - return namespace - - -@log_call -def metadef_namespace_get_by_id(context, namespace_id): - """Get a namespace object""" - try: - namespace = next(namespace for namespace in DATA['metadef_namespaces'] - if namespace['id'] == namespace_id) - except StopIteration: - msg = (_("Metadata definition namespace not found for id=%s") - % namespace_id) - LOG.warn(msg) - raise exception.MetadefNamespaceNotFound(msg) - - if not _is_namespace_visible(context, namespace): - LOG.debug("Forbidding request, metadata definition namespace=%s " - "is not visible.", namespace.namespace) - emsg = _("Forbidding request, metadata definition namespace=%s " - "is not visible.") % namespace.namespace - raise exception.MetadefForbidden(emsg) - - return namespace - - -@log_call -def metadef_namespace_get(context, namespace_name): - """Get a namespace object""" - try: - namespace = next(namespace for namespace in DATA['metadef_namespaces'] - if namespace['namespace'] == namespace_name) - except StopIteration: - LOG.debug("No namespace found with name %s", namespace_name) - raise exception.MetadefNamespaceNotFound( - namespace_name=namespace_name) - - _check_namespace_visibility(context, namespace, namespace_name) - - return namespace - - -@log_call -def metadef_namespace_get_all(context, - marker=None, - limit=None, - sort_key='created_at', - sort_dir='desc', - filters=None): - """Get a namespaces list""" - resource_types = filters.get('resource_types', []) if filters else [] - visibility = filters.get('visibility') if filters else None - - namespaces = [] - for namespace in DATA['metadef_namespaces']: - if not _is_namespace_visible(context, 
namespace): - continue - - if visibility and namespace['visibility'] != visibility: - continue - - if resource_types: - for association in DATA['metadef_namespace_resource_types']: - if association['namespace_id'] == namespace['id']: - if association['name'] in resource_types: - break - else: - continue - - namespaces.append(namespace) - - return namespaces - - -@log_call -def metadef_namespace_delete(context, namespace_name): - """Delete a namespace object""" - global DATA - - namespace = metadef_namespace_get(context, namespace_name) - DATA['metadef_namespaces'].remove(namespace) - - return namespace - - -@log_call -def metadef_namespace_delete_content(context, namespace_name): - """Delete a namespace content""" - global DATA - namespace = metadef_namespace_get(context, namespace_name) - namespace_id = namespace['id'] - - objects = [] - - for object in DATA['metadef_objects']: - if object['namespace_id'] != namespace_id: - objects.append(object) - - DATA['metadef_objects'] = objects - - properties = [] - - for property in DATA['metadef_objects']: - if property['namespace_id'] != namespace_id: - properties.append(object) - - DATA['metadef_objects'] = properties - - return namespace - - -@log_call -def metadef_object_get(context, namespace_name, object_name): - """Get a metadef object""" - namespace = metadef_namespace_get(context, namespace_name) - - _check_namespace_visibility(context, namespace, namespace_name) - - for object in DATA['metadef_objects']: - if (object['namespace_id'] == namespace['id'] and - object['name'] == object_name): - return object - else: - LOG.debug("The metadata definition object with name=%(name)s" - " was not found in namespace=%(namespace_name)s.", - {'name': object_name, 'namespace_name': namespace_name}) - raise exception.MetadefObjectNotFound(namespace_name=namespace_name, - object_name=object_name) - - -@log_call -def metadef_object_get_by_id(context, namespace_name, object_id): - """Get a metadef object""" - namespace = 
metadef_namespace_get(context, namespace_name) - - _check_namespace_visibility(context, namespace, namespace_name) - - for object in DATA['metadef_objects']: - if (object['namespace_id'] == namespace['id'] and - object['id'] == object_id): - return object - else: - msg = (_("Metadata definition object not found for id=%s") - % object_id) - LOG.warn(msg) - raise exception.MetadefObjectNotFound(msg) - - -@log_call -def metadef_object_get_all(context, namespace_name): - """Get a metadef objects list""" - namespace = metadef_namespace_get(context, namespace_name) - - objects = [] - - _check_namespace_visibility(context, namespace, namespace_name) - - for object in DATA['metadef_objects']: - if object['namespace_id'] == namespace['id']: - objects.append(object) - - return objects - - -@log_call -@utils.no_4byte_params -def metadef_object_create(context, namespace_name, values): - """Create a metadef object""" - global DATA - - object_values = copy.deepcopy(values) - object_name = object_values['name'] - required_attributes = ['name'] - allowed_attributes = ['name', 'description', 'json_schema', 'required'] - - namespace = metadef_namespace_get(context, namespace_name) - - for object in DATA['metadef_objects']: - if (object['name'] == object_name and - object['namespace_id'] == namespace['id']): - LOG.debug("A metadata definition object with name=%(name)s " - "in namespace=%(namespace_name)s already exists.", - {'name': object_name, 'namespace_name': namespace_name}) - raise exception.MetadefDuplicateObject( - object_name=object_name, namespace_name=namespace_name) - - for key in required_attributes: - if key not in object_values: - raise exception.Invalid('%s is a required attribute' % key) - - incorrect_keys = set(object_values.keys()) - set(allowed_attributes) - if incorrect_keys: - raise exception.Invalid( - 'The keys %s are not valid' % str(incorrect_keys)) - - object_values['namespace_id'] = namespace['id'] - - _check_namespace_visibility(context, namespace, 
namespace_name) - - object = _format_object(object_values) - DATA['metadef_objects'].append(object) - - return object - - -@log_call -@utils.no_4byte_params -def metadef_object_update(context, namespace_name, object_id, values): - """Update a metadef object""" - global DATA - - namespace = metadef_namespace_get(context, namespace_name) - - _check_namespace_visibility(context, namespace, namespace_name) - - object = metadef_object_get_by_id(context, namespace_name, object_id) - if object['name'] != values['name']: - for db_object in DATA['metadef_objects']: - if (db_object['name'] == values['name'] and - db_object['namespace_id'] == namespace['id']): - LOG.debug("Invalid update. It would result in a duplicate " - "metadata definition object with same name=%(name)s " - "in namespace=%(namespace_name)s.", - {'name': object['name'], - 'namespace_name': namespace_name}) - emsg = (_("Invalid update. It would result in a duplicate" - " metadata definition object with the same" - " name=%(name)s " - " in namespace=%(namespace_name)s.") - % {'name': object['name'], - 'namespace_name': namespace_name}) - raise exception.MetadefDuplicateObject(emsg) - DATA['metadef_objects'].remove(object) - - object.update(values) - object['updated_at'] = timeutils.utcnow() - DATA['metadef_objects'].append(object) - - return object - - -@log_call -def metadef_object_delete(context, namespace_name, object_name): - """Delete a metadef object""" - global DATA - - object = metadef_object_get(context, namespace_name, object_name) - DATA['metadef_objects'].remove(object) - - return object - - -def metadef_object_delete_namespace_content(context, namespace_name, - session=None): - """Delete an object or raise if namespace or object doesn't exist.""" - return _metadef_delete_namespace_content( - metadef_object_get_all, 'metadef_objects', context, namespace_name) - - -@log_call -def metadef_object_count(context, namespace_name): - """Get metadef object count in a namespace""" - namespace = 
metadef_namespace_get(context, namespace_name) - - _check_namespace_visibility(context, namespace, namespace_name) - - count = 0 - for object in DATA['metadef_objects']: - if object['namespace_id'] == namespace['id']: - count = count + 1 - - return count - - -@log_call -def metadef_property_count(context, namespace_name): - """Get properties count in a namespace""" - namespace = metadef_namespace_get(context, namespace_name) - - _check_namespace_visibility(context, namespace, namespace_name) - - count = 0 - for property in DATA['metadef_properties']: - if property['namespace_id'] == namespace['id']: - count = count + 1 - - return count - - -@log_call -@utils.no_4byte_params -def metadef_property_create(context, namespace_name, values): - """Create a metadef property""" - global DATA - - property_values = copy.deepcopy(values) - property_name = property_values['name'] - required_attributes = ['name'] - allowed_attributes = ['name', 'description', 'json_schema', 'required'] - - namespace = metadef_namespace_get(context, namespace_name) - - for property in DATA['metadef_properties']: - if (property['name'] == property_name and - property['namespace_id'] == namespace['id']): - LOG.debug("Can not create metadata definition property. 
A property" - " with name=%(name)s already exists in" - " namespace=%(namespace_name)s.", - {'name': property_name, - 'namespace_name': namespace_name}) - raise exception.MetadefDuplicateProperty( - property_name=property_name, - namespace_name=namespace_name) - - for key in required_attributes: - if key not in property_values: - raise exception.Invalid('%s is a required attribute' % key) - - incorrect_keys = set(property_values.keys()) - set(allowed_attributes) - if incorrect_keys: - raise exception.Invalid( - 'The keys %s are not valid' % str(incorrect_keys)) - - property_values['namespace_id'] = namespace['id'] - - _check_namespace_visibility(context, namespace, namespace_name) - - property = _format_property(property_values) - DATA['metadef_properties'].append(property) - - return property - - -@log_call -@utils.no_4byte_params -def metadef_property_update(context, namespace_name, property_id, values): - """Update a metadef property""" - global DATA - - namespace = metadef_namespace_get(context, namespace_name) - - _check_namespace_visibility(context, namespace, namespace_name) - - property = metadef_property_get_by_id(context, namespace_name, property_id) - if property['name'] != values['name']: - for db_property in DATA['metadef_properties']: - if (db_property['name'] == values['name'] and - db_property['namespace_id'] == namespace['id']): - LOG.debug("Invalid update. It would result in a duplicate" - " metadata definition property with the same" - " name=%(name)s" - " in namespace=%(namespace_name)s.", - {'name': property['name'], - 'namespace_name': namespace_name}) - emsg = (_("Invalid update. 
It would result in a duplicate" - " metadata definition property with the same" - " name=%(name)s" - " in namespace=%(namespace_name)s.") - % {'name': property['name'], - 'namespace_name': namespace_name}) - raise exception.MetadefDuplicateProperty(emsg) - DATA['metadef_properties'].remove(property) - - property.update(values) - property['updated_at'] = timeutils.utcnow() - DATA['metadef_properties'].append(property) - - return property - - -@log_call -def metadef_property_get_all(context, namespace_name): - """Get a metadef properties list""" - namespace = metadef_namespace_get(context, namespace_name) - - properties = [] - - _check_namespace_visibility(context, namespace, namespace_name) - - for property in DATA['metadef_properties']: - if property['namespace_id'] == namespace['id']: - properties.append(property) - - return properties - - -@log_call -def metadef_property_get_by_id(context, namespace_name, property_id): - """Get a metadef property""" - namespace = metadef_namespace_get(context, namespace_name) - - _check_namespace_visibility(context, namespace, namespace_name) - - for property in DATA['metadef_properties']: - if (property['namespace_id'] == namespace['id'] and - property['id'] == property_id): - return property - else: - msg = (_("Metadata definition property not found for id=%s") - % property_id) - LOG.warn(msg) - raise exception.MetadefPropertyNotFound(msg) - - -@log_call -def metadef_property_get(context, namespace_name, property_name): - """Get a metadef property""" - namespace = metadef_namespace_get(context, namespace_name) - - _check_namespace_visibility(context, namespace, namespace_name) - - for property in DATA['metadef_properties']: - if (property['namespace_id'] == namespace['id'] and - property['name'] == property_name): - return property - else: - LOG.debug("No property found with name=%(name)s in" - " namespace=%(namespace_name)s ", - {'name': property_name, 'namespace_name': namespace_name}) - raise 
exception.MetadefPropertyNotFound(namespace_name=namespace_name, - property_name=property_name) - - -@log_call -def metadef_property_delete(context, namespace_name, property_name): - """Delete a metadef property""" - global DATA - - property = metadef_property_get(context, namespace_name, property_name) - DATA['metadef_properties'].remove(property) - - return property - - -def metadef_property_delete_namespace_content(context, namespace_name, - session=None): - """Delete a property or raise if it or namespace doesn't exist.""" - return _metadef_delete_namespace_content( - metadef_property_get_all, 'metadef_properties', context, - namespace_name) - - -@log_call -def metadef_resource_type_create(context, values): - """Create a metadef resource type""" - global DATA - - resource_type_values = copy.deepcopy(values) - resource_type_name = resource_type_values['name'] - - allowed_attrubites = ['name', 'protected'] - - for resource_type in DATA['metadef_resource_types']: - if resource_type['name'] == resource_type_name: - raise exception.Duplicate() - - incorrect_keys = set(resource_type_values.keys()) - set(allowed_attrubites) - if incorrect_keys: - raise exception.Invalid( - 'The keys %s are not valid' % str(incorrect_keys)) - - resource_type = _format_resource_type(resource_type_values) - DATA['metadef_resource_types'].append(resource_type) - - return resource_type - - -@log_call -def metadef_resource_type_get_all(context): - """List all resource types""" - return DATA['metadef_resource_types'] - - -@log_call -def metadef_resource_type_get(context, resource_type_name): - """Get a resource type""" - try: - resource_type = next(resource_type for resource_type in - DATA['metadef_resource_types'] - if resource_type['name'] == - resource_type_name) - except StopIteration: - LOG.debug("No resource type found with name %s", resource_type_name) - raise exception.MetadefResourceTypeNotFound( - resource_type_name=resource_type_name) - - return resource_type - - -@log_call -def 
metadef_resource_type_association_create(context, namespace_name, - values): - global DATA - - association_values = copy.deepcopy(values) - - namespace = metadef_namespace_get(context, namespace_name) - resource_type_name = association_values['name'] - resource_type = metadef_resource_type_get(context, - resource_type_name) - - required_attributes = ['name', 'properties_target', 'prefix'] - allowed_attributes = copy.deepcopy(required_attributes) - - for association in DATA['metadef_namespace_resource_types']: - if (association['namespace_id'] == namespace['id'] and - association['resource_type'] == resource_type['id']): - LOG.debug("The metadata definition resource-type association of" - " resource_type=%(resource_type_name)s to" - " namespace=%(namespace_name)s, already exists.", - {'resource_type_name': resource_type_name, - 'namespace_name': namespace_name}) - raise exception.MetadefDuplicateResourceTypeAssociation( - resource_type_name=resource_type_name, - namespace_name=namespace_name) - - for key in required_attributes: - if key not in association_values: - raise exception.Invalid('%s is a required attribute' % key) - - incorrect_keys = set(association_values.keys()) - set(allowed_attributes) - if incorrect_keys: - raise exception.Invalid( - 'The keys %s are not valid' % str(incorrect_keys)) - - association = _format_association(namespace, resource_type, - association_values) - DATA['metadef_namespace_resource_types'].append(association) - - return association - - -@log_call -def metadef_resource_type_association_get(context, namespace_name, - resource_type_name): - namespace = metadef_namespace_get(context, namespace_name) - resource_type = metadef_resource_type_get(context, resource_type_name) - - for association in DATA['metadef_namespace_resource_types']: - if (association['namespace_id'] == namespace['id'] and - association['resource_type'] == resource_type['id']): - return association - else: - LOG.debug("No resource type association found associated 
with " - "namespace %s and resource type %s", namespace_name, - resource_type_name) - raise exception.MetadefResourceTypeAssociationNotFound( - resource_type_name=resource_type_name, - namespace_name=namespace_name) - - -@log_call -def metadef_resource_type_association_get_all_by_namespace(context, - namespace_name): - namespace = metadef_namespace_get(context, namespace_name) - - namespace_resource_types = [] - for resource_type in DATA['metadef_namespace_resource_types']: - if resource_type['namespace_id'] == namespace['id']: - namespace_resource_types.append(resource_type) - - return namespace_resource_types - - -@log_call -def metadef_resource_type_association_delete(context, namespace_name, - resource_type_name): - global DATA - - resource_type = metadef_resource_type_association_get(context, - namespace_name, - resource_type_name) - DATA['metadef_namespace_resource_types'].remove(resource_type) - - return resource_type - - -@log_call -def metadef_tag_get(context, namespace_name, name): - """Get a metadef tag""" - namespace = metadef_namespace_get(context, namespace_name) - _check_namespace_visibility(context, namespace, namespace_name) - - for tag in DATA['metadef_tags']: - if tag['namespace_id'] == namespace['id'] and tag['name'] == name: - return tag - else: - LOG.debug("The metadata definition tag with name=%(name)s" - " was not found in namespace=%(namespace_name)s.", - {'name': name, 'namespace_name': namespace_name}) - raise exception.MetadefTagNotFound(name=name, - namespace_name=namespace_name) - - -@log_call -def metadef_tag_get_by_id(context, namespace_name, id): - """Get a metadef tag""" - namespace = metadef_namespace_get(context, namespace_name) - _check_namespace_visibility(context, namespace, namespace_name) - - for tag in DATA['metadef_tags']: - if tag['namespace_id'] == namespace['id'] and tag['id'] == id: - return tag - else: - msg = (_("Metadata definition tag not found for id=%s") % id) - LOG.warn(msg) - raise 
exception.MetadefTagNotFound(msg) - - -@log_call -def metadef_tag_get_all(context, namespace_name, filters=None, marker=None, - limit=None, sort_key='created_at', sort_dir=None, - session=None): - """Get a metadef tags list""" - - namespace = metadef_namespace_get(context, namespace_name) - _check_namespace_visibility(context, namespace, namespace_name) - - tags = [] - for tag in DATA['metadef_tags']: - if tag['namespace_id'] == namespace['id']: - tags.append(tag) - - return tags - - -@log_call -@utils.no_4byte_params -def metadef_tag_create(context, namespace_name, values): - """Create a metadef tag""" - global DATA - - tag_values = copy.deepcopy(values) - tag_name = tag_values['name'] - required_attributes = ['name'] - allowed_attributes = ['name'] - - namespace = metadef_namespace_get(context, namespace_name) - - for tag in DATA['metadef_tags']: - if tag['name'] == tag_name and tag['namespace_id'] == namespace['id']: - LOG.debug("A metadata definition tag with name=%(name)s" - " in namespace=%(namespace_name)s already exists.", - {'name': tag_name, 'namespace_name': namespace_name}) - raise exception.MetadefDuplicateTag( - name=tag_name, namespace_name=namespace_name) - - for key in required_attributes: - if key not in tag_values: - raise exception.Invalid('%s is a required attribute' % key) - - incorrect_keys = set(tag_values.keys()) - set(allowed_attributes) - if incorrect_keys: - raise exception.Invalid( - 'The keys %s are not valid' % str(incorrect_keys)) - - tag_values['namespace_id'] = namespace['id'] - - _check_namespace_visibility(context, namespace, namespace_name) - - tag = _format_tag(tag_values) - DATA['metadef_tags'].append(tag) - return tag - - -@log_call -def metadef_tag_create_tags(context, namespace_name, tag_list): - """Create a metadef tag""" - global DATA - - namespace = metadef_namespace_get(context, namespace_name) - _check_namespace_visibility(context, namespace, namespace_name) - - required_attributes = ['name'] - allowed_attributes = 
['name'] - data_tag_list = [] - tag_name_list = [] - for tag_value in tag_list: - tag_values = copy.deepcopy(tag_value) - tag_name = tag_values['name'] - - for key in required_attributes: - if key not in tag_values: - raise exception.Invalid('%s is a required attribute' % key) - - incorrect_keys = set(tag_values.keys()) - set(allowed_attributes) - if incorrect_keys: - raise exception.Invalid( - 'The keys %s are not valid' % str(incorrect_keys)) - - if tag_name in tag_name_list: - LOG.debug("A metadata definition tag with name=%(name)s" - " in namespace=%(namespace_name)s already exists.", - {'name': tag_name, 'namespace_name': namespace_name}) - raise exception.MetadefDuplicateTag( - name=tag_name, namespace_name=namespace_name) - else: - tag_name_list.append(tag_name) - - tag_values['namespace_id'] = namespace['id'] - data_tag_list.append(_format_tag(tag_values)) - - DATA['metadef_tags'] = [] - for tag in data_tag_list: - DATA['metadef_tags'].append(tag) - - return data_tag_list - - -@log_call -@utils.no_4byte_params -def metadef_tag_update(context, namespace_name, id, values): - """Update a metadef tag""" - global DATA - - namespace = metadef_namespace_get(context, namespace_name) - - _check_namespace_visibility(context, namespace, namespace_name) - - tag = metadef_tag_get_by_id(context, namespace_name, id) - if tag['name'] != values['name']: - for db_tag in DATA['metadef_tags']: - if (db_tag['name'] == values['name'] and - db_tag['namespace_id'] == namespace['id']): - LOG.debug("Invalid update. 
It would result in a duplicate" - " metadata definition tag with same name=%(name)s " - " in namespace=%(namespace_name)s.", - {'name': tag['name'], - 'namespace_name': namespace_name}) - raise exception.MetadefDuplicateTag( - name=tag['name'], namespace_name=namespace_name) - - DATA['metadef_tags'].remove(tag) - - tag.update(values) - tag['updated_at'] = timeutils.utcnow() - DATA['metadef_tags'].append(tag) - return tag - - -@log_call -def metadef_tag_delete(context, namespace_name, name): - """Delete a metadef tag""" - global DATA - - tags = metadef_tag_get(context, namespace_name, name) - DATA['metadef_tags'].remove(tags) - - return tags - - -def metadef_tag_delete_namespace_content(context, namespace_name, - session=None): - """Delete an tag or raise if namespace or tag doesn't exist.""" - return _metadef_delete_namespace_content( - metadef_tag_get_all, 'metadef_tags', context, namespace_name) - - -@log_call -def metadef_tag_count(context, namespace_name): - """Get metadef tag count in a namespace""" - namespace = metadef_namespace_get(context, namespace_name) - - _check_namespace_visibility(context, namespace, namespace_name) - - count = 0 - for tag in DATA['metadef_tags']: - if tag['namespace_id'] == namespace['id']: - count = count + 1 - - return count - - -def _format_association(namespace, resource_type, association_values): - association = { - 'namespace_id': namespace['id'], - 'resource_type': resource_type['id'], - 'properties_target': None, - 'prefix': None, - 'created_at': timeutils.utcnow(), - 'updated_at': timeutils.utcnow() - - } - association.update(association_values) - return association - - -def _format_resource_type(values): - dt = timeutils.utcnow() - resource_type = { - 'id': _get_metadef_id(), - 'name': values['name'], - 'protected': True, - 'created_at': dt, - 'updated_at': dt - } - resource_type.update(values) - return resource_type - - -def _format_property(values): - property = { - 'id': _get_metadef_id(), - 'namespace_id': None, - 
'name': None, - 'json_schema': None - } - property.update(values) - return property - - -def _format_namespace(values): - dt = timeutils.utcnow() - namespace = { - 'id': _get_metadef_id(), - 'namespace': None, - 'display_name': None, - 'description': None, - 'visibility': 'private', - 'protected': False, - 'owner': None, - 'created_at': dt, - 'updated_at': dt - } - namespace.update(values) - return namespace - - -def _format_object(values): - dt = timeutils.utcnow() - object = { - 'id': _get_metadef_id(), - 'namespace_id': None, - 'name': None, - 'description': None, - 'json_schema': None, - 'required': None, - 'created_at': dt, - 'updated_at': dt - } - object.update(values) - return object - - -def _format_tag(values): - dt = timeutils.utcnow() - tag = { - 'id': _get_metadef_id(), - 'namespace_id': None, - 'name': None, - 'created_at': dt, - 'updated_at': dt - } - tag.update(values) - return tag - - -def _is_namespace_visible(context, namespace): - """Return true if namespace is visible in this context""" - if context.is_admin: - return True - - if namespace.get('visibility', '') == 'public': - return True - - if namespace['owner'] is None: - return True - - if context.owner is not None: - if context.owner == namespace['owner']: - return True - - return False - - -def _check_namespace_visibility(context, namespace, namespace_name): - if not _is_namespace_visible(context, namespace): - LOG.debug("Forbidding request, metadata definition namespace=%s " - "is not visible.", namespace_name) - emsg = _("Forbidding request, metadata definition namespace=%s" - " is not visible.") % namespace_name - raise exception.MetadefForbidden(emsg) - - -def _get_metadef_id(): - global INDEX - INDEX += 1 - return INDEX diff --git a/glance/db/sqlalchemy/__init__.py b/glance/db/sqlalchemy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/db/sqlalchemy/alembic_migrations/README b/glance/db/sqlalchemy/alembic_migrations/README deleted file mode 100644 index 
2500aa1b..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/README +++ /dev/null @@ -1 +0,0 @@ -Generic single-database configuration. diff --git a/glance/db/sqlalchemy/alembic_migrations/__init__.py b/glance/db/sqlalchemy/alembic_migrations/__init__.py deleted file mode 100644 index 40392629..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/__init__.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2016 Rackspace -# Copyright 2013 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import sys - -from alembic import command as alembic_command -from alembic import config as alembic_config -from alembic import migration as alembic_migration -from alembic import script as alembic_script -from oslo_db import exception as db_exception -from oslo_db.sqlalchemy import migration as sqla_migration - -from glance.db import migration as db_migration -from glance.db.sqlalchemy import api as db_api -from glance.i18n import _ - - -def get_alembic_config(engine=None): - """Return a valid alembic config object""" - ini_path = os.path.join(os.path.dirname(__file__), 'alembic.ini') - config = alembic_config.Config(os.path.abspath(ini_path)) - if engine is None: - engine = db_api.get_engine() - config.set_main_option('sqlalchemy.url', str(engine.url)) - return config - - -def get_current_alembic_heads(): - """Return current heads (if any) from the alembic migration table""" - engine = db_api.get_engine() - with engine.connect() as conn: - context = alembic_migration.MigrationContext.configure(conn) - heads = context.get_current_heads() - return heads - - -def get_current_legacy_head(): - try: - legacy_head = sqla_migration.db_version(db_api.get_engine(), - db_migration.MIGRATE_REPO_PATH, - db_migration.INIT_VERSION) - except db_exception.DbMigrationError: - legacy_head = None - return legacy_head - - -def is_database_under_alembic_control(): - if get_current_alembic_heads(): - return True - return False - - -def is_database_under_migrate_control(): - if get_current_legacy_head(): - return True - return False - - -def place_database_under_alembic_control(): - a_config = get_alembic_config() - - if not is_database_under_migrate_control(): - return - - if not is_database_under_alembic_control(): - print(_("Database is currently not under Alembic's migration " - "control.")) - head = get_current_legacy_head() - if head == 42: - alembic_version = 'liberty' - elif head == 43: - alembic_version = 'mitaka01' - elif head == 44: - alembic_version = 
'mitaka02' - elif head == 45: - alembic_version = 'ocata01' - elif head in range(1, 42): - print("Legacy head: ", head) - sys.exit(_("The current database version is not supported any " - "more. Please upgrade to Liberty release first.")) - else: - sys.exit(_("Unable to place database under Alembic's migration " - "control. Unknown database state, can't proceed " - "further.")) - - print(_("Placing database under Alembic's migration control at " - "revision:"), alembic_version) - alembic_command.stamp(a_config, alembic_version) - - -def get_alembic_branch_head(branch): - """Return head revision name for particular branch""" - a_config = get_alembic_config() - script = alembic_script.ScriptDirectory.from_config(a_config) - return script.revision_map.get_current_head(branch) diff --git a/glance/db/sqlalchemy/alembic_migrations/add_artifacts_tables.py b/glance/db/sqlalchemy/alembic_migrations/add_artifacts_tables.py deleted file mode 100644 index 6d965f6a..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/add_artifacts_tables.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright 2016 Rackspace -# Copyright 2013 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from alembic import op -from sqlalchemy.schema import ( - Column, PrimaryKeyConstraint, ForeignKeyConstraint) - -from glance.db.sqlalchemy.migrate_repo.schema import ( - Boolean, DateTime, Integer, BigInteger, String, Text, Numeric) # noqa - - -def _add_artifacts_table(): - op.create_table('artifacts', - Column('id', String(length=36), nullable=False), - Column('name', String(length=255), nullable=False), - Column('type_name', String(length=255), nullable=False), - Column('type_version_prefix', - BigInteger(), - nullable=False), - Column('type_version_suffix', - String(length=255), - nullable=True), - Column('type_version_meta', - String(length=255), - nullable=True), - Column('version_prefix', BigInteger(), nullable=False), - Column('version_suffix', - String(length=255), - nullable=True), - Column('version_meta', String(length=255), nullable=True), - Column('description', Text(), nullable=True), - Column('visibility', String(length=32), nullable=False), - Column('state', String(length=32), nullable=False), - Column('owner', String(length=255), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime(), nullable=True), - Column('published_at', DateTime(), nullable=True), - PrimaryKeyConstraint('id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_artifact_name_and_version', - 'artifacts', - ['name', 'version_prefix', 'version_suffix'], - unique=False) - op.create_index('ix_artifact_owner', 'artifacts', ['owner'], unique=False) - op.create_index('ix_artifact_state', 'artifacts', ['state'], unique=False) - op.create_index('ix_artifact_type', - 'artifacts', - ['type_name', - 'type_version_prefix', - 'type_version_suffix'], - unique=False) - op.create_index('ix_artifact_visibility', - 'artifacts', - ['visibility'], - unique=False) - - -def _add_artifact_blobs_table(): - op.create_table('artifact_blobs', - Column('id', 
String(length=36), nullable=False), - Column('artifact_id', String(length=36), nullable=False), - Column('size', BigInteger(), nullable=False), - Column('checksum', String(length=32), nullable=True), - Column('name', String(length=255), nullable=False), - Column('item_key', String(length=329), nullable=True), - Column('position', Integer(), nullable=True), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - ForeignKeyConstraint(['artifact_id'], ['artifacts.id'], ), - PrimaryKeyConstraint('id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_artifact_blobs_artifact_id', - 'artifact_blobs', - ['artifact_id'], - unique=False) - op.create_index('ix_artifact_blobs_name', - 'artifact_blobs', - ['name'], - unique=False) - - -def _add_artifact_dependencies_table(): - op.create_table('artifact_dependencies', - Column('id', String(length=36), nullable=False), - Column('artifact_source', - String(length=36), - nullable=False), - Column('artifact_dest', String(length=36), nullable=False), - Column('artifact_origin', - String(length=36), - nullable=False), - Column('is_direct', Boolean(), nullable=False), - Column('position', Integer(), nullable=True), - Column('name', String(length=36), nullable=True), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - ForeignKeyConstraint(['artifact_dest'], - ['artifacts.id'], ), - ForeignKeyConstraint(['artifact_origin'], - ['artifacts.id'], ), - ForeignKeyConstraint(['artifact_source'], - ['artifacts.id'], ), - PrimaryKeyConstraint('id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_artifact_dependencies_dest_id', - 'artifact_dependencies', - ['artifact_dest'], - unique=False) - op.create_index('ix_artifact_dependencies_direct_dependencies', - 'artifact_dependencies', - ['artifact_source', 'is_direct'], - unique=False) - 
op.create_index('ix_artifact_dependencies_origin_id', - 'artifact_dependencies', - ['artifact_origin'], - unique=False) - op.create_index('ix_artifact_dependencies_source_id', - 'artifact_dependencies', - ['artifact_source'], - unique=False) - - -def _add_artifact_properties_table(): - op.create_table('artifact_properties', - Column('id', String(length=36), nullable=False), - Column('artifact_id', String(length=36), nullable=False), - Column('name', String(length=255), nullable=False), - Column('string_value', String(length=255), nullable=True), - Column('int_value', Integer(), nullable=True), - Column('numeric_value', Numeric(), nullable=True), - Column('bool_value', Boolean(), nullable=True), - Column('text_value', Text(), nullable=True), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('position', Integer(), nullable=True), - ForeignKeyConstraint(['artifact_id'], ['artifacts.id'], ), - PrimaryKeyConstraint('id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_artifact_properties_artifact_id', - 'artifact_properties', - ['artifact_id'], - unique=False) - op.create_index('ix_artifact_properties_name', - 'artifact_properties', - ['name'], - unique=False) - - -def _add_artifact_tags_table(): - op.create_table('artifact_tags', - Column('id', String(length=36), nullable=False), - Column('artifact_id', String(length=36), nullable=False), - Column('value', String(length=255), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - ForeignKeyConstraint(['artifact_id'], ['artifacts.id'], ), - PrimaryKeyConstraint('id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_artifact_tags_artifact_id', - 'artifact_tags', - ['artifact_id'], - unique=False) - op.create_index('ix_artifact_tags_artifact_id_tag_value', - 'artifact_tags', - ['artifact_id', 
'value'], - unique=False) - - -def _add_artifact_blob_locations_table(): - op.create_table('artifact_blob_locations', - Column('id', String(length=36), nullable=False), - Column('blob_id', String(length=36), nullable=False), - Column('value', Text(), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('position', Integer(), nullable=True), - Column('status', String(length=36), nullable=True), - ForeignKeyConstraint(['blob_id'], ['artifact_blobs.id'], ), - PrimaryKeyConstraint('id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_artifact_blob_locations_blob_id', - 'artifact_blob_locations', - ['blob_id'], - unique=False) - - -def upgrade(): - _add_artifacts_table() - _add_artifact_blobs_table() - _add_artifact_dependencies_table() - _add_artifact_properties_table() - _add_artifact_tags_table() - _add_artifact_blob_locations_table() diff --git a/glance/db/sqlalchemy/alembic_migrations/add_images_tables.py b/glance/db/sqlalchemy/alembic_migrations/add_images_tables.py deleted file mode 100644 index 399c77e4..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/add_images_tables.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright 2016 Rackspace -# Copyright 2013 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from alembic import op -from sqlalchemy import sql -from sqlalchemy.schema import ( - Column, PrimaryKeyConstraint, ForeignKeyConstraint, UniqueConstraint) - -from glance.db.sqlalchemy.migrate_repo.schema import ( - Boolean, DateTime, Integer, BigInteger, String, Text) # noqa -from glance.db.sqlalchemy.models import JSONEncodedDict - - -def _add_images_table(): - op.create_table('images', - Column('id', String(length=36), nullable=False), - Column('name', String(length=255), nullable=True), - Column('size', BigInteger(), nullable=True), - Column('status', String(length=30), nullable=False), - Column('is_public', Boolean(), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=True), - Column('deleted_at', DateTime(), nullable=True), - Column('deleted', Boolean(), nullable=False), - Column('disk_format', String(length=20), nullable=True), - Column('container_format', - String(length=20), - nullable=True), - Column('checksum', String(length=32), nullable=True), - Column('owner', String(length=255), nullable=True), - Column('min_disk', Integer(), nullable=False), - Column('min_ram', Integer(), nullable=False), - Column('protected', - Boolean(), - server_default=sql.false(), - nullable=False), - Column('virtual_size', BigInteger(), nullable=True), - PrimaryKeyConstraint('id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('checksum_image_idx', - 'images', - ['checksum'], - unique=False) - op.create_index('ix_images_deleted', - 'images', - ['deleted'], - unique=False) - op.create_index('ix_images_is_public', - 'images', - ['is_public'], - unique=False) - op.create_index('owner_image_idx', - 'images', - ['owner'], - unique=False) - - -def _add_image_properties_table(): - op.create_table('image_properties', - Column('id', Integer(), nullable=False), - Column('image_id', String(length=36), nullable=False), - Column('name', String(length=255), nullable=False), - 
Column('value', Text(), nullable=True), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=True), - Column('deleted_at', DateTime(), nullable=True), - Column('deleted', Boolean(), nullable=False), - PrimaryKeyConstraint('id'), - ForeignKeyConstraint(['image_id'], ['images.id'], ), - UniqueConstraint('image_id', - 'name', - name='ix_image_properties_image_id_name'), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_image_properties_deleted', - 'image_properties', - ['deleted'], - unique=False) - op.create_index('ix_image_properties_image_id', - 'image_properties', - ['image_id'], - unique=False) - - -def _add_image_locations_table(): - op.create_table('image_locations', - Column('id', Integer(), nullable=False), - Column('image_id', String(length=36), nullable=False), - Column('value', Text(), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=True), - Column('deleted_at', DateTime(), nullable=True), - Column('deleted', Boolean(), nullable=False), - Column('meta_data', JSONEncodedDict(), nullable=True), - Column('status', - String(length=30), - server_default='active', - nullable=False), - PrimaryKeyConstraint('id'), - ForeignKeyConstraint(['image_id'], ['images.id'], ), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_image_locations_deleted', - 'image_locations', - ['deleted'], - unique=False) - op.create_index('ix_image_locations_image_id', - 'image_locations', - ['image_id'], - unique=False) - - -def _add_image_members_table(): - deleted_member_constraint = 'image_members_image_id_member_deleted_at_key' - op.create_table('image_members', - Column('id', Integer(), nullable=False), - Column('image_id', String(length=36), nullable=False), - Column('member', String(length=255), nullable=False), - Column('can_share', Boolean(), nullable=False), - Column('created_at', 
DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=True), - Column('deleted_at', DateTime(), nullable=True), - Column('deleted', Boolean(), nullable=False), - Column('status', - String(length=20), - server_default='pending', - nullable=False), - ForeignKeyConstraint(['image_id'], ['images.id'], ), - PrimaryKeyConstraint('id'), - UniqueConstraint('image_id', - 'member', - 'deleted_at', - name=deleted_member_constraint), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_image_members_deleted', - 'image_members', - ['deleted'], - unique=False) - op.create_index('ix_image_members_image_id', - 'image_members', - ['image_id'], - unique=False) - op.create_index('ix_image_members_image_id_member', - 'image_members', - ['image_id', 'member'], - unique=False) - - -def _add_images_tags_table(): - op.create_table('image_tags', - Column('id', Integer(), nullable=False), - Column('image_id', String(length=36), nullable=False), - Column('value', String(length=255), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=True), - Column('deleted_at', DateTime(), nullable=True), - Column('deleted', Boolean(), nullable=False), - ForeignKeyConstraint(['image_id'], ['images.id'], ), - PrimaryKeyConstraint('id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_image_tags_image_id', - 'image_tags', - ['image_id'], - unique=False) - op.create_index('ix_image_tags_image_id_tag_value', - 'image_tags', - ['image_id', 'value'], - unique=False) - - -def upgrade(): - _add_images_table() - _add_image_properties_table() - _add_image_locations_table() - _add_image_members_table() - _add_images_tags_table() diff --git a/glance/db/sqlalchemy/alembic_migrations/add_metadefs_tables.py b/glance/db/sqlalchemy/alembic_migrations/add_metadefs_tables.py deleted file mode 100644 index 96fa7333..00000000 --- 
a/glance/db/sqlalchemy/alembic_migrations/add_metadefs_tables.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2016 Rackspace -# Copyright 2013 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from alembic import op -from sqlalchemy.schema import ( - Column, PrimaryKeyConstraint, ForeignKeyConstraint, UniqueConstraint) - -from glance.db.sqlalchemy.migrate_repo.schema import ( - Boolean, DateTime, Integer, String, Text) # noqa -from glance.db.sqlalchemy.models import JSONEncodedDict - - -def _add_metadef_namespaces_table(): - op.create_table('metadef_namespaces', - Column('id', Integer(), nullable=False), - Column('namespace', String(length=80), nullable=False), - Column('display_name', String(length=80), nullable=True), - Column('description', Text(), nullable=True), - Column('visibility', String(length=32), nullable=True), - Column('protected', Boolean(), nullable=True), - Column('owner', String(length=255), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=True), - PrimaryKeyConstraint('id'), - UniqueConstraint('namespace', - name='uq_metadef_namespaces_namespace'), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_metadef_namespaces_owner', - 'metadef_namespaces', - ['owner'], - unique=False) - - -def _add_metadef_resource_types_table(): - op.create_table('metadef_resource_types', - Column('id', Integer(), nullable=False), - Column('name', 
String(length=80), nullable=False), - Column('protected', Boolean(), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=True), - PrimaryKeyConstraint('id'), - UniqueConstraint('name', - name='uq_metadef_resource_types_name'), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - -def _add_metadef_namespace_resource_types_table(): - op.create_table('metadef_namespace_resource_types', - Column('resource_type_id', Integer(), nullable=False), - Column('namespace_id', Integer(), nullable=False), - Column('properties_target', - String(length=80), - nullable=True), - Column('prefix', String(length=80), nullable=True), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=True), - ForeignKeyConstraint(['namespace_id'], - ['metadef_namespaces.id'], ), - ForeignKeyConstraint(['resource_type_id'], - ['metadef_resource_types.id'], ), - PrimaryKeyConstraint('resource_type_id', 'namespace_id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_metadef_ns_res_types_namespace_id', - 'metadef_namespace_resource_types', - ['namespace_id'], - unique=False) - - -def _add_metadef_objects_table(): - ns_id_name_constraint = 'uq_metadef_objects_namespace_id_name' - - op.create_table('metadef_objects', - Column('id', Integer(), nullable=False), - Column('namespace_id', Integer(), nullable=False), - Column('name', String(length=80), nullable=False), - Column('description', Text(), nullable=True), - Column('required', Text(), nullable=True), - Column('json_schema', JSONEncodedDict(), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=True), - ForeignKeyConstraint(['namespace_id'], - ['metadef_namespaces.id'], ), - PrimaryKeyConstraint('id'), - UniqueConstraint('namespace_id', - 'name', - name=ns_id_name_constraint), - mysql_engine='InnoDB', - 
mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_metadef_objects_name', - 'metadef_objects', - ['name'], - unique=False) - - -def _add_metadef_properties_table(): - ns_id_name_constraint = 'uq_metadef_properties_namespace_id_name' - op.create_table('metadef_properties', - Column('id', Integer(), nullable=False), - Column('namespace_id', Integer(), nullable=False), - Column('name', String(length=80), nullable=False), - Column('json_schema', JSONEncodedDict(), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=True), - ForeignKeyConstraint(['namespace_id'], - ['metadef_namespaces.id'], ), - PrimaryKeyConstraint('id'), - UniqueConstraint('namespace_id', - 'name', - name=ns_id_name_constraint), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_metadef_properties_name', - 'metadef_properties', - ['name'], - unique=False) - - -def _add_metadef_tags_table(): - op.create_table('metadef_tags', - Column('id', Integer(), nullable=False), - Column('namespace_id', Integer(), nullable=False), - Column('name', String(length=80), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=True), - ForeignKeyConstraint(['namespace_id'], - ['metadef_namespaces.id'], ), - PrimaryKeyConstraint('id'), - UniqueConstraint('namespace_id', - 'name', - name='uq_metadef_tags_namespace_id_name'), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_metadef_tags_name', - 'metadef_tags', - ['name'], - unique=False) - - -def upgrade(): - _add_metadef_namespaces_table() - _add_metadef_resource_types_table() - _add_metadef_namespace_resource_types_table() - _add_metadef_objects_table() - _add_metadef_properties_table() - _add_metadef_tags_table() diff --git a/glance/db/sqlalchemy/alembic_migrations/add_tasks_tables.py 
b/glance/db/sqlalchemy/alembic_migrations/add_tasks_tables.py deleted file mode 100644 index d199557a..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/add_tasks_tables.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2016 Rackspace -# Copyright 2013 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from alembic import op -from sqlalchemy.schema import ( - Column, PrimaryKeyConstraint, ForeignKeyConstraint) - -from glance.db.sqlalchemy.migrate_repo.schema import ( - Boolean, DateTime, String, Text) # noqa -from glance.db.sqlalchemy.models import JSONEncodedDict - - -def _add_tasks_table(): - op.create_table('tasks', - Column('id', String(length=36), nullable=False), - Column('type', String(length=30), nullable=False), - Column('status', String(length=30), nullable=False), - Column('owner', String(length=255), nullable=False), - Column('expires_at', DateTime(), nullable=True), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=True), - Column('deleted_at', DateTime(), nullable=True), - Column('deleted', Boolean(), nullable=False), - PrimaryKeyConstraint('id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - op.create_index('ix_tasks_deleted', 'tasks', ['deleted'], unique=False) - op.create_index('ix_tasks_owner', 'tasks', ['owner'], unique=False) - op.create_index('ix_tasks_status', 'tasks', ['status'], unique=False) - op.create_index('ix_tasks_type', 'tasks', ['type'], 
unique=False) - op.create_index('ix_tasks_updated_at', - 'tasks', - ['updated_at'], - unique=False) - - -def _add_task_info_table(): - op.create_table('task_info', - Column('task_id', String(length=36), nullable=False), - Column('input', JSONEncodedDict(), nullable=True), - Column('result', JSONEncodedDict(), nullable=True), - Column('message', Text(), nullable=True), - ForeignKeyConstraint(['task_id'], ['tasks.id'], ), - PrimaryKeyConstraint('task_id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - -def upgrade(): - _add_tasks_table() - _add_task_info_table() diff --git a/glance/db/sqlalchemy/alembic_migrations/alembic.ini b/glance/db/sqlalchemy/alembic_migrations/alembic.ini deleted file mode 100644 index 640a9af4..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/alembic.ini +++ /dev/null @@ -1,69 +0,0 @@ -# A generic, single database configuration. - -[alembic] -# path to migration scripts -script_location = %(here)s - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# max length of characters to apply to the -# "slug" field -#truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -# set to 'true' to allow .pyc and .pyo files without -# a source .py file to be detected as revisions in the -# versions/ directory -# sourceless = false - -# version location specification; this defaults -# to alembic_migrations/versions. 
When using multiple version -# directories, initial revisions must be specified with --version-path -# version_locations = %(here)s/bar %(here)s/bat alembic_migrations/versions - -# the output encoding used when revision files -# are written from script.py.mako -# output_encoding = utf-8 - -# Uncomment and update to your sql connection string if wishing to run -# alembic directly from command line -#sqlalchemy.url = - -# Logging configuration -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/glance/db/sqlalchemy/alembic_migrations/data_migrations/__init__.py b/glance/db/sqlalchemy/alembic_migrations/data_migrations/__init__.py deleted file mode 100644 index 4d65ff1c..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/data_migrations/__init__.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2016 Rackspace -# Copyright 2016 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import importlib -import os.path -import pkgutil - -from glance.common import exception -from glance.db import migration as db_migrations -from glance.db.sqlalchemy import api as db_api - - -def _find_migration_modules(release): - migrations = list() - for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(__file__)]): - if module_name.startswith(release): - migrations.append(module_name) - - migration_modules = list() - for migration in sorted(migrations): - module = importlib.import_module('.'.join([__package__, migration])) - has_migrations_function = getattr(module, 'has_migrations', None) - migrate_function = getattr(module, 'migrate', None) - - if has_migrations_function is None or migrate_function is None: - raise exception.InvalidDataMigrationScript(script=module.__name__) - - migration_modules.append(module) - - return migration_modules - - -def _run_migrations(engine, migrations): - rows_migrated = 0 - for migration in migrations: - if migration.has_migrations(engine): - rows_migrated += migration.migrate(engine) - - return rows_migrated - - -def has_pending_migrations(engine=None): - if not engine: - engine = db_api.get_engine() - - migrations = _find_migration_modules(db_migrations.CURRENT_RELEASE) - if not migrations: - return False - return any([x.has_migrations(engine) for x in migrations]) - - -def migrate(engine=None): - if not engine: - engine = db_api.get_engine() - - migrations = _find_migration_modules(db_migrations.CURRENT_RELEASE) - rows_migrated = _run_migrations(engine, migrations) - return rows_migrated diff --git a/glance/db/sqlalchemy/alembic_migrations/data_migrations/ocata_migrate01_community_images.py b/glance/db/sqlalchemy/alembic_migrations/data_migrations/ocata_migrate01_community_images.py deleted file mode 100644 index 4666d448..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/data_migrations/ocata_migrate01_community_images.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2016 Rackspace -# Copyright 2016 Intel 
Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData, select, Table, and_, not_ - - -def has_migrations(engine): - """Returns true if at least one data row can be migrated. - - There are rows left to migrate if: - #1 There exists a row with visibility not set yet. - Or - #2 There exists a private image with active members but its visibility - isn't set to 'shared' yet. - - Note: This method can return a false positive if data migrations - are running in the background as it's being called. 
- """ - meta = MetaData(engine) - images = Table('images', meta, autoload=True) - - rows_with_null_visibility = (select([images.c.id]) - .where(images.c.visibility == None) - .limit(1) - .execute()) - - if rows_with_null_visibility.rowcount == 1: - return True - - image_members = Table('image_members', meta, autoload=True) - rows_with_pending_shared = (select([images.c.id]) - .where(and_( - images.c.visibility == 'private', - images.c.id.in_( - select([image_members.c.image_id]) - .distinct() - .where(not_(image_members.c.deleted)))) - ) - .limit(1) - .execute()) - if rows_with_pending_shared.rowcount == 1: - return True - - return False - - -def _mark_all_public_images_with_public_visibility(images): - migrated_rows = (images - .update().values(visibility='public') - .where(images.c.is_public) - .execute()) - return migrated_rows.rowcount - - -def _mark_all_non_public_images_with_private_visibility(images): - migrated_rows = (images - .update().values(visibility='private') - .where(not_(images.c.is_public)) - .execute()) - return migrated_rows.rowcount - - -def _mark_all_private_images_with_members_as_shared_visibility(images, - image_members): - migrated_rows = (images - .update().values(visibility='shared') - .where(and_(images.c.visibility == 'private', - images.c.id.in_( - select([image_members.c.image_id]) - .distinct() - .where(not_(image_members.c.deleted))))) - .execute()) - return migrated_rows.rowcount - - -def _migrate_all(engine): - meta = MetaData(engine) - images = Table('images', meta, autoload=True) - image_members = Table('image_members', meta, autoload=True) - - num_rows = _mark_all_public_images_with_public_visibility(images) - num_rows += _mark_all_non_public_images_with_private_visibility(images) - num_rows += _mark_all_private_images_with_members_as_shared_visibility( - images, image_members) - - return num_rows - - -def migrate(engine): - """Set visibility column based on is_public and image members.""" - return _migrate_all(engine) diff 
--git a/glance/db/sqlalchemy/alembic_migrations/data_migrations/pike_migrate01_empty.py b/glance/db/sqlalchemy/alembic_migrations/data_migrations/pike_migrate01_empty.py deleted file mode 100644 index b1f02ca4..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/data_migrations/pike_migrate01_empty.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# NOTE(rosmaita): This file implements the migration interface, but doesn't -# migrate any data. The pike01 migration is contract-only. - - -def has_migrations(engine): - """Returns true if at least one data row can be migrated.""" - - return False - - -def migrate(engine): - """Return the number of rows migrated.""" - - return 0 diff --git a/glance/db/sqlalchemy/alembic_migrations/env.py b/glance/db/sqlalchemy/alembic_migrations/env.py deleted file mode 100644 index 12d29455..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/env.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2016 Rackspace -# Copyright 2013 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import with_statement -from logging import config as log_config - -from alembic import context -from sqlalchemy import engine_from_config, pool - -from glance.db.sqlalchemy import models -from glance.db.sqlalchemy import models_metadef - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -log_config.fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -target_metadata = models.BASE.metadata -for table in models_metadef.BASE_DICT.metadata.sorted_tables: - target_metadata._add_table(table.name, table.schema, table) - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - url = config.get_main_option("sqlalchemy.url") - context.configure( - url=url, target_metadata=target_metadata, literal_binds=True) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. 
- - """ - connectable = engine_from_config( - config.get_section(config.config_ini_section), - prefix='sqlalchemy.', - poolclass=pool.NullPool) - - with connectable.connect() as connection: - context.configure( - connection=connection, - target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/glance/db/sqlalchemy/alembic_migrations/migrate.cfg b/glance/db/sqlalchemy/alembic_migrations/migrate.cfg deleted file mode 100644 index 8ddf0500..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/migrate.cfg +++ /dev/null @@ -1,20 +0,0 @@ -[db_settings] -# Used to identify which repository this database is versioned under. -# You can use the name of your project. -repository_id=Glance Migrations - -# The name of the database table used to track the schema version. -# This name shouldn't already be used by your project. -# If this is changed once a database is under version control, you'll need to -# change the table name in each database too. -version_table=alembic_version - -# When committing a change script, Migrate will attempt to generate the -# sql for all supported databases; normally, if one of them fails - probably -# because you don't have that database installed - it is ignored and the -# commit continues, perhaps ending successfully. -# Databases in this list MUST compile successfully during a commit, or the -# entire commit will fail. List the databases your application will actually -# be using to ensure your updates to that database work properly. 
-# This must be a list; example: ['postgres','sqlite'] -required_dbs=[] diff --git a/glance/db/sqlalchemy/alembic_migrations/script.py.mako b/glance/db/sqlalchemy/alembic_migrations/script.py.mako deleted file mode 100644 index 8323caac..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/script.py.mako +++ /dev/null @@ -1,20 +0,0 @@ -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision | comma,n} -Create Date: ${create_date} - -""" - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} -branch_labels = ${repr(branch_labels)} -depends_on = ${repr(depends_on)} - -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -def upgrade(): - ${upgrades if upgrades else "pass"} diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/__init__.py b/glance/db/sqlalchemy/alembic_migrations/versions/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/liberty_initial.py b/glance/db/sqlalchemy/alembic_migrations/versions/liberty_initial.py deleted file mode 100644 index 2d56680e..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/versions/liberty_initial.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2016 Rackspace -# Copyright 2013 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""liberty initial - -Revision ID: liberty -Revises: -Create Date: 2016-08-03 16:06:59.657433 - -""" - -from glance.db.sqlalchemy.alembic_migrations import add_artifacts_tables -from glance.db.sqlalchemy.alembic_migrations import add_images_tables -from glance.db.sqlalchemy.alembic_migrations import add_metadefs_tables -from glance.db.sqlalchemy.alembic_migrations import add_tasks_tables - -# revision identifiers, used by Alembic. -revision = 'liberty' -down_revision = None -branch_labels = None -depends_on = None - - -def upgrade(): - add_images_tables.upgrade() - add_tasks_tables.upgrade() - add_metadefs_tables.upgrade() - add_artifacts_tables.upgrade() diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/mitaka01_add_image_created_updated_idx.py b/glance/db/sqlalchemy/alembic_migrations/versions/mitaka01_add_image_created_updated_idx.py deleted file mode 100644 index 5180c675..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/versions/mitaka01_add_image_created_updated_idx.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2016 Rackspace -# Copyright 2013 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""add index on created_at and updated_at columns of 'images' table - -Revision ID: mitaka01 -Revises: liberty -Create Date: 2016-08-03 17:19:35.306161 - -""" - -from alembic import op -from sqlalchemy import MetaData, Table, Index - - -# revision identifiers, used by Alembic. 
-revision = 'mitaka01' -down_revision = 'liberty' -branch_labels = None -depends_on = None - -CREATED_AT_INDEX = 'created_at_image_idx' -UPDATED_AT_INDEX = 'updated_at_image_idx' - - -def upgrade(): - migrate_engine = op.get_bind() - meta = MetaData(bind=migrate_engine) - - images = Table('images', meta, autoload=True) - - created_index = Index(CREATED_AT_INDEX, images.c.created_at) - created_index.create(migrate_engine) - updated_index = Index(UPDATED_AT_INDEX, images.c.updated_at) - updated_index.create(migrate_engine) diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/mitaka02_update_metadef_os_nova_server.py b/glance/db/sqlalchemy/alembic_migrations/versions/mitaka02_update_metadef_os_nova_server.py deleted file mode 100644 index 9416c68a..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/versions/mitaka02_update_metadef_os_nova_server.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2016 Rackspace -# Copyright 2013 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""update metadef os_nova_server - -Revision ID: mitaka02 -Revises: mitaka01 -Create Date: 2016-08-03 17:23:23.041663 - -""" - -from alembic import op -from sqlalchemy import MetaData, Table - - -# revision identifiers, used by Alembic. 
-revision = 'mitaka02' -down_revision = 'mitaka01' -branch_labels = None -depends_on = None - - -def upgrade(): - migrate_engine = op.get_bind() - meta = MetaData(bind=migrate_engine) - - resource_types_table = Table('metadef_resource_types', meta, autoload=True) - - resource_types_table.update(values={'name': 'OS::Nova::Server'}).where( - resource_types_table.c.name == 'OS::Nova::Instance').execute() diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/ocata01_add_visibility_remove_is_public.py b/glance/db/sqlalchemy/alembic_migrations/versions/ocata01_add_visibility_remove_is_public.py deleted file mode 100644 index 5d66513e..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/versions/ocata01_add_visibility_remove_is_public.py +++ /dev/null @@ -1,72 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""add visibility to and remove is_public from images - -Revision ID: ocata01 -Revises: mitaka02 -Create Date: 2017-01-20 12:58:16.647499 - -""" - -import os - -from alembic import op -from sqlalchemy import Column, Enum, MetaData, select, Table, not_, and_ -import sqlparse - -# revision identifiers, used by Alembic. 
-revision = 'ocata01' -down_revision = 'mitaka02' -branch_labels = None -depends_on = None - - -def upgrade(): - migrate_engine = op.get_bind() - meta = MetaData(bind=migrate_engine) - - engine_name = migrate_engine.engine.name - if engine_name == 'sqlite': - sql_file = os.path.splitext(__file__)[0] - sql_file += '.sql' - with open(sql_file, 'r') as sqlite_script: - sql = sqlparse.format(sqlite_script.read(), strip_comments=True) - for statement in sqlparse.split(sql): - op.execute(statement) - return - - enum = Enum('private', 'public', 'shared', 'community', metadata=meta, - name='image_visibility') - enum.create() - v_col = Column('visibility', enum, nullable=False, server_default='shared') - op.add_column('images', v_col) - - op.create_index('visibility_image_idx', 'images', ['visibility']) - - images = Table('images', meta, autoload=True) - images.update(values={'visibility': 'public'}).where( - images.c.is_public).execute() - - image_members = Table('image_members', meta, autoload=True) - - # NOTE(dharinic): Mark all the non-public images as 'private' first - images.update().values(visibility='private').where( - not_(images.c.is_public)).execute() - # NOTE(dharinic): Identify 'shared' images from the above - images.update().values(visibility='shared').where(and_( - images.c.visibility == 'private', images.c.id.in_(select( - [image_members.c.image_id]).distinct().where( - not_(image_members.c.deleted))))).execute() - - op.drop_index('ix_images_is_public', 'images') - op.drop_column('images', 'is_public') diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/ocata01_add_visibility_remove_is_public.sql b/glance/db/sqlalchemy/alembic_migrations/versions/ocata01_add_visibility_remove_is_public.sql deleted file mode 100644 index 0e848cce..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/versions/ocata01_add_visibility_remove_is_public.sql +++ /dev/null @@ -1,162 +0,0 @@ -CREATE TEMPORARY TABLE images_backup ( - id VARCHAR(36) NOT NULL, - name 
VARCHAR(255), - size INTEGER, - status VARCHAR(30) NOT NULL, - is_public BOOLEAN NOT NULL, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - disk_format VARCHAR(20), - container_format VARCHAR(20), - checksum VARCHAR(32), - owner VARCHAR(255), - min_disk INTEGER NOT NULL, - min_ram INTEGER NOT NULL, - protected BOOLEAN DEFAULT 0 NOT NULL, - virtual_size INTEGER, - PRIMARY KEY (id), - CHECK (is_public IN (0, 1)), - CHECK (deleted IN (0, 1)), - CHECK (protected IN (0, 1)) -); - -INSERT INTO images_backup - SELECT id, - name, - size, - status, - is_public, - created_at, - updated_at, - deleted_at, - deleted, - disk_format, - container_format, - checksum, - owner, - min_disk, - min_ram, - protected, - virtual_size - FROM images; - -DROP TABLE images; - -CREATE TABLE images ( - id VARCHAR(36) NOT NULL, - name VARCHAR(255), - size INTEGER, - status VARCHAR(30) NOT NULL, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - disk_format VARCHAR(20), - container_format VARCHAR(20), - checksum VARCHAR(32), - owner VARCHAR(255), - min_disk INTEGER NOT NULL, - min_ram INTEGER NOT NULL, - protected BOOLEAN DEFAULT 0 NOT NULL, - virtual_size INTEGER, - visibility VARCHAR(9) DEFAULT 'shared' NOT NULL, - PRIMARY KEY (id), - CHECK (deleted IN (0, 1)), - CHECK (protected IN (0, 1)), - CONSTRAINT image_visibility CHECK (visibility IN ('private', 'public', 'shared', 'community')) -); - -CREATE INDEX checksum_image_idx ON images (checksum); -CREATE INDEX visibility_image_idx ON images (visibility); -CREATE INDEX ix_images_deleted ON images (deleted); -CREATE INDEX owner_image_idx ON images (owner); -CREATE INDEX created_at_image_idx ON images (created_at); -CREATE INDEX updated_at_image_idx ON images (updated_at); - --- Copy over all the 'public' rows - -INSERT INTO images ( - id, - name, - size, - status, - created_at, - updated_at, - deleted_at, - deleted, - disk_format, - 
container_format, - checksum, - owner, - min_disk, - min_ram, - protected, - virtual_size - ) - SELECT id, - name, - size, - status, - created_at, - updated_at, - deleted_at, - deleted, - disk_format, - container_format, - checksum, - owner, - min_disk, - min_ram, - protected, - virtual_size - FROM images_backup - WHERE is_public=1; - - -UPDATE images SET visibility='public'; - --- Now copy over the 'private' rows - -INSERT INTO images ( - id, - name, - size, - status, - created_at, - updated_at, - deleted_at, - deleted, - disk_format, - container_format, - checksum, - owner, - min_disk, - min_ram, - protected, - virtual_size - ) - SELECT id, - name, - size, - status, - created_at, - updated_at, - deleted_at, - deleted, - disk_format, - container_format, - checksum, - owner, - min_disk, - min_ram, - protected, - virtual_size - FROM images_backup - WHERE is_public=0; - -UPDATE images SET visibility='private' WHERE visibility='shared'; -UPDATE images SET visibility='shared' WHERE visibility='private' AND id IN (SELECT DISTINCT image_id FROM image_members WHERE deleted != 1); - -DROP TABLE images_backup; diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/ocata_contract01_drop_is_public.py b/glance/db/sqlalchemy/alembic_migrations/versions/ocata_contract01_drop_is_public.py deleted file mode 100644 index 48cb12c0..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/versions/ocata_contract01_drop_is_public.py +++ /dev/null @@ -1,67 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""remove is_public from images - -Revision ID: ocata_contract01 -Revises: mitaka02 -Create Date: 2017-01-27 12:58:16.647499 - -""" - -from alembic import op -from sqlalchemy import MetaData, Table - -from glance.db import migration - -# revision identifiers, used by Alembic. -revision = 'ocata_contract01' -down_revision = 'mitaka02' -branch_labels = migration.CONTRACT_BRANCH -depends_on = 'ocata_expand01' - - -MYSQL_DROP_INSERT_TRIGGER = """ -DROP TRIGGER insert_visibility; -""" - -MYSQL_DROP_UPDATE_TRIGGER = """ -DROP TRIGGER update_visibility; -""" - - -def _drop_column(): - op.drop_index('ix_images_is_public', 'images') - op.drop_column('images', 'is_public') - - -def _drop_triggers(engine): - engine_name = engine.engine.name - if engine_name == "mysql": - op.execute(MYSQL_DROP_INSERT_TRIGGER) - op.execute(MYSQL_DROP_UPDATE_TRIGGER) - - -def _set_nullability_and_default_on_visibility(meta): - # NOTE(hemanthm): setting the default on 'visibility' column - # to 'shared'. Also, marking it as non-nullable. - images = Table('images', meta, autoload=True) - images.c.visibility.alter(nullable=False, server_default='shared') - - -def upgrade(): - migrate_engine = op.get_bind() - meta = MetaData(bind=migrate_engine) - - _drop_column() - _drop_triggers(migrate_engine) - _set_nullability_and_default_on_visibility(meta) diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/ocata_expand01_add_visibility.py b/glance/db/sqlalchemy/alembic_migrations/versions/ocata_expand01_add_visibility.py deleted file mode 100644 index 665f260c..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/versions/ocata_expand01_add_visibility.py +++ /dev/null @@ -1,151 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""add visibility to images - -Revision ID: ocata_expand01 -Revises: mitaka02 -Create Date: 2017-01-27 12:58:16.647499 - -""" - -from alembic import op -from sqlalchemy import Column, Enum, MetaData, Table - -from glance.db import migration - -# revision identifiers, used by Alembic. -revision = 'ocata_expand01' -down_revision = 'mitaka02' -branch_labels = migration.EXPAND_BRANCH -depends_on = None - -ERROR_MESSAGE = 'Invalid visibility value' -MYSQL_INSERT_TRIGGER = """ -CREATE TRIGGER insert_visibility BEFORE INSERT ON images -FOR EACH ROW -BEGIN - -- NOTE(abashmak): - -- The following IF/ELSE block implements a priority decision tree. - -- Strict order MUST be followed to correctly cover all the edge cases. 
- - -- Edge case: neither is_public nor visibility specified - -- (or both specified as NULL): - IF NEW.is_public <=> NULL AND NEW.visibility <=> NULL THEN - SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s'; - -- Edge case: both is_public and visibility specified: - ELSEIF NOT(NEW.is_public <=> NULL OR NEW.visibility <=> NULL) THEN - SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s'; - -- Inserting with is_public, set visibility accordingly: - ELSEIF NOT NEW.is_public <=> NULL THEN - IF NEW.is_public = 1 THEN - SET NEW.visibility = 'public'; - ELSE - SET NEW.visibility = 'shared'; - END IF; - -- Inserting with visibility, set is_public accordingly: - ELSEIF NOT NEW.visibility <=> NULL THEN - IF NEW.visibility = 'public' THEN - SET NEW.is_public = 1; - ELSE - SET NEW.is_public = 0; - END IF; - -- Edge case: either one of: is_public or visibility, - -- is explicitly set to NULL: - ELSE - SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s'; - END IF; -END; -""" - -MYSQL_UPDATE_TRIGGER = """ -CREATE TRIGGER update_visibility BEFORE UPDATE ON images -FOR EACH ROW -BEGIN - -- Case: new value specified for is_public: - IF NOT NEW.is_public <=> OLD.is_public THEN - -- Edge case: is_public explicitly set to NULL: - IF NEW.is_public <=> NULL THEN - SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s'; - -- Edge case: new value also specified for visibility - ELSEIF NOT NEW.visibility <=> OLD.visibility THEN - SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s'; - -- Case: visibility not specified or specified as OLD value: - -- NOTE(abashmak): There is no way to reliably determine which - -- of the above two cases occurred, but allowing to proceed with - -- the update in either case does not break the model for both - -- N and N-1 services. 
- ELSE - -- Set visibility according to the value of is_public: - IF NEW.is_public <=> 1 THEN - SET NEW.visibility = 'public'; - ELSE - SET NEW.visibility = 'shared'; - END IF; - END IF; - -- Case: new value specified for visibility: - ELSEIF NOT NEW.visibility <=> OLD.visibility THEN - -- Edge case: visibility explicitly set to NULL: - IF NEW.visibility <=> NULL THEN - SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s'; - -- Edge case: new value also specified for is_public - ELSEIF NOT NEW.is_public <=> OLD.is_public THEN - SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s'; - -- Case: is_public not specified or specified as OLD value: - -- NOTE(abashmak): There is no way to reliably determine which - -- of the above two cases occurred, but allowing to proceed with - -- the update in either case does not break the model for both - -- N and N-1 services. - ELSE - -- Set is_public according to the value of visibility: - IF NEW.visibility <=> 'public' THEN - SET NEW.is_public = 1; - ELSE - SET NEW.is_public = 0; - END IF; - END IF; - END IF; -END; -""" - - -def _add_visibility_column(meta): - enum = Enum('private', 'public', 'shared', 'community', metadata=meta, - name='image_visibility') - enum.create() - v_col = Column('visibility', enum, nullable=True, server_default=None) - op.add_column('images', v_col) - op.create_index('visibility_image_idx', 'images', ['visibility']) - - -def _add_triggers(engine): - if engine.engine.name == 'mysql': - op.execute(MYSQL_INSERT_TRIGGER % (ERROR_MESSAGE, ERROR_MESSAGE, - ERROR_MESSAGE)) - op.execute(MYSQL_UPDATE_TRIGGER % (ERROR_MESSAGE, ERROR_MESSAGE, - ERROR_MESSAGE, ERROR_MESSAGE)) - - -def _change_nullability_and_default_on_is_public(meta): - # NOTE(hemanthm): we mark is_public as nullable so that when new versions - # add data only to be visibility column, is_public can be null. 
- images = Table('images', meta, autoload=True) - images.c.is_public.alter(nullable=True, server_default=None) - - -def upgrade(): - migrate_engine = op.get_bind() - meta = MetaData(bind=migrate_engine) - - _add_visibility_column(meta) - _change_nullability_and_default_on_is_public(meta) - _add_triggers(migrate_engine) diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/pike01_drop_artifacts_tables.py b/glance/db/sqlalchemy/alembic_migrations/versions/pike01_drop_artifacts_tables.py deleted file mode 100644 index b7886c47..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/versions/pike01_drop_artifacts_tables.py +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""drop glare artifacts tables - -Revision ID: pike01 -Revises: ocata01 -Create Date: 2017-02-08 20:32:51.200867 - -""" - -from alembic import op - -# revision identifiers, used by Alembic. 
-revision = 'pike01' -down_revision = 'ocata01' -branch_labels = None -depends_on = None - - -def upgrade(): - # create list of artifact tables in reverse order of their creation - table_names = [] - table_names.append('artifact_blob_locations') - table_names.append('artifact_properties') - table_names.append('artifact_blobs') - table_names.append('artifact_dependencies') - table_names.append('artifact_tags') - table_names.append('artifacts') - - for table_name in table_names: - op.drop_table(table_name=table_name) diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/pike_contract01_drop_artifacts_tables.py b/glance/db/sqlalchemy/alembic_migrations/versions/pike_contract01_drop_artifacts_tables.py deleted file mode 100644 index 11e4eb41..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/versions/pike_contract01_drop_artifacts_tables.py +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""drop glare artifacts tables - -Revision ID: pike_contract01 -Revises: ocata_contract01 -Create Date: 2017-02-09 20:32:51.222867 - -""" - -from alembic import op - -# revision identifiers, used by Alembic. 
-revision = 'pike_contract01' -down_revision = 'ocata_contract01' -branch_labels = None -depends_on = 'pike_expand01' - - -def upgrade(): - # create list of artifact tables in reverse order of their creation - table_names = [] - table_names.append('artifact_blob_locations') - table_names.append('artifact_properties') - table_names.append('artifact_blobs') - table_names.append('artifact_dependencies') - table_names.append('artifact_tags') - table_names.append('artifacts') - - for table_name in table_names: - op.drop_table(table_name=table_name) diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/pike_expand01_empty.py b/glance/db/sqlalchemy/alembic_migrations/versions/pike_expand01_empty.py deleted file mode 100644 index 24cc6708..00000000 --- a/glance/db/sqlalchemy/alembic_migrations/versions/pike_expand01_empty.py +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""empty expand for symmetry with pike_contract01 - -Revision ID: pike_expand01 -Revises: ocata_expand01 -Create Date: 2017-02-09 19:55:16.657499 - -""" - -# revision identifiers, used by Alembic. 
-revision = 'pike_expand01' -down_revision = 'ocata_expand01' -branch_labels = None -depends_on = None - - -def upgrade(): - pass diff --git a/glance/db/sqlalchemy/api.py b/glance/db/sqlalchemy/api.py deleted file mode 100644 index e2bd2129..00000000 --- a/glance/db/sqlalchemy/api.py +++ /dev/null @@ -1,1880 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2010-2011 OpenStack Foundation -# Copyright 2012 Justin Santa Barbara -# Copyright 2013 IBM Corp. -# Copyright 2015 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -"""Defines interface for DB access.""" - -import datetime -import threading - -from oslo_config import cfg -from oslo_db import exception as db_exception -from oslo_db.sqlalchemy import session -from oslo_log import log as logging -from oslo_utils import excutils -import osprofiler.sqlalchemy -from retrying import retry -import six -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range -import sqlalchemy -from sqlalchemy.ext.compiler import compiles -from sqlalchemy import MetaData, Table -import sqlalchemy.orm as sa_orm -from sqlalchemy import sql -import sqlalchemy.sql as sa_sql - -from glance.common import exception -from glance.common import timeutils -from glance.common import utils -from glance.db.sqlalchemy.metadef_api import (resource_type - as metadef_resource_type_api) -from glance.db.sqlalchemy.metadef_api import (resource_type_association - as metadef_association_api) -from glance.db.sqlalchemy.metadef_api import namespace as metadef_namespace_api -from glance.db.sqlalchemy.metadef_api import object as metadef_object_api -from glance.db.sqlalchemy.metadef_api import property as metadef_property_api -from glance.db.sqlalchemy.metadef_api import tag as metadef_tag_api -from glance.db.sqlalchemy import models -from glance.db import utils as db_utils -from glance.i18n import _, _LW, _LI, _LE - -sa_logger = None -LOG = logging.getLogger(__name__) - - -STATUSES = ['active', 'saving', 'queued', 'killed', 'pending_delete', - 'deleted', 'deactivated'] - -CONF = cfg.CONF -CONF.import_group("profiler", "glance.common.wsgi") - -_FACADE = None -_LOCK = threading.Lock() - - -def _retry_on_deadlock(exc): - """Decorator to retry a DB API call if Deadlock was received.""" - - if isinstance(exc, db_exception.DBDeadlock): - LOG.warn(_LW("Deadlock detected. 
Retrying...")) - return True - return False - - -def _create_facade_lazily(): - global _LOCK, _FACADE - if _FACADE is None: - with _LOCK: - if _FACADE is None: - _FACADE = session.EngineFacade.from_config(CONF) - - if CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy: - osprofiler.sqlalchemy.add_tracing(sqlalchemy, - _FACADE.get_engine(), - "db") - return _FACADE - - -def get_engine(): - facade = _create_facade_lazily() - return facade.get_engine() - - -def get_session(autocommit=True, expire_on_commit=False): - facade = _create_facade_lazily() - return facade.get_session(autocommit=autocommit, - expire_on_commit=expire_on_commit) - - -def _validate_db_int(**kwargs): - """Make sure that all arguments are less than or equal to 2 ** 31 - 1. - - This limitation is introduced because databases stores INT in 4 bytes. - If the validation fails for some argument, exception.Invalid is raised with - appropriate information. - """ - max_int = (2 ** 31) - 1 - - for param_key, param_value in kwargs.items(): - if param_value and param_value > max_int: - msg = _("'%(param)s' value out of range, " - "must not exceed %(max)d.") % {"param": param_key, - "max": max_int} - raise exception.Invalid(msg) - - -def clear_db_env(): - """ - Unset global configuration variables for database. 
- """ - global _FACADE - _FACADE = None - - -def _check_mutate_authorization(context, image_ref): - if not is_image_mutable(context, image_ref): - LOG.warn(_LW("Attempted to modify image user did not own.")) - msg = _("You do not own this image") - if image_ref.visibility in ['private', 'shared']: - exc_class = exception.Forbidden - else: - # 'public', or 'community' - exc_class = exception.ForbiddenPublicImage - - raise exc_class(msg) - - -def image_create(context, values, v1_mode=False): - """Create an image from the values dictionary.""" - image = _image_update(context, values, None, purge_props=False) - if v1_mode: - image = db_utils.mutate_image_dict_to_v1(image) - return image - - -def image_update(context, image_id, values, purge_props=False, - from_state=None, v1_mode=False): - """ - Set the given properties on an image and update it. - - :raises: ImageNotFound if image does not exist. - """ - image = _image_update(context, values, image_id, purge_props, - from_state=from_state) - if v1_mode: - image = db_utils.mutate_image_dict_to_v1(image) - return image - - -@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, - stop_max_attempt_number=50) -def image_destroy(context, image_id): - """Destroy the image or raise if it does not exist.""" - session = get_session() - with session.begin(): - image_ref = _image_get(context, image_id, session=session) - - # Perform authorization check - _check_mutate_authorization(context, image_ref) - - image_ref.delete(session=session) - delete_time = image_ref.deleted_at - - _image_locations_delete_all(context, image_id, delete_time, session) - - _image_property_delete_all(context, image_id, delete_time, session) - - _image_member_delete_all(context, image_id, delete_time, session) - - _image_tag_delete_all(context, image_id, delete_time, session) - - return _normalize_locations(context, image_ref) - - -def _normalize_locations(context, image, force_show_deleted=False): - """ - Generate suitable dictionary list for 
locations field of image. - - We don't need to set other data fields of location record which return - from image query. - """ - - if image['status'] == 'deactivated' and not context.is_admin: - # Locations are not returned for a deactivated image for non-admin user - image['locations'] = [] - return image - - if force_show_deleted: - locations = image['locations'] - else: - locations = [x for x in image['locations'] if not x.deleted] - image['locations'] = [{'id': loc['id'], - 'url': loc['value'], - 'metadata': loc['meta_data'], - 'status': loc['status']} - for loc in locations] - return image - - -def _normalize_tags(image): - undeleted_tags = [x for x in image['tags'] if not x.deleted] - image['tags'] = [tag['value'] for tag in undeleted_tags] - return image - - -def image_get(context, image_id, session=None, force_show_deleted=False, - v1_mode=False): - image = _image_get(context, image_id, session=session, - force_show_deleted=force_show_deleted) - image = _normalize_locations(context, image.to_dict(), - force_show_deleted=force_show_deleted) - if v1_mode: - image = db_utils.mutate_image_dict_to_v1(image) - return image - - -def _check_image_id(image_id): - """ - check if the given image id is valid before executing operations. For - now, we only check its length. The original purpose of this method is - wrapping the different behaviors between MySql and DB2 when the image id - length is longer than the defined length in database model. 
- :param image_id: The id of the image we want to check - :returns: Raise NoFound exception if given image id is invalid - """ - if (image_id and - len(image_id) > models.Image.id.property.columns[0].type.length): - raise exception.ImageNotFound() - - -def _image_get(context, image_id, session=None, force_show_deleted=False): - """Get an image or raise if it does not exist.""" - _check_image_id(image_id) - session = session or get_session() - - try: - query = session.query(models.Image).options( - sa_orm.joinedload(models.Image.properties)).options( - sa_orm.joinedload( - models.Image.locations)).filter_by(id=image_id) - - # filter out deleted images if context disallows it - if not force_show_deleted and not context.can_see_deleted: - query = query.filter_by(deleted=False) - - image = query.one() - - except sa_orm.exc.NoResultFound: - msg = "No image found with ID %s" % image_id - LOG.debug(msg) - raise exception.ImageNotFound(msg) - - # Make sure they can look at it - if not is_image_visible(context, image): - msg = "Forbidding request, image %s not visible" % image_id - LOG.debug(msg) - raise exception.Forbidden(msg) - - return image - - -def is_image_mutable(context, image): - """Return True if the image is mutable in this context.""" - # Is admin == image mutable - if context.is_admin: - return True - - # No owner == image not mutable - if image['owner'] is None or context.owner is None: - return False - - # Image only mutable by its owner - return image['owner'] == context.owner - - -def is_image_visible(context, image, status=None): - """Return True if the image is visible in this context.""" - return db_utils.is_image_visible(context, image, image_member_find, status) - - -def _get_default_column_value(column_type): - """Return the default value of the columns from DB table - - In postgreDB case, if no right default values are being set, an - psycopg2.DataError will be thrown. 
- """ - type_schema = { - 'datetime': None, - 'big_integer': 0, - 'integer': 0, - 'string': '' - } - - if isinstance(column_type, sa_sql.type_api.Variant): - return _get_default_column_value(column_type.impl) - - return type_schema[column_type.__visit_name__] - - -def _paginate_query(query, model, limit, sort_keys, marker=None, - sort_dir=None, sort_dirs=None): - """Returns a query with sorting / pagination criteria added. - - Pagination works by requiring a unique sort_key, specified by sort_keys. - (If sort_keys is not unique, then we risk looping through values.) - We use the last row in the previous page as the 'marker' for pagination. - So we must return values that follow the passed marker in the order. - With a single-valued sort_key, this would be easy: sort_key > X. - With a compound-values sort_key, (k1, k2, k3) we must do this to repeat - the lexicographical ordering: - (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) - - We also have to cope with different sort_directions. - - Typically, the id of the last row is used as the client-facing pagination - marker, then the actual marker object must be fetched from the db and - passed in to us as marker. - - :param query: the query object to which we should add paging/sorting - :param model: the ORM model class - :param limit: maximum number of items to return - :param sort_keys: array of attributes by which results should be sorted - :param marker: the last item of the previous page; we returns the next - results after this value. - :param sort_dir: direction in which results should be sorted (asc, desc) - :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys - - :rtype: sqlalchemy.orm.query.Query - :returns: The query with sorting/pagination added. 
- """ - - if 'id' not in sort_keys: - # TODO(justinsb): If this ever gives a false-positive, check - # the actual primary key, rather than assuming its id - LOG.warn(_LW('Id not in sort_keys; is sort_keys unique?')) - - assert(not (sort_dir and sort_dirs)) # nosec - # nosec: This function runs safely if the assertion fails. - - # Default the sort direction to ascending - if sort_dir is None: - sort_dir = 'asc' - - # Ensure a per-column sort direction - if sort_dirs is None: - sort_dirs = [sort_dir] * len(sort_keys) - - assert(len(sort_dirs) == len(sort_keys)) # nosec - # nosec: This function runs safely if the assertion fails. - if len(sort_dirs) < len(sort_keys): - sort_dirs += [sort_dir] * (len(sort_keys) - len(sort_dirs)) - - # Add sorting - for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): - sort_dir_func = { - 'asc': sqlalchemy.asc, - 'desc': sqlalchemy.desc, - }[current_sort_dir] - - try: - sort_key_attr = getattr(model, current_sort_key) - except AttributeError: - raise exception.InvalidSortKey() - query = query.order_by(sort_dir_func(sort_key_attr)) - - default = '' # Default to an empty string if NULL - - # Add pagination - if marker is not None: - marker_values = [] - for sort_key in sort_keys: - v = getattr(marker, sort_key) - if v is None: - v = default - marker_values.append(v) - - # Build up an array of sort criteria as in the docstring - criteria_list = [] - for i in range(len(sort_keys)): - crit_attrs = [] - for j in range(i): - model_attr = getattr(model, sort_keys[j]) - default = _get_default_column_value( - model_attr.property.columns[0].type) - attr = sa_sql.expression.case([(model_attr != None, - model_attr), ], - else_=default) - crit_attrs.append((attr == marker_values[j])) - - model_attr = getattr(model, sort_keys[i]) - default = _get_default_column_value( - model_attr.property.columns[0].type) - attr = sa_sql.expression.case([(model_attr != None, - model_attr), ], - else_=default) - if sort_dirs[i] == 'desc': - 
crit_attrs.append((attr < marker_values[i])) - elif sort_dirs[i] == 'asc': - crit_attrs.append((attr > marker_values[i])) - else: - raise ValueError(_("Unknown sort direction, " - "must be 'desc' or 'asc'")) - - criteria = sa_sql.and_(*crit_attrs) - criteria_list.append(criteria) - - f = sa_sql.or_(*criteria_list) - query = query.filter(f) - - if limit is not None: - query = query.limit(limit) - - return query - - -def _make_conditions_from_filters(filters, is_public=None): - # NOTE(venkatesh) make copy of the filters are to be altered in this - # method. - filters = filters.copy() - - image_conditions = [] - prop_conditions = [] - tag_conditions = [] - - if is_public is not None: - if is_public: - image_conditions.append(models.Image.visibility == 'public') - else: - image_conditions.append(models.Image.visibility != 'public') - - if 'checksum' in filters: - checksum = filters.pop('checksum') - image_conditions.append(models.Image.checksum == checksum) - - for (k, v) in filters.pop('properties', {}).items(): - prop_filters = _make_image_property_condition(key=k, value=v) - prop_conditions.append(prop_filters) - - if 'changes-since' in filters: - # normalize timestamp to UTC, as sqlalchemy doesn't appear to - # respect timezone offsets - changes_since = timeutils.normalize_time(filters.pop('changes-since')) - image_conditions.append(models.Image.updated_at > changes_since) - - if 'deleted' in filters: - deleted_filter = filters.pop('deleted') - image_conditions.append(models.Image.deleted == deleted_filter) - # TODO(bcwaldon): handle this logic in registry server - if not deleted_filter: - image_statuses = [s for s in STATUSES if s != 'killed'] - image_conditions.append(models.Image.status.in_(image_statuses)) - - if 'tags' in filters: - tags = filters.pop('tags') - for tag in tags: - tag_filters = [models.ImageTag.deleted == False] - tag_filters.extend([models.ImageTag.value == tag]) - tag_conditions.append(tag_filters) - - filters = {k: v for k, v in 
filters.items() if v is not None} - - # need to copy items because filters is modified in the loop body - # (filters.pop(k)) - keys = list(filters.keys()) - for k in keys: - key = k - if k.endswith('_min') or k.endswith('_max'): - key = key[0:-4] - try: - v = int(filters.pop(k)) - except ValueError: - msg = _("Unable to filter on a range " - "with a non-numeric value.") - raise exception.InvalidFilterRangeValue(msg) - - if k.endswith('_min'): - image_conditions.append(getattr(models.Image, key) >= v) - if k.endswith('_max'): - image_conditions.append(getattr(models.Image, key) <= v) - elif k in ['created_at', 'updated_at']: - attr_value = getattr(models.Image, key) - operator, isotime = utils.split_filter_op(filters.pop(k)) - try: - parsed_time = timeutils.parse_isotime(isotime) - threshold = timeutils.normalize_time(parsed_time) - except ValueError: - msg = (_("Bad \"%s\" query filter format. " - "Use ISO 8601 DateTime notation.") % k) - raise exception.InvalidParameterValue(msg) - - comparison = utils.evaluate_filter_op(attr_value, operator, - threshold) - image_conditions.append(comparison) - - elif k in ['name', 'id', 'status', 'container_format', 'disk_format']: - attr_value = getattr(models.Image, key) - operator, list_value = utils.split_filter_op(filters.pop(k)) - if operator == 'in': - threshold = utils.split_filter_value_for_quotes(list_value) - comparison = attr_value.in_(threshold) - image_conditions.append(comparison) - elif operator == 'eq': - image_conditions.append(attr_value == list_value) - else: - msg = (_("Unable to filter by unknown operator '%s'.") - % operator) - raise exception.InvalidFilterOperatorValue(msg) - - for (k, value) in filters.items(): - if hasattr(models.Image, k): - image_conditions.append(getattr(models.Image, k) == value) - else: - prop_filters = _make_image_property_condition(key=k, value=value) - prop_conditions.append(prop_filters) - - return image_conditions, prop_conditions, tag_conditions - - -def 
_make_image_property_condition(key, value): - prop_filters = [models.ImageProperty.deleted == False] - prop_filters.extend([models.ImageProperty.name == key]) - prop_filters.extend([models.ImageProperty.value == value]) - return prop_filters - - -def _select_images_query(context, image_conditions, admin_as_user, - member_status, visibility): - session = get_session() - - img_conditional_clause = sa_sql.and_(*image_conditions) - - regular_user = (not context.is_admin) or admin_as_user - - query_member = session.query(models.Image).join( - models.Image.members).filter(img_conditional_clause) - if regular_user: - member_filters = [models.ImageMember.deleted == False] - member_filters.extend([models.Image.visibility == 'shared']) - if context.owner is not None: - member_filters.extend([models.ImageMember.member == context.owner]) - if member_status != 'all': - member_filters.extend([ - models.ImageMember.status == member_status]) - query_member = query_member.filter(sa_sql.and_(*member_filters)) - - query_image = session.query(models.Image).filter(img_conditional_clause) - if regular_user: - visibility_filters = [ - models.Image.visibility == 'public', - models.Image.visibility == 'community', - ] - query_image = query_image .filter(sa_sql.or_(*visibility_filters)) - query_image_owner = None - if context.owner is not None: - query_image_owner = session.query(models.Image).filter( - models.Image.owner == context.owner).filter( - img_conditional_clause) - if query_image_owner is not None: - query = query_image.union(query_image_owner, query_member) - else: - query = query_image.union(query_member) - return query - else: - # Admin user - return query_image - - -def image_get_all(context, filters=None, marker=None, limit=None, - sort_key=None, sort_dir=None, - member_status='accepted', is_public=None, - admin_as_user=False, return_tag=False, v1_mode=False): - """ - Get all images that match zero or more filters. - - :param filters: dict of filter keys and values. 
If a 'properties' - key is present, it is treated as a dict of key/value - filters on the image properties attribute - :param marker: image id after which to start page - :param limit: maximum number of images to return - :param sort_key: list of image attributes by which results should be sorted - :param sort_dir: directions in which results should be sorted (asc, desc) - :param member_status: only return shared images that have this membership - status - :param is_public: If true, return only public images. If false, return - only private and shared images. - :param admin_as_user: For backwards compatibility. If true, then return to - an admin the equivalent set of images which it would see - if it was a regular user - :param return_tag: To indicates whether image entry in result includes it - relevant tag entries. This could improve upper-layer - query performance, to prevent using separated calls - :param v1_mode: If true, mutates the 'visibility' value of each image - into the v1-compatible field 'is_public' - """ - sort_key = ['created_at'] if not sort_key else sort_key - - default_sort_dir = 'desc' - - if not sort_dir: - sort_dir = [default_sort_dir] * len(sort_key) - elif len(sort_dir) == 1: - default_sort_dir = sort_dir[0] - sort_dir *= len(sort_key) - - filters = filters or {} - - visibility = filters.pop('visibility', None) - showing_deleted = 'changes-since' in filters or filters.get('deleted', - False) - - img_cond, prop_cond, tag_cond = _make_conditions_from_filters( - filters, is_public) - - query = _select_images_query(context, - img_cond, - admin_as_user, - member_status, - visibility) - if visibility is not None: - # with a visibility, we always and only include images with that - # visibility - query = query.filter(models.Image.visibility == visibility) - elif context.owner is None: - # without either a visibility or an owner, we never include - # 'community' images - query = query.filter(models.Image.visibility != 'community') - else: - # 
without a visibility and with an owner, we only want to include - # 'community' images if and only if they are owned by this owner - community_filters = [ - models.Image.owner == context.owner, - models.Image.visibility != 'community', - ] - query = query.filter(sa_sql.or_(*community_filters)) - - if prop_cond: - for prop_condition in prop_cond: - query = query.join(models.ImageProperty, aliased=True).filter( - sa_sql.and_(*prop_condition)) - - if tag_cond: - for tag_condition in tag_cond: - query = query.join(models.ImageTag, aliased=True).filter( - sa_sql.and_(*tag_condition)) - - marker_image = None - if marker is not None: - marker_image = _image_get(context, - marker, - force_show_deleted=showing_deleted) - - for key in ['created_at', 'id']: - if key not in sort_key: - sort_key.append(key) - sort_dir.append(default_sort_dir) - - query = _paginate_query(query, models.Image, limit, - sort_key, - marker=marker_image, - sort_dir=None, - sort_dirs=sort_dir) - - query = query.options(sa_orm.joinedload( - models.Image.properties)).options( - sa_orm.joinedload(models.Image.locations)) - if return_tag: - query = query.options(sa_orm.joinedload(models.Image.tags)) - - images = [] - for image in query.all(): - image_dict = image.to_dict() - image_dict = _normalize_locations(context, image_dict, - force_show_deleted=showing_deleted) - if return_tag: - image_dict = _normalize_tags(image_dict) - if v1_mode: - image_dict = db_utils.mutate_image_dict_to_v1(image_dict) - images.append(image_dict) - return images - - -def _drop_protected_attrs(model_class, values): - """ - Removed protected attributes from values dictionary using the models - __protected_attributes__ field. 
- """ - for attr in model_class.__protected_attributes__: - if attr in values: - del values[attr] - - -def _image_get_disk_usage_by_owner(owner, session, image_id=None): - query = session.query(models.Image) - query = query.filter(models.Image.owner == owner) - if image_id is not None: - query = query.filter(models.Image.id != image_id) - query = query.filter(models.Image.size > 0) - query = query.filter(~models.Image.status.in_(['killed', 'deleted'])) - images = query.all() - - total = 0 - for i in images: - locations = [l for l in i.locations if l['status'] != 'deleted'] - total += (i.size * len(locations)) - return total - - -def _validate_image(values, mandatory_status=True): - """ - Validates the incoming data and raises a Invalid exception - if anything is out of order. - - :param values: Mapping of image metadata to check - :param mandatory_status: Whether to validate status from values - """ - - if mandatory_status: - status = values.get('status') - if not status: - msg = "Image status is required." - raise exception.Invalid(msg) - - if status not in STATUSES: - msg = "Invalid image status '%s' for image." 
% status - raise exception.Invalid(msg) - - # validate integer values to eliminate DBError on save - _validate_db_int(min_disk=values.get('min_disk'), - min_ram=values.get('min_ram')) - - return values - - -def _update_values(image_ref, values): - for k in values: - if getattr(image_ref, k) != values[k]: - setattr(image_ref, k, values[k]) - - -@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, - stop_max_attempt_number=50) -@utils.no_4byte_params -def _image_update(context, values, image_id, purge_props=False, - from_state=None): - """ - Used internally by image_create and image_update - - :param context: Request context - :param values: A dict of attributes to set - :param image_id: If None, create the image, otherwise, find and update it - """ - - # NOTE(jbresnah) values is altered in this so a copy is needed - values = values.copy() - - session = get_session() - with session.begin(): - - # Remove the properties passed in the values mapping. We - # handle properties separately from base image attributes, - # and leaving properties in the values mapping will cause - # a SQLAlchemy model error because SQLAlchemy expects the - # properties attribute of an Image model to be a list and - # not a dict. 
- properties = values.pop('properties', {}) - - location_data = values.pop('locations', None) - - new_status = values.get('status') - if image_id: - image_ref = _image_get(context, image_id, session=session) - current = image_ref.status - # Perform authorization check - _check_mutate_authorization(context, image_ref) - else: - if values.get('size') is not None: - values['size'] = int(values['size']) - - if 'min_ram' in values: - values['min_ram'] = int(values['min_ram'] or 0) - - if 'min_disk' in values: - values['min_disk'] = int(values['min_disk'] or 0) - - values['protected'] = bool(values.get('protected', False)) - image_ref = models.Image() - - values = db_utils.ensure_image_dict_v2_compliant(values) - - # Need to canonicalize ownership - if 'owner' in values and not values['owner']: - values['owner'] = None - - if image_id: - # Don't drop created_at if we're passing it in... - _drop_protected_attrs(models.Image, values) - # NOTE(iccha-sethi): updated_at must be explicitly set in case - # only ImageProperty table was modifited - values['updated_at'] = timeutils.utcnow() - - if image_id: - query = session.query(models.Image).filter_by(id=image_id) - if from_state: - query = query.filter_by(status=from_state) - - mandatory_status = True if new_status else False - _validate_image(values, mandatory_status=mandatory_status) - - # Validate fields for Images table. This is similar to what is done - # for the query result update except that we need to do it prior - # in this case. 
- values = {key: values[key] for key in values - if key in image_ref.to_dict()} - updated = query.update(values, synchronize_session='fetch') - - if not updated: - msg = (_('cannot transition from %(current)s to ' - '%(next)s in update (wanted ' - 'from_state=%(from)s)') % - {'current': current, 'next': new_status, - 'from': from_state}) - raise exception.Conflict(msg) - - image_ref = _image_get(context, image_id, session=session) - else: - image_ref.update(values) - # Validate the attributes before we go any further. From my - # investigation, the @validates decorator does not validate - # on new records, only on existing records, which is, well, - # idiotic. - values = _validate_image(image_ref.to_dict()) - _update_values(image_ref, values) - - try: - image_ref.save(session=session) - except db_exception.DBDuplicateEntry: - raise exception.Duplicate("Image ID %s already exists!" - % values['id']) - - _set_properties_for_image(context, image_ref, properties, purge_props, - session) - - if location_data: - _image_locations_set(context, image_ref.id, location_data, - session=session) - - return image_get(context, image_ref.id) - - -@utils.no_4byte_params -def image_location_add(context, image_id, location, session=None): - deleted = location['status'] in ('deleted', 'pending_delete') - delete_time = timeutils.utcnow() if deleted else None - location_ref = models.ImageLocation(image_id=image_id, - value=location['url'], - meta_data=location['metadata'], - status=location['status'], - deleted=deleted, - deleted_at=delete_time) - session = session or get_session() - location_ref.save(session=session) - - -@utils.no_4byte_params -def image_location_update(context, image_id, location, session=None): - loc_id = location.get('id') - if loc_id is None: - msg = _("The location data has an invalid ID: %d") % loc_id - raise exception.Invalid(msg) - - try: - session = session or get_session() - location_ref = session.query(models.ImageLocation).filter_by( - 
id=loc_id).filter_by(image_id=image_id).one() - - deleted = location['status'] in ('deleted', 'pending_delete') - updated_time = timeutils.utcnow() - delete_time = updated_time if deleted else None - - location_ref.update({"value": location['url'], - "meta_data": location['metadata'], - "status": location['status'], - "deleted": deleted, - "updated_at": updated_time, - "deleted_at": delete_time}) - location_ref.save(session=session) - except sa_orm.exc.NoResultFound: - msg = (_("No location found with ID %(loc)s from image %(img)s") % - dict(loc=loc_id, img=image_id)) - LOG.warn(msg) - raise exception.NotFound(msg) - - -def image_location_delete(context, image_id, location_id, status, - delete_time=None, session=None): - if status not in ('deleted', 'pending_delete'): - msg = _("The status of deleted image location can only be set to " - "'pending_delete' or 'deleted'") - raise exception.Invalid(msg) - - try: - session = session or get_session() - location_ref = session.query(models.ImageLocation).filter_by( - id=location_id).filter_by(image_id=image_id).one() - - delete_time = delete_time or timeutils.utcnow() - - location_ref.update({"deleted": True, - "status": status, - "updated_at": delete_time, - "deleted_at": delete_time}) - location_ref.save(session=session) - except sa_orm.exc.NoResultFound: - msg = (_("No location found with ID %(loc)s from image %(img)s") % - dict(loc=location_id, img=image_id)) - LOG.warn(msg) - raise exception.NotFound(msg) - - -def _image_locations_set(context, image_id, locations, session=None): - # NOTE(zhiyan): 1. 
Remove records from DB for deleted locations - session = session or get_session() - query = session.query(models.ImageLocation).filter_by( - image_id=image_id).filter_by(deleted=False) - - loc_ids = [loc['id'] for loc in locations if loc.get('id')] - if loc_ids: - query = query.filter(~models.ImageLocation.id.in_(loc_ids)) - - for loc_id in [loc_ref.id for loc_ref in query.all()]: - image_location_delete(context, image_id, loc_id, 'deleted', - session=session) - - # NOTE(zhiyan): 2. Adding or update locations - for loc in locations: - if loc.get('id') is None: - image_location_add(context, image_id, loc, session=session) - else: - image_location_update(context, image_id, loc, session=session) - - -def _image_locations_delete_all(context, image_id, - delete_time=None, session=None): - """Delete all image locations for given image""" - session = session or get_session() - location_refs = session.query(models.ImageLocation).filter_by( - image_id=image_id).filter_by(deleted=False).all() - - for loc_id in [loc_ref.id for loc_ref in location_refs]: - image_location_delete(context, image_id, loc_id, 'deleted', - delete_time=delete_time, session=session) - - -@utils.no_4byte_params -def _set_properties_for_image(context, image_ref, properties, - purge_props=False, session=None): - """ - Create or update a set of image_properties for a given image - - :param context: Request context - :param image_ref: An Image object - :param properties: A dict of properties to set - :param session: A SQLAlchemy session to use (if present) - """ - orig_properties = {} - for prop_ref in image_ref.properties: - orig_properties[prop_ref.name] = prop_ref - - for name, value in six.iteritems(properties): - prop_values = {'image_id': image_ref.id, - 'name': name, - 'value': value} - if name in orig_properties: - prop_ref = orig_properties[name] - _image_property_update(context, prop_ref, prop_values, - session=session) - else: - image_property_create(context, prop_values, session=session) - - if 
purge_props: - for key in orig_properties.keys(): - if key not in properties: - prop_ref = orig_properties[key] - image_property_delete(context, prop_ref.name, - image_ref.id, session=session) - - -def _image_child_entry_delete_all(child_model_cls, image_id, delete_time=None, - session=None): - """Deletes all the child entries for the given image id. - - Deletes all the child entries of the given child entry ORM model class - using the parent image's id. - - The child entry ORM model class can be one of the following: - model.ImageLocation, model.ImageProperty, model.ImageMember and - model.ImageTag. - - :param child_model_cls: the ORM model class. - :param image_id: id of the image whose child entries are to be deleted. - :param delete_time: datetime of deletion to be set. - If None, uses current datetime. - :param session: A SQLAlchemy session to use (if present) - - :rtype: int - :returns: The number of child entries got soft-deleted. - """ - session = session or get_session() - - query = session.query(child_model_cls).filter_by( - image_id=image_id).filter_by(deleted=False) - - delete_time = delete_time or timeutils.utcnow() - - count = query.update({"deleted": True, "deleted_at": delete_time}) - return count - - -def image_property_create(context, values, session=None): - """Create an ImageProperty object.""" - prop_ref = models.ImageProperty() - prop = _image_property_update(context, prop_ref, values, session=session) - return prop.to_dict() - - -def _image_property_update(context, prop_ref, values, session=None): - """ - Used internally by image_property_create and image_property_update. - """ - _drop_protected_attrs(models.ImageProperty, values) - values["deleted"] = False - prop_ref.update(values) - prop_ref.save(session=session) - return prop_ref - - -def image_property_delete(context, prop_ref, image_ref, session=None): - """ - Used internally by image_property_create and image_property_update. 
- """ - session = session or get_session() - prop = session.query(models.ImageProperty).filter_by(image_id=image_ref, - name=prop_ref).one() - prop.delete(session=session) - return prop - - -def _image_property_delete_all(context, image_id, delete_time=None, - session=None): - """Delete all image properties for given image""" - props_updated_count = _image_child_entry_delete_all(models.ImageProperty, - image_id, - delete_time, - session) - return props_updated_count - - -def image_member_create(context, values, session=None): - """Create an ImageMember object.""" - memb_ref = models.ImageMember() - _image_member_update(context, memb_ref, values, session=session) - return _image_member_format(memb_ref) - - -def _image_member_format(member_ref): - """Format a member ref for consumption outside of this module.""" - return { - 'id': member_ref['id'], - 'image_id': member_ref['image_id'], - 'member': member_ref['member'], - 'can_share': member_ref['can_share'], - 'status': member_ref['status'], - 'created_at': member_ref['created_at'], - 'updated_at': member_ref['updated_at'], - 'deleted': member_ref['deleted'] - } - - -def image_member_update(context, memb_id, values): - """Update an ImageMember object.""" - session = get_session() - memb_ref = _image_member_get(context, memb_id, session) - _image_member_update(context, memb_ref, values, session) - return _image_member_format(memb_ref) - - -def _image_member_update(context, memb_ref, values, session=None): - """Apply supplied dictionary of values to a Member object.""" - _drop_protected_attrs(models.ImageMember, values) - values["deleted"] = False - values.setdefault('can_share', False) - memb_ref.update(values) - memb_ref.save(session=session) - return memb_ref - - -def image_member_delete(context, memb_id, session=None): - """Delete an ImageMember object.""" - session = session or get_session() - member_ref = _image_member_get(context, memb_id, session) - _image_member_delete(context, member_ref, session) - - -def 
_image_member_delete(context, memb_ref, session): - memb_ref.delete(session=session) - - -def _image_member_delete_all(context, image_id, delete_time=None, - session=None): - """Delete all image members for given image""" - members_updated_count = _image_child_entry_delete_all(models.ImageMember, - image_id, - delete_time, - session) - return members_updated_count - - -def _image_member_get(context, memb_id, session): - """Fetch an ImageMember entity by id.""" - query = session.query(models.ImageMember) - query = query.filter_by(id=memb_id) - return query.one() - - -def image_member_find(context, image_id=None, member=None, - status=None, include_deleted=False): - """Find all members that meet the given criteria. - - Note, currently include_deleted should be true only when create a new - image membership, as there may be a deleted image membership between - the same image and tenant, the membership will be reused in this case. - It should be false in other cases. - - :param image_id: identifier of image entity - :param member: tenant to which membership has been granted - :include_deleted: A boolean indicating whether the result should include - the deleted record of image member - """ - session = get_session() - members = _image_member_find(context, session, image_id, - member, status, include_deleted) - return [_image_member_format(m) for m in members] - - -def _image_member_find(context, session, image_id=None, - member=None, status=None, include_deleted=False): - query = session.query(models.ImageMember) - if not include_deleted: - query = query.filter_by(deleted=False) - - if not context.is_admin: - query = query.join(models.Image) - filters = [ - models.Image.owner == context.owner, - models.ImageMember.member == context.owner, - ] - query = query.filter(sa_sql.or_(*filters)) - - if image_id is not None: - query = query.filter(models.ImageMember.image_id == image_id) - if member is not None: - query = query.filter(models.ImageMember.member == member) - if 
status is not None: - query = query.filter(models.ImageMember.status == status) - - return query.all() - - -def image_member_count(context, image_id): - """Return the number of image members for this image - - :param image_id: identifier of image entity - """ - session = get_session() - - if not image_id: - msg = _("Image id is required.") - raise exception.Invalid(msg) - - query = session.query(models.ImageMember) - query = query.filter_by(deleted=False) - query = query.filter(models.ImageMember.image_id == str(image_id)) - - return query.count() - - -def image_tag_set_all(context, image_id, tags): - # NOTE(kragniz): tag ordering should match exactly what was provided, so a - # subsequent call to image_tag_get_all returns them in the correct order - - session = get_session() - existing_tags = image_tag_get_all(context, image_id, session) - - tags_created = [] - for tag in tags: - if tag not in tags_created and tag not in existing_tags: - tags_created.append(tag) - image_tag_create(context, image_id, tag, session) - - for tag in existing_tags: - if tag not in tags: - image_tag_delete(context, image_id, tag, session) - - -@utils.no_4byte_params -def image_tag_create(context, image_id, value, session=None): - """Create an image tag.""" - session = session or get_session() - tag_ref = models.ImageTag(image_id=image_id, value=value) - tag_ref.save(session=session) - return tag_ref['value'] - - -def image_tag_delete(context, image_id, value, session=None): - """Delete an image tag.""" - _check_image_id(image_id) - session = session or get_session() - query = session.query(models.ImageTag).filter_by( - image_id=image_id).filter_by( - value=value).filter_by(deleted=False) - try: - tag_ref = query.one() - except sa_orm.exc.NoResultFound: - raise exception.NotFound() - - tag_ref.delete(session=session) - - -def _image_tag_delete_all(context, image_id, delete_time=None, session=None): - """Delete all image tags for given image""" - tags_updated_count = 
_image_child_entry_delete_all(models.ImageTag, - image_id, - delete_time, - session) - return tags_updated_count - - -def image_tag_get_all(context, image_id, session=None): - """Get a list of tags for a specific image.""" - _check_image_id(image_id) - session = session or get_session() - tags = session.query(models.ImageTag.value).filter_by( - image_id=image_id).filter_by(deleted=False).all() - return [tag[0] for tag in tags] - - -class DeleteFromSelect(sa_sql.expression.UpdateBase): - def __init__(self, table, select, column): - self.table = table - self.select = select - self.column = column - - -# NOTE(abhishekk): MySQL doesn't yet support subquery with -# 'LIMIT & IN/ALL/ANY/SOME' We need work around this with nesting select. -@compiles(DeleteFromSelect) -def visit_delete_from_select(element, compiler, **kw): - return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % ( - compiler.process(element.table, asfrom=True), - compiler.process(element.column), - element.column.name, - compiler.process(element.select)) - - -def purge_deleted_rows(context, age_in_days, max_rows, session=None): - """Purges soft deleted rows - - Deletes rows of table images, table tasks and all dependent tables - according to given age for relevant models. 
- """ - # check max_rows for its maximum limit - _validate_db_int(max_rows=max_rows) - - session = session or get_session() - metadata = MetaData(get_engine()) - deleted_age = timeutils.utcnow() - datetime.timedelta(days=age_in_days) - - tables = [] - for model_class in models.__dict__.values(): - if not hasattr(model_class, '__tablename__'): - continue - if hasattr(model_class, 'deleted'): - tables.append(model_class.__tablename__) - # get rid of FK constraints - for tbl in ('images', 'tasks'): - try: - tables.remove(tbl) - except ValueError: - LOG.warning(_LW('Expected table %(tbl)s was not found in DB.'), - {'tbl': tbl}) - else: - tables.append(tbl) - - for tbl in tables: - tab = Table(tbl, metadata, autoload=True) - LOG.info( - _LI('Purging deleted rows older than %(age_in_days)d day(s) ' - 'from table %(tbl)s'), - {'age_in_days': age_in_days, 'tbl': tbl}) - - column = tab.c.id - deleted_at_column = tab.c.deleted_at - - query_delete = sql.select( - [column], deleted_at_column < deleted_age).order_by( - deleted_at_column).limit(max_rows) - - delete_statement = DeleteFromSelect(tab, query_delete, column) - - try: - with session.begin(): - result = session.execute(delete_statement) - except db_exception.DBReferenceError as ex: - with excutils.save_and_reraise_exception(): - LOG.error(_LE('DBError detected when purging from ' - "%(tablename)s: %(error)s"), - {'tablename': tbl, 'error': six.text_type(ex)}) - - rows = result.rowcount - LOG.info(_LI('Deleted %(rows)d row(s) from table %(tbl)s'), - {'rows': rows, 'tbl': tbl}) - - -def user_get_storage_usage(context, owner_id, image_id=None, session=None): - _check_image_id(image_id) - session = session or get_session() - total_size = _image_get_disk_usage_by_owner( - owner_id, session, image_id=image_id) - return total_size - - -def _task_info_format(task_info_ref): - """Format a task info ref for consumption outside of this module""" - if task_info_ref is None: - return {} - return { - 'task_id': 
task_info_ref['task_id'], - 'input': task_info_ref['input'], - 'result': task_info_ref['result'], - 'message': task_info_ref['message'], - } - - -def _task_info_create(context, task_id, values, session=None): - """Create an TaskInfo object""" - session = session or get_session() - task_info_ref = models.TaskInfo() - task_info_ref.task_id = task_id - task_info_ref.update(values) - task_info_ref.save(session=session) - return _task_info_format(task_info_ref) - - -def _task_info_update(context, task_id, values, session=None): - """Update an TaskInfo object""" - session = session or get_session() - task_info_ref = _task_info_get(context, task_id, session=session) - if task_info_ref: - task_info_ref.update(values) - task_info_ref.save(session=session) - return _task_info_format(task_info_ref) - - -def _task_info_get(context, task_id, session=None): - """Fetch an TaskInfo entity by task_id""" - session = session or get_session() - query = session.query(models.TaskInfo) - query = query.filter_by(task_id=task_id) - try: - task_info_ref = query.one() - except sa_orm.exc.NoResultFound: - LOG.debug("TaskInfo was not found for task with id %(task_id)s", - {'task_id': task_id}) - task_info_ref = None - - return task_info_ref - - -def task_create(context, values, session=None): - """Create a task object""" - - values = values.copy() - session = session or get_session() - with session.begin(): - task_info_values = _pop_task_info_values(values) - - task_ref = models.Task() - _task_update(context, task_ref, values, session=session) - - _task_info_create(context, - task_ref.id, - task_info_values, - session=session) - - return task_get(context, task_ref.id, session) - - -def _pop_task_info_values(values): - task_info_values = {} - for k, v in list(values.items()): - if k in ['input', 'result', 'message']: - values.pop(k) - task_info_values[k] = v - - return task_info_values - - -def task_update(context, task_id, values, session=None): - """Update a task object""" - - session = 
session or get_session() - - with session.begin(): - task_info_values = _pop_task_info_values(values) - - task_ref = _task_get(context, task_id, session) - _drop_protected_attrs(models.Task, values) - - values['updated_at'] = timeutils.utcnow() - - _task_update(context, task_ref, values, session) - - if task_info_values: - _task_info_update(context, - task_id, - task_info_values, - session) - - return task_get(context, task_id, session) - - -def task_get(context, task_id, session=None, force_show_deleted=False): - """Fetch a task entity by id""" - task_ref = _task_get(context, task_id, session=session, - force_show_deleted=force_show_deleted) - return _task_format(task_ref, task_ref.info) - - -def task_delete(context, task_id, session=None): - """Delete a task""" - session = session or get_session() - task_ref = _task_get(context, task_id, session=session) - task_ref.delete(session=session) - return _task_format(task_ref, task_ref.info) - - -def _task_soft_delete(context, session=None): - """Scrub task entities which are expired """ - expires_at = models.Task.expires_at - session = session or get_session() - query = session.query(models.Task) - - query = (query.filter(models.Task.owner == context.owner) - .filter_by(deleted=0) - .filter(expires_at <= timeutils.utcnow())) - values = {'deleted': 1, 'deleted_at': timeutils.utcnow()} - - with session.begin(): - query.update(values) - - -def task_get_all(context, filters=None, marker=None, limit=None, - sort_key='created_at', sort_dir='desc', admin_as_user=False): - """ - Get all tasks that match zero or more filters. - - :param filters: dict of filter keys and values. - :param marker: task id after which to start page - :param limit: maximum number of tasks to return - :param sort_key: task attribute by which results should be sorted - :param sort_dir: direction in which results should be sorted (asc, desc) - :param admin_as_user: For backwards compatibility. 
If true, then return to - an admin the equivalent set of tasks which it would see - if it were a regular user - :returns: tasks set - """ - filters = filters or {} - - session = get_session() - query = session.query(models.Task) - - if not (context.is_admin or admin_as_user) and context.owner is not None: - query = query.filter(models.Task.owner == context.owner) - - _task_soft_delete(context, session=session) - - showing_deleted = False - - if 'deleted' in filters: - deleted_filter = filters.pop('deleted') - query = query.filter_by(deleted=deleted_filter) - showing_deleted = deleted_filter - - for (k, v) in filters.items(): - if v is not None: - key = k - if hasattr(models.Task, key): - query = query.filter(getattr(models.Task, key) == v) - - marker_task = None - if marker is not None: - marker_task = _task_get(context, marker, - force_show_deleted=showing_deleted) - - sort_keys = ['created_at', 'id'] - if sort_key not in sort_keys: - sort_keys.insert(0, sort_key) - - query = _paginate_query(query, models.Task, limit, - sort_keys, - marker=marker_task, - sort_dir=sort_dir) - - task_refs = query.all() - - tasks = [] - for task_ref in task_refs: - tasks.append(_task_format(task_ref, task_info_ref=None)) - - return tasks - - -def _is_task_visible(context, task): - """Return True if the task is visible in this context.""" - # Is admin == task visible - if context.is_admin: - return True - - # No owner == task visible - if task['owner'] is None: - return True - - # Perform tests based on whether we have an owner - if context.owner is not None: - if context.owner == task['owner']: - return True - - return False - - -def _task_get(context, task_id, session=None, force_show_deleted=False): - """Fetch a task entity by id""" - session = session or get_session() - query = session.query(models.Task).options( - sa_orm.joinedload(models.Task.info) - ).filter_by(id=task_id) - - if not force_show_deleted and not context.can_see_deleted: - query = query.filter_by(deleted=False) - 
try: - task_ref = query.one() - except sa_orm.exc.NoResultFound: - LOG.debug("No task found with ID %s", task_id) - raise exception.TaskNotFound(task_id=task_id) - - # Make sure the task is visible - if not _is_task_visible(context, task_ref): - msg = "Forbidding request, task %s is not visible" % task_id - LOG.debug(msg) - raise exception.Forbidden(msg) - - return task_ref - - -def _task_update(context, task_ref, values, session=None): - """Apply supplied dictionary of values to a task object.""" - if 'deleted' not in values: - values["deleted"] = False - task_ref.update(values) - task_ref.save(session=session) - return task_ref - - -def _task_format(task_ref, task_info_ref=None): - """Format a task ref for consumption outside of this module""" - task_dict = { - 'id': task_ref['id'], - 'type': task_ref['type'], - 'status': task_ref['status'], - 'owner': task_ref['owner'], - 'expires_at': task_ref['expires_at'], - 'created_at': task_ref['created_at'], - 'updated_at': task_ref['updated_at'], - 'deleted_at': task_ref['deleted_at'], - 'deleted': task_ref['deleted'] - } - - if task_info_ref: - task_info_dict = { - 'input': task_info_ref['input'], - 'result': task_info_ref['result'], - 'message': task_info_ref['message'], - } - task_dict.update(task_info_dict) - - return task_dict - - -def metadef_namespace_get_all(context, marker=None, limit=None, sort_key=None, - sort_dir=None, filters=None, session=None): - """List all available namespaces.""" - session = session or get_session() - namespaces = metadef_namespace_api.get_all( - context, session, marker, limit, sort_key, sort_dir, filters) - return namespaces - - -def metadef_namespace_get(context, namespace_name, session=None): - """Get a namespace or raise if it does not exist or is not visible.""" - session = session or get_session() - return metadef_namespace_api.get( - context, namespace_name, session) - - -@utils.no_4byte_params -def metadef_namespace_create(context, values, session=None): - """Create a namespace 
or raise if it already exists.""" - session = session or get_session() - return metadef_namespace_api.create(context, values, session) - - -@utils.no_4byte_params -def metadef_namespace_update(context, namespace_id, namespace_dict, - session=None): - """Update a namespace or raise if it does not exist or not visible""" - session = session or get_session() - return metadef_namespace_api.update( - context, namespace_id, namespace_dict, session) - - -def metadef_namespace_delete(context, namespace_name, session=None): - """Delete the namespace and all foreign references""" - session = session or get_session() - return metadef_namespace_api.delete_cascade( - context, namespace_name, session) - - -def metadef_object_get_all(context, namespace_name, session=None): - """Get a metadata-schema object or raise if it does not exist.""" - session = session or get_session() - return metadef_object_api.get_all( - context, namespace_name, session) - - -def metadef_object_get(context, namespace_name, object_name, session=None): - """Get a metadata-schema object or raise if it does not exist.""" - session = session or get_session() - return metadef_object_api.get( - context, namespace_name, object_name, session) - - -@utils.no_4byte_params -def metadef_object_create(context, namespace_name, object_dict, - session=None): - """Create a metadata-schema object or raise if it already exists.""" - session = session or get_session() - return metadef_object_api.create( - context, namespace_name, object_dict, session) - - -@utils.no_4byte_params -def metadef_object_update(context, namespace_name, object_id, object_dict, - session=None): - """Update an object or raise if it does not exist or not visible.""" - session = session or get_session() - return metadef_object_api.update( - context, namespace_name, object_id, object_dict, session) - - -def metadef_object_delete(context, namespace_name, object_name, - session=None): - """Delete an object or raise if namespace or object doesn't 
exist.""" - session = session or get_session() - return metadef_object_api.delete( - context, namespace_name, object_name, session) - - -def metadef_object_delete_namespace_content( - context, namespace_name, session=None): - """Delete an object or raise if namespace or object doesn't exist.""" - session = session or get_session() - return metadef_object_api.delete_by_namespace_name( - context, namespace_name, session) - - -def metadef_object_count(context, namespace_name, session=None): - """Get count of properties for a namespace, raise if ns doesn't exist.""" - session = session or get_session() - return metadef_object_api.count(context, namespace_name, session) - - -def metadef_property_get_all(context, namespace_name, session=None): - """Get a metadef property or raise if it does not exist.""" - session = session or get_session() - return metadef_property_api.get_all(context, namespace_name, session) - - -def metadef_property_get(context, namespace_name, - property_name, session=None): - """Get a metadef property or raise if it does not exist.""" - session = session or get_session() - return metadef_property_api.get( - context, namespace_name, property_name, session) - - -@utils.no_4byte_params -def metadef_property_create(context, namespace_name, property_dict, - session=None): - """Create a metadef property or raise if it already exists.""" - session = session or get_session() - return metadef_property_api.create( - context, namespace_name, property_dict, session) - - -@utils.no_4byte_params -def metadef_property_update(context, namespace_name, property_id, - property_dict, session=None): - """Update an object or raise if it does not exist or not visible.""" - session = session or get_session() - return metadef_property_api.update( - context, namespace_name, property_id, property_dict, session) - - -def metadef_property_delete(context, namespace_name, property_name, - session=None): - """Delete a property or raise if it or namespace doesn't exist.""" - 
session = session or get_session() - return metadef_property_api.delete( - context, namespace_name, property_name, session) - - -def metadef_property_delete_namespace_content( - context, namespace_name, session=None): - """Delete a property or raise if it or namespace doesn't exist.""" - session = session or get_session() - return metadef_property_api.delete_by_namespace_name( - context, namespace_name, session) - - -def metadef_property_count(context, namespace_name, session=None): - """Get count of properties for a namespace, raise if ns doesn't exist.""" - session = session or get_session() - return metadef_property_api.count(context, namespace_name, session) - - -def metadef_resource_type_create(context, values, session=None): - """Create a resource_type""" - session = session or get_session() - return metadef_resource_type_api.create( - context, values, session) - - -def metadef_resource_type_get(context, resource_type_name, session=None): - """Get a resource_type""" - session = session or get_session() - return metadef_resource_type_api.get( - context, resource_type_name, session) - - -def metadef_resource_type_get_all(context, session=None): - """list all resource_types""" - session = session or get_session() - return metadef_resource_type_api.get_all(context, session) - - -def metadef_resource_type_delete(context, resource_type_name, session=None): - """Get a resource_type""" - session = session or get_session() - return metadef_resource_type_api.delete( - context, resource_type_name, session) - - -def metadef_resource_type_association_get( - context, namespace_name, resource_type_name, session=None): - session = session or get_session() - return metadef_association_api.get( - context, namespace_name, resource_type_name, session) - - -def metadef_resource_type_association_create( - context, namespace_name, values, session=None): - session = session or get_session() - return metadef_association_api.create( - context, namespace_name, values, session) - - -def 
metadef_resource_type_association_delete( - context, namespace_name, resource_type_name, session=None): - session = session or get_session() - return metadef_association_api.delete( - context, namespace_name, resource_type_name, session) - - -def metadef_resource_type_association_get_all_by_namespace( - context, namespace_name, session=None): - session = session or get_session() - return metadef_association_api.get_all_by_namespace( - context, namespace_name, session) - - -def metadef_tag_get_all( - context, namespace_name, filters=None, marker=None, limit=None, - sort_key=None, sort_dir=None, session=None): - """Get metadata-schema tags or raise if none exist.""" - session = session or get_session() - return metadef_tag_api.get_all( - context, namespace_name, session, - filters, marker, limit, sort_key, sort_dir) - - -def metadef_tag_get(context, namespace_name, name, session=None): - """Get a metadata-schema tag or raise if it does not exist.""" - session = session or get_session() - return metadef_tag_api.get( - context, namespace_name, name, session) - - -@utils.no_4byte_params -def metadef_tag_create(context, namespace_name, tag_dict, - session=None): - """Create a metadata-schema tag or raise if it already exists.""" - session = session or get_session() - return metadef_tag_api.create( - context, namespace_name, tag_dict, session) - - -def metadef_tag_create_tags(context, namespace_name, tag_list, - session=None): - """Create a metadata-schema tag or raise if it already exists.""" - session = get_session() - return metadef_tag_api.create_tags( - context, namespace_name, tag_list, session) - - -@utils.no_4byte_params -def metadef_tag_update(context, namespace_name, id, tag_dict, - session=None): - """Update an tag or raise if it does not exist or not visible.""" - session = session or get_session() - return metadef_tag_api.update( - context, namespace_name, id, tag_dict, session) - - -def metadef_tag_delete(context, namespace_name, name, - session=None): - 
"""Delete an tag or raise if namespace or tag doesn't exist.""" - session = session or get_session() - return metadef_tag_api.delete( - context, namespace_name, name, session) - - -def metadef_tag_delete_namespace_content( - context, namespace_name, session=None): - """Delete an tag or raise if namespace or tag doesn't exist.""" - session = session or get_session() - return metadef_tag_api.delete_by_namespace_name( - context, namespace_name, session) - - -def metadef_tag_count(context, namespace_name, session=None): - """Get count of tags for a namespace, raise if ns doesn't exist.""" - session = session or get_session() - return metadef_tag_api.count(context, namespace_name, session) diff --git a/glance/db/sqlalchemy/metadata.py b/glance/db/sqlalchemy/metadata.py deleted file mode 100644 index e9fd5c29..00000000 --- a/glance/db/sqlalchemy/metadata.py +++ /dev/null @@ -1,506 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Copyright 2013 OpenStack Foundation -# Copyright 2013 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json -import os -from os.path import isfile -from os.path import join -import re - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -import six -import sqlalchemy -from sqlalchemy import and_ -from sqlalchemy.schema import MetaData -from sqlalchemy.sql import select - -from glance.common import timeutils -from glance.i18n import _, _LE, _LI, _LW - -LOG = logging.getLogger(__name__) - -metadata_opts = [ - cfg.StrOpt('metadata_source_path', - default='/etc/glance/metadefs/', - help=_(""" -Absolute path to the directory where JSON metadefs files are stored. - -Glance Metadata Definitions ("metadefs") are served from the database, -but are stored in files in the JSON format. The files in this -directory are used to initialize the metadefs in the database. -Additionally, when metadefs are exported from the database, the files -are written to this directory. - -NOTE: If you plan to export metadefs, make sure that this directory -has write permissions set for the user being used to run the -glance-api service. 
- -Possible values: - * String value representing a valid absolute pathname - -Related options: - * None - -""")), -] - -CONF = cfg.CONF -CONF.register_opts(metadata_opts) - - -def get_metadef_namespaces_table(meta): - return sqlalchemy.Table('metadef_namespaces', meta, autoload=True) - - -def get_metadef_resource_types_table(meta): - return sqlalchemy.Table('metadef_resource_types', meta, autoload=True) - - -def get_metadef_namespace_resource_types_table(meta): - return sqlalchemy.Table('metadef_namespace_resource_types', meta, - autoload=True) - - -def get_metadef_properties_table(meta): - return sqlalchemy.Table('metadef_properties', meta, autoload=True) - - -def get_metadef_objects_table(meta): - return sqlalchemy.Table('metadef_objects', meta, autoload=True) - - -def get_metadef_tags_table(meta): - return sqlalchemy.Table('metadef_tags', meta, autoload=True) - - -def _get_resource_type_id(meta, name): - rt_table = get_metadef_resource_types_table(meta) - resource_type = ( - select([rt_table.c.id]). - where(rt_table.c.name == name). - select_from(rt_table). - execute().fetchone()) - if resource_type: - return resource_type[0] - return None - - -def _get_resource_type(meta, resource_type_id): - rt_table = get_metadef_resource_types_table(meta) - return ( - rt_table.select(). - where(rt_table.c.id == resource_type_id). - execute().fetchone()) - - -def _get_namespace_resource_types(meta, namespace_id): - namespace_resource_types_table = ( - get_metadef_namespace_resource_types_table(meta)) - return ( - namespace_resource_types_table.select(). - where(namespace_resource_types_table.c.namespace_id == namespace_id). - execute().fetchall()) - - -def _get_namespace_resource_type_by_ids(meta, namespace_id, rt_id): - namespace_resource_types_table = ( - get_metadef_namespace_resource_types_table(meta)) - return ( - namespace_resource_types_table.select(). 
- where(and_( - namespace_resource_types_table.c.namespace_id == namespace_id, - namespace_resource_types_table.c.resource_type_id == rt_id)). - execute().fetchone()) - - -def _get_properties(meta, namespace_id): - properties_table = get_metadef_properties_table(meta) - return ( - properties_table.select(). - where(properties_table.c.namespace_id == namespace_id). - execute().fetchall()) - - -def _get_objects(meta, namespace_id): - objects_table = get_metadef_objects_table(meta) - return ( - objects_table.select(). - where(objects_table.c.namespace_id == namespace_id). - execute().fetchall()) - - -def _get_tags(meta, namespace_id): - tags_table = get_metadef_tags_table(meta) - return ( - tags_table.select(). - where(tags_table.c.namespace_id == namespace_id). - execute().fetchall()) - - -def _get_resource_id(table, namespace_id, resource_name): - resource = ( - select([table.c.id]). - where(and_(table.c.namespace_id == namespace_id, - table.c.name == resource_name)). - select_from(table). 
- execute().fetchone()) - if resource: - return resource[0] - return None - - -def _clear_metadata(meta): - metadef_tables = [get_metadef_properties_table(meta), - get_metadef_objects_table(meta), - get_metadef_tags_table(meta), - get_metadef_namespace_resource_types_table(meta), - get_metadef_namespaces_table(meta), - get_metadef_resource_types_table(meta)] - - for table in metadef_tables: - table.delete().execute() - LOG.info(_LI("Table %s has been cleared"), table) - - -def _clear_namespace_metadata(meta, namespace_id): - metadef_tables = [get_metadef_properties_table(meta), - get_metadef_objects_table(meta), - get_metadef_tags_table(meta), - get_metadef_namespace_resource_types_table(meta)] - namespaces_table = get_metadef_namespaces_table(meta) - - for table in metadef_tables: - table.delete().where(table.c.namespace_id == namespace_id).execute() - namespaces_table.delete().where( - namespaces_table.c.id == namespace_id).execute() - - -def _populate_metadata(meta, metadata_path=None, merge=False, - prefer_new=False, overwrite=False): - if not metadata_path: - metadata_path = CONF.metadata_source_path - - try: - if isfile(metadata_path): - json_schema_files = [metadata_path] - else: - json_schema_files = [f for f in os.listdir(metadata_path) - if isfile(join(metadata_path, f)) - and f.endswith('.json')] - except OSError as e: - LOG.error(encodeutils.exception_to_unicode(e)) - return - - if not json_schema_files: - LOG.error(_LE("Json schema files not found in %s. 
Aborting."), - metadata_path) - return - - namespaces_table = get_metadef_namespaces_table(meta) - namespace_rt_table = get_metadef_namespace_resource_types_table(meta) - objects_table = get_metadef_objects_table(meta) - tags_table = get_metadef_tags_table(meta) - properties_table = get_metadef_properties_table(meta) - resource_types_table = get_metadef_resource_types_table(meta) - - for json_schema_file in json_schema_files: - try: - file = join(metadata_path, json_schema_file) - with open(file) as json_file: - metadata = json.load(json_file) - except Exception as e: - LOG.error(_LE("Failed to parse json file %(file_path)s while " - "populating metadata due to: %(error_msg)s"), - {"file_path": file, - "error_msg": encodeutils.exception_to_unicode(e)}) - continue - - values = { - 'namespace': metadata.get('namespace'), - 'display_name': metadata.get('display_name'), - 'description': metadata.get('description'), - 'visibility': metadata.get('visibility'), - 'protected': metadata.get('protected'), - 'owner': metadata.get('owner', 'admin') - } - - db_namespace = select( - [namespaces_table.c.id] - ).where( - namespaces_table.c.namespace == values['namespace'] - ).select_from( - namespaces_table - ).execute().fetchone() - - if db_namespace and overwrite: - LOG.info(_LI("Overwriting namespace %s"), values['namespace']) - _clear_namespace_metadata(meta, db_namespace[0]) - db_namespace = None - - if not db_namespace: - values.update({'created_at': timeutils.utcnow()}) - _insert_data_to_db(namespaces_table, values) - - db_namespace = select( - [namespaces_table.c.id] - ).where( - namespaces_table.c.namespace == values['namespace'] - ).select_from( - namespaces_table - ).execute().fetchone() - elif not merge: - LOG.info(_LI("Skipping namespace %s. 
It already exists in the " - "database."), values['namespace']) - continue - elif prefer_new: - values.update({'updated_at': timeutils.utcnow()}) - _update_data_in_db(namespaces_table, values, - namespaces_table.c.id, db_namespace[0]) - - namespace_id = db_namespace[0] - - for resource_type in metadata.get('resource_type_associations', []): - rt_id = _get_resource_type_id(meta, resource_type['name']) - if not rt_id: - val = { - 'name': resource_type['name'], - 'created_at': timeutils.utcnow(), - 'protected': True - } - _insert_data_to_db(resource_types_table, val) - rt_id = _get_resource_type_id(meta, resource_type['name']) - elif prefer_new: - val = {'updated_at': timeutils.utcnow()} - _update_data_in_db(resource_types_table, val, - resource_types_table.c.id, rt_id) - - values = { - 'namespace_id': namespace_id, - 'resource_type_id': rt_id, - 'properties_target': resource_type.get( - 'properties_target'), - 'prefix': resource_type.get('prefix') - } - namespace_resource_type = _get_namespace_resource_type_by_ids( - meta, namespace_id, rt_id) - if not namespace_resource_type: - values.update({'created_at': timeutils.utcnow()}) - _insert_data_to_db(namespace_rt_table, values) - elif prefer_new: - values.update({'updated_at': timeutils.utcnow()}) - _update_rt_association(namespace_rt_table, values, - rt_id, namespace_id) - - for property, schema in six.iteritems(metadata.get('properties', - {})): - values = { - 'name': property, - 'namespace_id': namespace_id, - 'json_schema': json.dumps(schema) - } - property_id = _get_resource_id(properties_table, - namespace_id, property) - if not property_id: - values.update({'created_at': timeutils.utcnow()}) - _insert_data_to_db(properties_table, values) - elif prefer_new: - values.update({'updated_at': timeutils.utcnow()}) - _update_data_in_db(properties_table, values, - properties_table.c.id, property_id) - - for object in metadata.get('objects', []): - values = { - 'name': object['name'], - 'description': 
object.get('description'), - 'namespace_id': namespace_id, - 'json_schema': json.dumps( - object.get('properties')) - } - object_id = _get_resource_id(objects_table, namespace_id, - object['name']) - if not object_id: - values.update({'created_at': timeutils.utcnow()}) - _insert_data_to_db(objects_table, values) - elif prefer_new: - values.update({'updated_at': timeutils.utcnow()}) - _update_data_in_db(objects_table, values, - objects_table.c.id, object_id) - - for tag in metadata.get('tags', []): - values = { - 'name': tag.get('name'), - 'namespace_id': namespace_id, - } - tag_id = _get_resource_id(tags_table, namespace_id, tag['name']) - if not tag_id: - values.update({'created_at': timeutils.utcnow()}) - _insert_data_to_db(tags_table, values) - elif prefer_new: - values.update({'updated_at': timeutils.utcnow()}) - _update_data_in_db(tags_table, values, - tags_table.c.id, tag_id) - - LOG.info(_LI("File %s loaded to database."), file) - - LOG.info(_LI("Metadata loading finished")) - - -def _insert_data_to_db(table, values, log_exception=True): - try: - table.insert(values=values).execute() - except sqlalchemy.exc.IntegrityError: - if log_exception: - LOG.warning(_LW("Duplicate entry for values: %s"), values) - - -def _update_data_in_db(table, values, column, value): - try: - (table.update(values=values). - where(column == value).execute()) - except sqlalchemy.exc.IntegrityError: - LOG.warning(_LW("Duplicate entry for values: %s"), values) - - -def _update_rt_association(table, values, rt_id, namespace_id): - try: - (table.update(values=values). 
- where(and_(table.c.resource_type_id == rt_id, - table.c.namespace_id == namespace_id)).execute()) - except sqlalchemy.exc.IntegrityError: - LOG.warning(_LW("Duplicate entry for values: %s"), values) - - -def _export_data_to_file(meta, path): - if not path: - path = CONF.metadata_source_path - - namespace_table = get_metadef_namespaces_table(meta) - namespaces = namespace_table.select().execute().fetchall() - - pattern = re.compile('[\W_]+', re.UNICODE) - - for id, namespace in enumerate(namespaces, start=1): - namespace_id = namespace['id'] - namespace_file_name = pattern.sub('', namespace['display_name']) - - values = { - 'namespace': namespace['namespace'], - 'display_name': namespace['display_name'], - 'description': namespace['description'], - 'visibility': namespace['visibility'], - 'protected': namespace['protected'], - 'resource_type_associations': [], - 'properties': {}, - 'objects': [], - 'tags': [] - } - - namespace_resource_types = _get_namespace_resource_types(meta, - namespace_id) - db_objects = _get_objects(meta, namespace_id) - db_properties = _get_properties(meta, namespace_id) - db_tags = _get_tags(meta, namespace_id) - - resource_types = [] - for namespace_resource_type in namespace_resource_types: - resource_type = _get_resource_type( - meta, namespace_resource_type['resource_type_id']) - resource_types.append({ - 'name': resource_type['name'], - 'prefix': namespace_resource_type['prefix'], - 'properties_target': namespace_resource_type[ - 'properties_target'] - }) - values.update({ - 'resource_type_associations': resource_types - }) - - objects = [] - for object in db_objects: - objects.append({ - "name": object['name'], - "description": object['description'], - "properties": json.loads(object['json_schema']) - }) - values.update({ - 'objects': objects - }) - - properties = {} - for property in db_properties: - properties.update({ - property['name']: json.loads(property['json_schema']) - }) - values.update({ - 'properties': properties - }) - - 
tags = [] - for tag in db_tags: - tags.append({ - "name": tag['name'] - }) - values.update({ - 'tags': tags - }) - - try: - file_name = ''.join([path, namespace_file_name, '.json']) - if isfile(file_name): - LOG.info(_LI("Overwriting: %s"), file_name) - with open(file_name, 'w') as json_file: - json_file.write(json.dumps(values)) - except Exception as e: - LOG.exception(encodeutils.exception_to_unicode(e)) - LOG.info(_LI("Namespace %(namespace)s saved in %(file)s"), { - 'namespace': namespace_file_name, 'file': file_name}) - - -def db_load_metadefs(engine, metadata_path=None, merge=False, - prefer_new=False, overwrite=False): - meta = MetaData() - meta.bind = engine - - if not merge and (prefer_new or overwrite): - LOG.error(_LE("To use --prefer_new or --overwrite you need to combine " - "of these options with --merge option.")) - return - - if prefer_new and overwrite and merge: - LOG.error(_LE("Please provide no more than one option from this list: " - "--prefer_new, --overwrite")) - return - - _populate_metadata(meta, metadata_path, merge, prefer_new, overwrite) - - -def db_unload_metadefs(engine): - meta = MetaData() - meta.bind = engine - - _clear_metadata(meta) - - -def db_export_metadefs(engine, metadata_path=None): - meta = MetaData() - meta.bind = engine - - _export_data_to_file(meta, metadata_path) diff --git a/glance/db/sqlalchemy/metadef_api/__init__.py b/glance/db/sqlalchemy/metadef_api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/db/sqlalchemy/metadef_api/namespace.py b/glance/db/sqlalchemy/metadef_api/namespace.py deleted file mode 100644 index d0aac991..00000000 --- a/glance/db/sqlalchemy/metadef_api/namespace.py +++ /dev/null @@ -1,301 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_db import exception as db_exc -from oslo_db.sqlalchemy.utils import paginate_query -from oslo_log import log as logging -import sqlalchemy.exc as sa_exc -from sqlalchemy import or_ -import sqlalchemy.orm as sa_orm - -from glance.common import exception as exc -import glance.db.sqlalchemy.metadef_api as metadef_api -from glance.db.sqlalchemy import models_metadef as models -from glance.i18n import _ - -LOG = logging.getLogger(__name__) - - -def _is_namespace_visible(context, namespace, status=None): - """Return True if the namespace is visible in this context.""" - - # Is admin == visible - if context.is_admin: - return True - - # No owner == visible - if namespace['owner'] is None: - return True - - # Is public == visible - if 'visibility' in namespace: - if namespace['visibility'] == 'public': - return True - - # context.owner has a value and is the namespace owner == visible - if context.owner is not None: - if context.owner == namespace['owner']: - return True - - # Private - return False - - -def _select_namespaces_query(context, session): - """Build the query to get all namespaces based on the context""" - - LOG.debug("context.is_admin=%(is_admin)s; context.owner=%(owner)s", - {'is_admin': context.is_admin, 'owner': context.owner}) - - # If admin, return everything. - query_ns = session.query(models.MetadefNamespace) - if context.is_admin: - return query_ns - else: - # If regular user, return only public namespaces. - # However, if context.owner has a value, return both - # public and private namespaces of the context.owner. 
- if context.owner is not None: - query = ( - query_ns.filter( - or_(models.MetadefNamespace.owner == context.owner, - models.MetadefNamespace.visibility == 'public'))) - else: - query = query_ns.filter( - models.MetadefNamespace.visibility == 'public') - return query - - -def _get(context, namespace_id, session): - """Get a namespace by id, raise if not found""" - - try: - query = session.query(models.MetadefNamespace).filter_by( - id=namespace_id) - namespace_rec = query.one() - except sa_orm.exc.NoResultFound: - msg = (_("Metadata definition namespace not found for id=%s") - % namespace_id) - LOG.warn(msg) - raise exc.MetadefNamespaceNotFound(msg) - - # Make sure they are allowed to view it. - if not _is_namespace_visible(context, namespace_rec.to_dict()): - LOG.debug("Forbidding request, metadata definition namespace=%s" - " is not visible.", namespace_rec.namespace) - emsg = _("Forbidding request, metadata definition namespace=%s" - " is not visible.") % namespace_rec.namespace - raise exc.MetadefForbidden(emsg) - - return namespace_rec - - -def _get_by_name(context, name, session): - """Get a namespace by name, raise if not found""" - - try: - query = session.query(models.MetadefNamespace).filter_by( - namespace=name) - namespace_rec = query.one() - except sa_orm.exc.NoResultFound: - LOG.debug("Metadata definition namespace=%s was not found.", name) - raise exc.MetadefNamespaceNotFound(namespace_name=name) - - # Make sure they are allowed to view it. - if not _is_namespace_visible(context, namespace_rec.to_dict()): - LOG.debug("Forbidding request, metadata definition namespace=%s" - " is not visible.", name) - emsg = _("Forbidding request, metadata definition namespace=%s" - " is not visible.") % name - raise exc.MetadefForbidden(emsg) - - return namespace_rec - - -def _get_all(context, session, filters=None, marker=None, - limit=None, sort_key='created_at', sort_dir='desc'): - """Get all namespaces that match zero or more filters. 
- - :param filters: dict of filter keys and values. - :param marker: namespace id after which to start page - :param limit: maximum number of namespaces to return - :param sort_key: namespace attribute by which results should be sorted - :param sort_dir: direction in which results should be sorted (asc, desc) - """ - - filters = filters or {} - - query = _select_namespaces_query(context, session) - - # if visibility filter, apply it to the context based query - visibility = filters.pop('visibility', None) - if visibility is not None: - query = query.filter(models.MetadefNamespace.visibility == visibility) - - # if id_list filter, apply it to the context based query - id_list = filters.pop('id_list', None) - if id_list is not None: - query = query.filter(models.MetadefNamespace.id.in_(id_list)) - - marker_namespace = None - if marker is not None: - marker_namespace = _get(context, marker, session) - - sort_keys = ['created_at', 'id'] - sort_keys.insert(0, sort_key) if sort_key not in sort_keys else sort_keys - - query = paginate_query(query=query, - model=models.MetadefNamespace, - limit=limit, - sort_keys=sort_keys, - marker=marker_namespace, sort_dir=sort_dir) - - return query.all() - - -def _get_all_by_resource_types(context, session, filters, marker=None, - limit=None, sort_key=None, sort_dir=None): - """get all visible namespaces for the specified resource_types""" - - resource_types = filters['resource_types'] - resource_type_list = resource_types.split(',') - db_recs = ( - session.query(models.MetadefResourceType) - .join(models.MetadefResourceType.associations) - .filter(models.MetadefResourceType.name.in_(resource_type_list)) - .values(models.MetadefResourceType.name, - models.MetadefNamespaceResourceType.namespace_id) - ) - - namespace_id_list = [] - for name, namespace_id in db_recs: - namespace_id_list.append(namespace_id) - - if len(namespace_id_list) is 0: - return [] - - filters2 = filters - filters2.update({'id_list': namespace_id_list}) - - return 
_get_all(context, session, filters2, - marker, limit, sort_key, sort_dir) - - -def get_all(context, session, marker=None, limit=None, - sort_key=None, sort_dir=None, filters=None): - """List all visible namespaces""" - - namespaces = [] - filters = filters or {} - - if 'resource_types' in filters: - namespaces = _get_all_by_resource_types( - context, session, filters, marker, limit, sort_key, sort_dir) - else: - namespaces = _get_all( - context, session, filters, marker, limit, sort_key, sort_dir) - - return [ns.to_dict() for ns in namespaces] - - -def get(context, name, session): - """Get a namespace by name, raise if not found""" - namespace_rec = _get_by_name(context, name, session) - return namespace_rec.to_dict() - - -def create(context, values, session): - """Create a namespace, raise if namespace already exists.""" - - namespace_name = values['namespace'] - namespace = models.MetadefNamespace() - metadef_api.utils.drop_protected_attrs(models.MetadefNamespace, values) - namespace.update(values.copy()) - try: - namespace.save(session=session) - except db_exc.DBDuplicateEntry: - LOG.debug("Can not create the metadata definition namespace." - " Namespace=%s already exists.", namespace_name) - raise exc.MetadefDuplicateNamespace( - namespace_name=namespace_name) - - return namespace.to_dict() - - -def update(context, namespace_id, values, session): - """Update a namespace, raise if not found/visible or duplicate result""" - - namespace_rec = _get(context, namespace_id, session) - metadef_api.utils.drop_protected_attrs(models.MetadefNamespace, values) - - try: - namespace_rec.update(values.copy()) - namespace_rec.save(session=session) - except db_exc.DBDuplicateEntry: - LOG.debug("Invalid update. It would result in a duplicate" - " metadata definition namespace with the same name of %s", - values['namespace']) - emsg = (_("Invalid update. 
It would result in a duplicate" - " metadata definition namespace with the same name of %s") - % values['namespace']) - raise exc.MetadefDuplicateNamespace(emsg) - - return namespace_rec.to_dict() - - -def delete(context, name, session): - """Raise if not found, has references or not visible""" - - namespace_rec = _get_by_name(context, name, session) - try: - session.delete(namespace_rec) - session.flush() - except db_exc.DBError as e: - if isinstance(e.inner_exception, sa_exc.IntegrityError): - LOG.debug("Metadata definition namespace=%s not deleted. " - "Other records still refer to it.", name) - raise exc.MetadefIntegrityError( - record_type='namespace', record_name=name) - else: - raise - - return namespace_rec.to_dict() - - -def delete_cascade(context, name, session): - """Raise if not found, has references or not visible""" - - namespace_rec = _get_by_name(context, name, session) - with session.begin(): - try: - metadef_api.tag.delete_namespace_content( - context, namespace_rec.id, session) - metadef_api.object.delete_namespace_content( - context, namespace_rec.id, session) - metadef_api.property.delete_namespace_content( - context, namespace_rec.id, session) - metadef_api.resource_type_association.delete_namespace_content( - context, namespace_rec.id, session) - session.delete(namespace_rec) - session.flush() - except db_exc.DBError as e: - if isinstance(e.inner_exception, sa_exc.IntegrityError): - LOG.debug("Metadata definition namespace=%s not deleted. " - "Other records still refer to it.", name) - raise exc.MetadefIntegrityError( - record_type='namespace', record_name=name) - else: - raise - - return namespace_rec.to_dict() diff --git a/glance/db/sqlalchemy/metadef_api/object.py b/glance/db/sqlalchemy/metadef_api/object.py deleted file mode 100644 index 48862522..00000000 --- a/glance/db/sqlalchemy/metadef_api/object.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_db import exception as db_exc -from oslo_log import log as logging -from sqlalchemy import func -import sqlalchemy.orm as sa_orm - -from glance.common import exception as exc -from glance.db.sqlalchemy.metadef_api import namespace as namespace_api -import glance.db.sqlalchemy.metadef_api.utils as metadef_utils -from glance.db.sqlalchemy import models_metadef as models -from glance.i18n import _ - -LOG = logging.getLogger(__name__) - - -def _get(context, object_id, session): - try: - query = session.query(models.MetadefObject).filter_by(id=object_id) - metadef_object = query.one() - except sa_orm.exc.NoResultFound: - msg = (_("Metadata definition object not found for id=%s") - % object_id) - LOG.warn(msg) - raise exc.MetadefObjectNotFound(msg) - - return metadef_object - - -def _get_by_name(context, namespace_name, name, session): - namespace = namespace_api.get(context, namespace_name, session) - try: - query = session.query(models.MetadefObject).filter_by( - name=name, namespace_id=namespace['id']) - metadef_object = query.one() - except sa_orm.exc.NoResultFound: - LOG.debug("The metadata definition object with name=%(name)s" - " was not found in namespace=%(namespace_name)s.", - {'name': name, 'namespace_name': namespace_name}) - raise exc.MetadefObjectNotFound(object_name=name, - namespace_name=namespace_name) - - return metadef_object - - -def get_all(context, namespace_name, session): - namespace = 
namespace_api.get(context, namespace_name, session) - query = session.query(models.MetadefObject).filter_by( - namespace_id=namespace['id']) - md_objects = query.all() - - md_objects_list = [] - for obj in md_objects: - md_objects_list.append(obj.to_dict()) - return md_objects_list - - -def create(context, namespace_name, values, session): - namespace = namespace_api.get(context, namespace_name, session) - values.update({'namespace_id': namespace['id']}) - - md_object = models.MetadefObject() - metadef_utils.drop_protected_attrs(models.MetadefObject, values) - md_object.update(values.copy()) - try: - md_object.save(session=session) - except db_exc.DBDuplicateEntry: - LOG.debug("A metadata definition object with name=%(name)s" - " in namespace=%(namespace_name)s already exists.", - {'name': md_object.name, - 'namespace_name': namespace_name}) - raise exc.MetadefDuplicateObject( - object_name=md_object.name, namespace_name=namespace_name) - - return md_object.to_dict() - - -def get(context, namespace_name, name, session): - md_object = _get_by_name(context, namespace_name, name, session) - - return md_object.to_dict() - - -def update(context, namespace_name, object_id, values, session): - """Update an object, raise if ns not found/visible or duplicate result""" - namespace_api.get(context, namespace_name, session) - - md_object = _get(context, object_id, session) - metadef_utils.drop_protected_attrs(models.MetadefObject, values) - # values['updated_at'] = timeutils.utcnow() - done by TS mixin - try: - md_object.update(values.copy()) - md_object.save(session=session) - except db_exc.DBDuplicateEntry: - LOG.debug("Invalid update. It would result in a duplicate" - " metadata definition object with same name=%(name)s" - " in namespace=%(namespace_name)s.", - {'name': md_object.name, 'namespace_name': namespace_name}) - emsg = (_("Invalid update. 
It would result in a duplicate" - " metadata definition object with the same name=%(name)s" - " in namespace=%(namespace_name)s.") - % {'name': md_object.name, 'namespace_name': namespace_name}) - raise exc.MetadefDuplicateObject(emsg) - - return md_object.to_dict() - - -def delete(context, namespace_name, object_name, session): - namespace_api.get(context, namespace_name, session) - md_object = _get_by_name(context, namespace_name, object_name, session) - - session.delete(md_object) - session.flush() - - return md_object.to_dict() - - -def delete_namespace_content(context, namespace_id, session): - """Use this def only if the ns for the id has been verified as visible""" - - count = 0 - query = session.query(models.MetadefObject).filter_by( - namespace_id=namespace_id) - count = query.delete(synchronize_session='fetch') - return count - - -def delete_by_namespace_name(context, namespace_name, session): - namespace = namespace_api.get(context, namespace_name, session) - return delete_namespace_content(context, namespace['id'], session) - - -def count(context, namespace_name, session): - """Get the count of objects for a namespace, raise if ns not found""" - namespace = namespace_api.get(context, namespace_name, session) - - query = session.query(func.count(models.MetadefObject.id)).filter_by( - namespace_id=namespace['id']) - return query.scalar() diff --git a/glance/db/sqlalchemy/metadef_api/property.py b/glance/db/sqlalchemy/metadef_api/property.py deleted file mode 100644 index eba5c0fc..00000000 --- a/glance/db/sqlalchemy/metadef_api/property.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_db import exception as db_exc -from oslo_log import log as logging -from sqlalchemy import func -import sqlalchemy.orm as sa_orm - -from glance.common import exception as exc -from glance.db.sqlalchemy.metadef_api import namespace as namespace_api -from glance.db.sqlalchemy.metadef_api import utils as metadef_utils -from glance.db.sqlalchemy import models_metadef as models -from glance.i18n import _ - -LOG = logging.getLogger(__name__) - - -def _get(context, property_id, session): - - try: - query = session.query(models.MetadefProperty).filter_by(id=property_id) - property_rec = query.one() - - except sa_orm.exc.NoResultFound: - msg = (_("Metadata definition property not found for id=%s") - % property_id) - LOG.warn(msg) - raise exc.MetadefPropertyNotFound(msg) - - return property_rec - - -def _get_by_name(context, namespace_name, name, session): - """get a property; raise if ns not found/visible or property not found""" - - namespace = namespace_api.get(context, namespace_name, session) - try: - query = session.query(models.MetadefProperty).filter_by( - name=name, namespace_id=namespace['id']) - property_rec = query.one() - - except sa_orm.exc.NoResultFound: - LOG.debug("The metadata definition property with name=%(name)s" - " was not found in namespace=%(namespace_name)s.", - {'name': name, 'namespace_name': namespace_name}) - raise exc.MetadefPropertyNotFound(property_name=name, - namespace_name=namespace_name) - - return property_rec - - -def get(context, namespace_name, name, session): - """get a property; raise if ns not found/visible 
or property not found""" - - property_rec = _get_by_name(context, namespace_name, name, session) - return property_rec.to_dict() - - -def get_all(context, namespace_name, session): - namespace = namespace_api.get(context, namespace_name, session) - query = session.query(models.MetadefProperty).filter_by( - namespace_id=namespace['id']) - properties = query.all() - - properties_list = [] - for prop in properties: - properties_list.append(prop.to_dict()) - return properties_list - - -def create(context, namespace_name, values, session): - namespace = namespace_api.get(context, namespace_name, session) - values.update({'namespace_id': namespace['id']}) - - property_rec = models.MetadefProperty() - metadef_utils.drop_protected_attrs(models.MetadefProperty, values) - property_rec.update(values.copy()) - - try: - property_rec.save(session=session) - except db_exc.DBDuplicateEntry: - LOG.debug("Can not create metadata definition property. A property" - " with name=%(name)s already exists in" - " namespace=%(namespace_name)s.", - {'name': property_rec.name, - 'namespace_name': namespace_name}) - raise exc.MetadefDuplicateProperty( - property_name=property_rec.name, - namespace_name=namespace_name) - - return property_rec.to_dict() - - -def update(context, namespace_name, property_id, values, session): - """Update a property, raise if ns not found/visible or duplicate result""" - - namespace_api.get(context, namespace_name, session) - property_rec = _get(context, property_id, session) - metadef_utils.drop_protected_attrs(models.MetadefProperty, values) - # values['updated_at'] = timeutils.utcnow() - done by TS mixin - try: - property_rec.update(values.copy()) - property_rec.save(session=session) - except db_exc.DBDuplicateEntry: - LOG.debug("Invalid update. 
It would result in a duplicate" - " metadata definition property with the same name=%(name)s" - " in namespace=%(namespace_name)s.", - {'name': property_rec.name, - 'namespace_name': namespace_name}) - emsg = (_("Invalid update. It would result in a duplicate" - " metadata definition property with the same name=%(name)s" - " in namespace=%(namespace_name)s.") - % {'name': property_rec.name, - 'namespace_name': namespace_name}) - raise exc.MetadefDuplicateProperty(emsg) - - return property_rec.to_dict() - - -def delete(context, namespace_name, property_name, session): - property_rec = _get_by_name( - context, namespace_name, property_name, session) - if property_rec: - session.delete(property_rec) - session.flush() - - return property_rec.to_dict() - - -def delete_namespace_content(context, namespace_id, session): - """Use this def only if the ns for the id has been verified as visible""" - - count = 0 - query = session.query(models.MetadefProperty).filter_by( - namespace_id=namespace_id) - count = query.delete(synchronize_session='fetch') - return count - - -def delete_by_namespace_name(context, namespace_name, session): - namespace = namespace_api.get(context, namespace_name, session) - return delete_namespace_content(context, namespace['id'], session) - - -def count(context, namespace_name, session): - """Get the count of properties for a namespace, raise if ns not found""" - - namespace = namespace_api.get(context, namespace_name, session) - - query = session.query(func.count(models.MetadefProperty.id)).filter_by( - namespace_id=namespace['id']) - return query.scalar() diff --git a/glance/db/sqlalchemy/metadef_api/resource_type.py b/glance/db/sqlalchemy/metadef_api/resource_type.py deleted file mode 100644 index 2b970e3c..00000000 --- a/glance/db/sqlalchemy/metadef_api/resource_type.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_db import exception as db_exc -from oslo_log import log as logging -import sqlalchemy.exc as sa_exc -import sqlalchemy.orm as sa_orm - -from glance.common import exception as exc -import glance.db.sqlalchemy.metadef_api.utils as metadef_utils -from glance.db.sqlalchemy import models_metadef as models - -LOG = logging.getLogger(__name__) - - -def get(context, name, session): - """Get a resource type, raise if not found""" - - try: - query = session.query(models.MetadefResourceType).filter_by(name=name) - resource_type = query.one() - except sa_orm.exc.NoResultFound: - LOG.debug("No metadata definition resource-type found with name %s", - name) - raise exc.MetadefResourceTypeNotFound(resource_type_name=name) - - return resource_type.to_dict() - - -def get_all(context, session): - """Get a list of all resource types""" - - query = session.query(models.MetadefResourceType) - resource_types = query.all() - - resource_types_list = [] - for rt in resource_types: - resource_types_list.append(rt.to_dict()) - - return resource_types_list - - -def create(context, values, session): - """Create a resource_type, raise if it already exists.""" - - resource_type = models.MetadefResourceType() - metadef_utils.drop_protected_attrs(models.MetadefResourceType, values) - resource_type.update(values.copy()) - try: - resource_type.save(session=session) - except db_exc.DBDuplicateEntry: - LOG.debug("Can not create the metadata 
definition resource-type. " - "A resource-type with name=%s already exists.", - resource_type.name) - raise exc.MetadefDuplicateResourceType( - resource_type_name=resource_type.name) - - return resource_type.to_dict() - - -def update(context, values, session): - """Update a resource type, raise if not found""" - - name = values['name'] - metadef_utils.drop_protected_attrs(models.MetadefResourceType, values) - db_rec = get(context, name, session) - db_rec.update(values.copy()) - db_rec.save(session=session) - - return db_rec.to_dict() - - -def delete(context, name, session): - """Delete a resource type or raise if not found or is protected""" - - db_rec = get(context, name, session) - if db_rec.protected is True: - LOG.debug("Delete forbidden. Metadata definition resource-type %s is a" - " seeded-system type and can not be deleted.", name) - raise exc.ProtectedMetadefResourceTypeSystemDelete( - resource_type_name=name) - - try: - session.delete(db_rec) - session.flush() - except db_exc.DBError as e: - if isinstance(e.inner_exception, sa_exc.IntegrityError): - LOG.debug("Could not delete Metadata definition resource-type %s" - ". It still has content", name) - raise exc.MetadefIntegrityError( - record_type='resource-type', record_name=name) - else: - raise - - return db_rec.to_dict() diff --git a/glance/db/sqlalchemy/metadef_api/resource_type_association.py b/glance/db/sqlalchemy/metadef_api/resource_type_association.py deleted file mode 100644 index 420c9cd7..00000000 --- a/glance/db/sqlalchemy/metadef_api/resource_type_association.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_db import exception as db_exc -from oslo_log import log as logging -import sqlalchemy.orm as sa_orm - -from glance.common import exception as exc -from glance.db.sqlalchemy.metadef_api import namespace as namespace_api -from glance.db.sqlalchemy.metadef_api import resource_type as resource_type_api -from glance.db.sqlalchemy.metadef_api import utils as metadef_utils -from glance.db.sqlalchemy import models_metadef as models - -LOG = logging.getLogger(__name__) - - -def _to_db_dict(namespace_id, resource_type_id, model_dict): - """transform a model dict to a metadef_namespace_resource_type dict""" - db_dict = {'namespace_id': namespace_id, - 'resource_type_id': resource_type_id, - 'properties_target': model_dict['properties_target'], - 'prefix': model_dict['prefix']} - return db_dict - - -def _to_model_dict(resource_type_name, ns_res_type_dict): - """transform a metadef_namespace_resource_type dict to a model dict""" - model_dict = {'name': resource_type_name, - 'properties_target': ns_res_type_dict['properties_target'], - 'prefix': ns_res_type_dict['prefix'], - 'created_at': ns_res_type_dict['created_at'], - 'updated_at': ns_res_type_dict['updated_at']} - return model_dict - - -def _set_model_dict(resource_type_name, properties_target, prefix, - created_at, updated_at): - """return a model dict set with the passed in key values""" - model_dict = {'name': resource_type_name, - 'properties_target': properties_target, - 'prefix': prefix, - 'created_at': created_at, - 'updated_at': updated_at} - return model_dict - - -def _get(context, 
namespace_name, resource_type_name, - namespace_id, resource_type_id, session): - """Get a namespace resource_type association""" - - # visibility check assumed done in calling routine via namespace_get - try: - query = session.query(models.MetadefNamespaceResourceType).filter_by( - namespace_id=namespace_id, resource_type_id=resource_type_id) - db_rec = query.one() - except sa_orm.exc.NoResultFound: - LOG.debug("The metadata definition resource-type association of" - " resource_type=%(resource_type_name)s to" - " namespace_name=%(namespace_name)s was not found.", - {'resource_type_name': resource_type_name, - 'namespace_name': namespace_name}) - raise exc.MetadefResourceTypeAssociationNotFound( - resource_type_name=resource_type_name, - namespace_name=namespace_name) - - return db_rec - - -def _create_association( - context, namespace_name, resource_type_name, values, session): - """Create an association, raise if it already exists.""" - - namespace_resource_type_rec = models.MetadefNamespaceResourceType() - metadef_utils.drop_protected_attrs( - models.MetadefNamespaceResourceType, values) - # values['updated_at'] = timeutils.utcnow() # TS mixin should do this - namespace_resource_type_rec.update(values.copy()) - try: - namespace_resource_type_rec.save(session=session) - except db_exc.DBDuplicateEntry: - LOG.debug("The metadata definition resource-type association of" - " resource_type=%(resource_type_name)s to" - " namespace=%(namespace_name)s, already exists.", - {'resource_type_name': resource_type_name, - 'namespace_name': namespace_name}) - raise exc.MetadefDuplicateResourceTypeAssociation( - resource_type_name=resource_type_name, - namespace_name=namespace_name) - - return namespace_resource_type_rec.to_dict() - - -def _delete(context, namespace_name, resource_type_name, - namespace_id, resource_type_id, session): - """Delete a resource type association or raise if not found.""" - - db_rec = _get(context, namespace_name, resource_type_name, - namespace_id, 
resource_type_id, session) - session.delete(db_rec) - session.flush() - - return db_rec.to_dict() - - -def get(context, namespace_name, resource_type_name, session): - """Get a resource_type associations; raise if not found""" - namespace = namespace_api.get( - context, namespace_name, session) - - resource_type = resource_type_api.get( - context, resource_type_name, session) - - found = _get(context, namespace_name, resource_type_name, - namespace['id'], resource_type['id'], session) - - return _to_model_dict(resource_type_name, found) - - -def get_all_by_namespace(context, namespace_name, session): - """List resource_type associations by namespace, raise if not found""" - - # namespace get raises an exception if not visible - namespace = namespace_api.get( - context, namespace_name, session) - - db_recs = ( - session.query(models.MetadefResourceType) - .join(models.MetadefResourceType.associations) - .filter_by(namespace_id=namespace['id']) - .values(models.MetadefResourceType.name, - models.MetadefNamespaceResourceType.properties_target, - models.MetadefNamespaceResourceType.prefix, - models.MetadefNamespaceResourceType.created_at, - models.MetadefNamespaceResourceType.updated_at)) - - model_dict_list = [] - for name, properties_target, prefix, created_at, updated_at in db_recs: - model_dict_list.append( - _set_model_dict - (name, properties_target, prefix, created_at, updated_at) - ) - - return model_dict_list - - -def create(context, namespace_name, values, session): - """Create an association, raise if already exists or ns not found.""" - - namespace = namespace_api.get( - context, namespace_name, session) - - # if the resource_type does not exist, create it - resource_type_name = values['name'] - metadef_utils.drop_protected_attrs( - models.MetadefNamespaceResourceType, values) - try: - resource_type = resource_type_api.get( - context, resource_type_name, session) - except exc.NotFound: - resource_type = None - LOG.debug("Creating resource-type %s", 
resource_type_name) - - if resource_type is None: - resource_type_dict = {'name': resource_type_name, 'protected': False} - resource_type = resource_type_api.create( - context, resource_type_dict, session) - - # Create the association record, set the field values - ns_resource_type_dict = _to_db_dict( - namespace['id'], resource_type['id'], values) - new_rec = _create_association(context, namespace_name, resource_type_name, - ns_resource_type_dict, session) - - return _to_model_dict(resource_type_name, new_rec) - - -def delete(context, namespace_name, resource_type_name, session): - """Delete an association or raise if not found""" - - namespace = namespace_api.get( - context, namespace_name, session) - - resource_type = resource_type_api.get( - context, resource_type_name, session) - - deleted = _delete(context, namespace_name, resource_type_name, - namespace['id'], resource_type['id'], session) - - return _to_model_dict(resource_type_name, deleted) - - -def delete_namespace_content(context, namespace_id, session): - """Use this def only if the ns for the id has been verified as visible""" - - count = 0 - query = session.query(models.MetadefNamespaceResourceType).filter_by( - namespace_id=namespace_id) - count = query.delete(synchronize_session='fetch') - return count diff --git a/glance/db/sqlalchemy/metadef_api/tag.py b/glance/db/sqlalchemy/metadef_api/tag.py deleted file mode 100644 index 556ef788..00000000 --- a/glance/db/sqlalchemy/metadef_api/tag.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_db import exception as db_exc -from oslo_db.sqlalchemy.utils import paginate_query -from oslo_log import log as logging -from sqlalchemy import func -import sqlalchemy.orm as sa_orm - -from glance.common import exception as exc -from glance.db.sqlalchemy.metadef_api import namespace as namespace_api -import glance.db.sqlalchemy.metadef_api.utils as metadef_utils -from glance.db.sqlalchemy import models_metadef as models -from glance.i18n import _LW - -LOG = logging.getLogger(__name__) - - -def _get(context, id, session): - try: - query = (session.query(models.MetadefTag).filter_by(id=id)) - metadef_tag = query.one() - except sa_orm.exc.NoResultFound: - msg = (_LW("Metadata tag not found for id %s") % id) - LOG.warn(msg) - raise exc.MetadefTagNotFound(message=msg) - return metadef_tag - - -def _get_by_name(context, namespace_name, name, session): - namespace = namespace_api.get(context, namespace_name, session) - try: - query = (session.query(models.MetadefTag).filter_by( - name=name, namespace_id=namespace['id'])) - metadef_tag = query.one() - except sa_orm.exc.NoResultFound: - LOG.debug("The metadata tag with name=%(name)s" - " was not found in namespace=%(namespace_name)s.", - {'name': name, 'namespace_name': namespace_name}) - raise exc.MetadefTagNotFound(name=name, - namespace_name=namespace_name) - return metadef_tag - - -def get_all(context, namespace_name, session, filters=None, marker=None, - limit=None, sort_key='created_at', sort_dir='desc'): - """Get all tags that match zero or more filters. 
- - :param filters: dict of filter keys and values. - :param marker: tag id after which to start page - :param limit: maximum number of namespaces to return - :param sort_key: namespace attribute by which results should be sorted - :param sort_dir: direction in which results should be sorted (asc, desc) - """ - - namespace = namespace_api.get(context, namespace_name, session) - query = (session.query(models.MetadefTag).filter_by( - namespace_id=namespace['id'])) - - marker_tag = None - if marker is not None: - marker_tag = _get(context, marker, session) - - sort_keys = ['created_at', 'id'] - sort_keys.insert(0, sort_key) if sort_key not in sort_keys else sort_keys - - query = paginate_query(query=query, - model=models.MetadefTag, - limit=limit, - sort_keys=sort_keys, - marker=marker_tag, sort_dir=sort_dir) - metadef_tag = query.all() - metadef_tag_list = [] - for tag in metadef_tag: - metadef_tag_list.append(tag.to_dict()) - - return metadef_tag_list - - -def create(context, namespace_name, values, session): - namespace = namespace_api.get(context, namespace_name, session) - values.update({'namespace_id': namespace['id']}) - - metadef_tag = models.MetadefTag() - metadef_utils.drop_protected_attrs(models.MetadefTag, values) - metadef_tag.update(values.copy()) - try: - metadef_tag.save(session=session) - except db_exc.DBDuplicateEntry: - LOG.debug("A metadata tag name=%(name)s" - " already exists in namespace=%(namespace_name)s." 
- " (Please note that metadata tag names are" - " case insensitive).", - {'name': metadef_tag.name, - 'namespace_name': namespace_name}) - raise exc.MetadefDuplicateTag( - name=metadef_tag.name, namespace_name=namespace_name) - - return metadef_tag.to_dict() - - -def create_tags(context, namespace_name, tag_list, session): - - metadef_tags_list = [] - if tag_list: - namespace = namespace_api.get(context, namespace_name, session) - - try: - with session.begin(): - query = (session.query(models.MetadefTag).filter_by( - namespace_id=namespace['id'])) - query.delete(synchronize_session='fetch') - - for value in tag_list: - value.update({'namespace_id': namespace['id']}) - metadef_utils.drop_protected_attrs( - models.MetadefTag, value) - metadef_tag = models.MetadefTag() - metadef_tag.update(value.copy()) - metadef_tag.save(session=session) - metadef_tags_list.append(metadef_tag.to_dict()) - except db_exc.DBDuplicateEntry: - LOG.debug("A metadata tag name=%(name)s" - " in namespace=%(namespace_name)s already exists.", - {'name': metadef_tag.name, - 'namespace_name': namespace_name}) - raise exc.MetadefDuplicateTag( - name=metadef_tag.name, namespace_name=namespace_name) - - return metadef_tags_list - - -def get(context, namespace_name, name, session): - metadef_tag = _get_by_name(context, namespace_name, name, session) - return metadef_tag.to_dict() - - -def update(context, namespace_name, id, values, session): - """Update an tag, raise if ns not found/visible or duplicate result""" - namespace_api.get(context, namespace_name, session) - - metadata_tag = _get(context, id, session) - metadef_utils.drop_protected_attrs(models.MetadefTag, values) - # values['updated_at'] = timeutils.utcnow() - done by TS mixin - try: - metadata_tag.update(values.copy()) - metadata_tag.save(session=session) - except db_exc.DBDuplicateEntry: - LOG.debug("Invalid update. 
It would result in a duplicate" - " metadata tag with same name=%(name)s" - " in namespace=%(namespace_name)s.", - {'name': values['name'], - 'namespace_name': namespace_name}) - raise exc.MetadefDuplicateTag( - name=values['name'], namespace_name=namespace_name) - - return metadata_tag.to_dict() - - -def delete(context, namespace_name, name, session): - namespace_api.get(context, namespace_name, session) - md_tag = _get_by_name(context, namespace_name, name, session) - - session.delete(md_tag) - session.flush() - - return md_tag.to_dict() - - -def delete_namespace_content(context, namespace_id, session): - """Use this def only if the ns for the id has been verified as visible""" - count = 0 - query = (session.query(models.MetadefTag).filter_by( - namespace_id=namespace_id)) - count = query.delete(synchronize_session='fetch') - return count - - -def delete_by_namespace_name(context, namespace_name, session): - namespace = namespace_api.get(context, namespace_name, session) - return delete_namespace_content(context, namespace['id'], session) - - -def count(context, namespace_name, session): - """Get the count of objects for a namespace, raise if ns not found""" - namespace = namespace_api.get(context, namespace_name, session) - query = (session.query(func.count(models.MetadefTag.id)).filter_by( - namespace_id=namespace['id'])) - return query.scalar() diff --git a/glance/db/sqlalchemy/metadef_api/utils.py b/glance/db/sqlalchemy/metadef_api/utils.py deleted file mode 100644 index ca20150e..00000000 --- a/glance/db/sqlalchemy/metadef_api/utils.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -def drop_protected_attrs(model_class, values): - """ - Removed protected attributes from values dictionary using the models - __protected_attributes__ field. - """ - for attr in model_class.__protected_attributes__: - if attr in values: - del values[attr] diff --git a/glance/db/sqlalchemy/migrate_repo/README b/glance/db/sqlalchemy/migrate_repo/README deleted file mode 100644 index d2c55f65..00000000 --- a/glance/db/sqlalchemy/migrate_repo/README +++ /dev/null @@ -1,4 +0,0 @@ -This is a database migration repository. - -More information at -https://git.openstack.org/cgit/openstack/sqlalchemy-migrate/ diff --git a/glance/db/sqlalchemy/migrate_repo/__init__.py b/glance/db/sqlalchemy/migrate_repo/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/db/sqlalchemy/migrate_repo/manage.py b/glance/db/sqlalchemy/migrate_repo/manage.py deleted file mode 100644 index 4cb1fa46..00000000 --- a/glance/db/sqlalchemy/migrate_repo/manage.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from migrate.versioning.shell import main - -# This should probably be a console script entry point. -if __name__ == '__main__': - main(debug='False', repository='.') diff --git a/glance/db/sqlalchemy/migrate_repo/migrate.cfg b/glance/db/sqlalchemy/migrate_repo/migrate.cfg deleted file mode 100644 index 6761c459..00000000 --- a/glance/db/sqlalchemy/migrate_repo/migrate.cfg +++ /dev/null @@ -1,20 +0,0 @@ -[db_settings] -# Used to identify which repository this database is versioned under. -# You can use the name of your project. -repository_id=Glance Migrations - -# The name of the database table used to track the schema version. -# This name shouldn't already be used by your project. -# If this is changed once a database is under version control, you'll need to -# change the table name in each database too. -version_table=migrate_version - -# When committing a change script, Migrate will attempt to generate the -# sql for all supported databases; normally, if one of them fails - probably -# because you don't have that database installed - it is ignored and the -# commit continues, perhaps ending successfully. -# Databases in this list MUST compile successfully during a commit, or the -# entire commit will fail. List the databases your application will actually -# be using to ensure your updates to that database work properly. -# This must be a list; example: ['postgres','sqlite'] -required_dbs=[] diff --git a/glance/db/sqlalchemy/migrate_repo/schema.py b/glance/db/sqlalchemy/migrate_repo/schema.py deleted file mode 100644 index 9d40ea64..00000000 --- a/glance/db/sqlalchemy/migrate_repo/schema.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Various conveniences used for migration scripts -""" - -from oslo_log import log as logging -import sqlalchemy.types - -from glance.i18n import _LI - - -LOG = logging.getLogger(__name__) - - -String = lambda length: sqlalchemy.types.String( - length=length, convert_unicode=False, - unicode_error=None, _warn_on_bytestring=False) - - -Text = lambda: sqlalchemy.types.Text( - length=None, convert_unicode=False, - unicode_error=None, _warn_on_bytestring=False) - - -Boolean = lambda: sqlalchemy.types.Boolean(create_constraint=True, name=None) - - -DateTime = lambda: sqlalchemy.types.DateTime(timezone=False) - - -Integer = lambda: sqlalchemy.types.Integer() - - -BigInteger = lambda: sqlalchemy.types.BigInteger() - - -PickleType = lambda: sqlalchemy.types.PickleType() - - -Numeric = lambda: sqlalchemy.types.Numeric() - - -def from_migration_import(module_name, fromlist): - """ - Import a migration file and return the module - - :param module_name: name of migration module to import from - (ex: 001_add_images_table) - :param fromlist: list of items to import (ex: define_images_table) - :returns: module object - - This bit of ugliness warrants an explanation: - - As you're writing migrations, you'll frequently want to refer to - tables defined in previous migrations. - - In the interest of not repeating yourself, you need a way of importing - that table into a 'future' migration. - - However, tables are bound to metadata, so what you need to import is - really a table factory, which you can late-bind to your current - metadata object. 
- - Moreover, migrations begin with a number (001...), which means they - aren't valid Python identifiers. This means we can't perform a - 'normal' import on them (the Python lexer will 'splode). Instead, we - need to use __import__ magic to bring the table-factory into our - namespace. - - Example Usage: - - (define_images_table,) = from_migration_import( - '001_add_images_table', ['define_images_table']) - - images = define_images_table(meta) - - # Refer to images table - """ - module_path = 'glance.db.sqlalchemy.migrate_repo.versions.%s' % module_name - module = __import__(module_path, globals(), locals(), fromlist, 0) - return [getattr(module, item) for item in fromlist] - - -def create_tables(tables): - for table in tables: - LOG.info(_LI("creating table %(table)s"), {'table': table}) - table.create() - - -def drop_tables(tables): - for table in tables: - LOG.info(_LI("dropping table %(table)s"), {'table': table}) - table.drop() diff --git a/glance/db/sqlalchemy/migrate_repo/versions/001_add_images_table.py b/glance/db/sqlalchemy/migrate_repo/versions/001_add_images_table.py deleted file mode 100644 index 35de3562..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/001_add_images_table.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy.schema import (Column, MetaData, Table) - -from glance.db.sqlalchemy.migrate_repo.schema import ( - Boolean, DateTime, Integer, String, Text, create_tables) # noqa - - -def define_images_table(meta): - images = Table('images', - meta, - Column('id', Integer(), primary_key=True, nullable=False), - Column('name', String(255)), - Column('type', String(30)), - Column('size', Integer()), - Column('status', String(30), nullable=False), - Column('is_public', - Boolean(), - nullable=False, - default=False, - index=True), - Column('location', Text()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime()), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - return images - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - tables = [define_images_table(meta)] - create_tables(tables) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/002_add_image_properties_table.py b/glance/db/sqlalchemy/migrate_repo/versions/002_add_image_properties_table.py deleted file mode 100644 index 0759a311..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/002_add_image_properties_table.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy.schema import ( - Column, ForeignKey, Index, MetaData, Table, UniqueConstraint) - -from glance.db.sqlalchemy.migrate_repo.schema import ( - Boolean, DateTime, Integer, String, Text, create_tables, - from_migration_import) # noqa - - -def define_image_properties_table(meta): - (define_images_table,) = from_migration_import( - '001_add_images_table', ['define_images_table']) - - images = define_images_table(meta) # noqa - - # NOTE(dperaza) DB2: specify the UniqueConstraint option when creating the - # table will cause an index being created to specify the index - # name and skip the step of creating another index with the same columns. - # The index name is needed so it can be dropped and re-created later on. - - constr_kwargs = {} - if meta.bind.name == 'ibm_db_sa': - constr_kwargs['name'] = 'ix_image_properties_image_id_key' - - image_properties = Table('image_properties', - meta, - Column('id', - Integer(), - primary_key=True, - nullable=False), - Column('image_id', - Integer(), - ForeignKey('images.id'), - nullable=False, - index=True), - Column('key', String(255), nullable=False), - Column('value', Text()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime()), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - UniqueConstraint('image_id', 'key', - **constr_kwargs), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - if meta.bind.name != 'ibm_db_sa': - Index('ix_image_properties_image_id_key', - image_properties.c.image_id, - image_properties.c.key) - - return image_properties - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - tables = [define_image_properties_table(meta)] - create_tables(tables) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/003_add_disk_format.py b/glance/db/sqlalchemy/migrate_repo/versions/003_add_disk_format.py deleted file mode 100644 index 
c67b32a7..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/003_add_disk_format.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import * # noqa - -from glance.db.sqlalchemy.migrate_repo.schema import ( - Boolean, DateTime, Integer, String, Text, from_migration_import) # noqa - - -def get_images_table(meta): - """ - Returns the Table object for the images table that - corresponds to the images table definition of this version. 
- """ - images = Table('images', - meta, - Column('id', Integer(), primary_key=True, nullable=False), - Column('name', String(255)), - Column('disk_format', String(20)), - Column('container_format', String(20)), - Column('size', Integer()), - Column('status', String(30), nullable=False), - Column('is_public', - Boolean(), - nullable=False, - default=False, - index=True), - Column('location', Text()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime()), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) - - return images - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - (define_images_table,) = from_migration_import( - '001_add_images_table', ['define_images_table']) - (define_image_properties_table,) = from_migration_import( - '002_add_image_properties_table', ['define_image_properties_table']) - - conn = migrate_engine.connect() - images = define_images_table(meta) - image_properties = define_image_properties_table(meta) - - # Steps to take, in this order: - # 1) Move the existing type column from Image into - # ImageProperty for all image records that have a non-NULL - # type column - # 2) Drop the type column in images - # 3) Add the new columns to images - - # The below wackiness correlates to the following ANSI SQL: - # SELECT images.* FROM images - # LEFT JOIN image_properties - # ON images.id = image_properties.image_id - # AND image_properties.key = 'type' - # WHERE image_properties.image_id IS NULL - # AND images.type IS NOT NULL - # - # which returns all the images that have a type set - # but that DO NOT yet have an image_property record - # with key of type. 
- from_stmt = [ - images.outerjoin(image_properties, - and_(images.c.id == image_properties.c.image_id, - image_properties.c.key == 'type')) - ] - and_stmt = and_(image_properties.c.image_id == None, - images.c.type != None) - sel = select([images], from_obj=from_stmt).where(and_stmt) - image_records = conn.execute(sel).fetchall() - property_insert = image_properties.insert() - for record in image_records: - conn.execute(property_insert, - image_id=record.id, - key='type', - created_at=record.created_at, - deleted=False, - value=record.type) - conn.close() - - disk_format = Column('disk_format', String(20)) - disk_format.create(images) - container_format = Column('container_format', String(20)) - container_format.create(images) - - images.columns['type'].drop() diff --git a/glance/db/sqlalchemy/migrate_repo/versions/003_sqlite_upgrade.sql b/glance/db/sqlalchemy/migrate_repo/versions/003_sqlite_upgrade.sql deleted file mode 100644 index 1340f343..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/003_sqlite_upgrade.sql +++ /dev/null @@ -1,61 +0,0 @@ --- Move type column from base images table --- to be records in image_properties table -CREATE TEMPORARY TABLE tmp_type_records (id INTEGER NOT NULL, type VARCHAR(30) NOT NULL); -INSERT INTO tmp_type_records -SELECT id, type -FROM images -WHERE type IS NOT NULL; - -REPLACE INTO image_properties -(image_id, key, value, created_at, deleted) -SELECT id, 'type', type, date('now'), 0 -FROM tmp_type_records; - -DROP TABLE tmp_type_records; - --- Make changes to the base images table -CREATE TEMPORARY TABLE images_backup ( - id INTEGER NOT NULL, - name VARCHAR(255), - size INTEGER, - status VARCHAR(30) NOT NULL, - is_public BOOLEAN NOT NULL, - location TEXT, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - PRIMARY KEY (id) -); - -INSERT INTO images_backup -SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted -FROM 
images; - -DROP TABLE images; - -CREATE TABLE images ( - id INTEGER NOT NULL, - name VARCHAR(255), - size INTEGER, - status VARCHAR(30) NOT NULL, - is_public BOOLEAN NOT NULL, - location TEXT, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - disk_format VARCHAR(20), - container_format VARCHAR(20), - PRIMARY KEY (id), - CHECK (is_public IN (0, 1)), - CHECK (deleted IN (0, 1)) -); -CREATE INDEX ix_images_deleted ON images (deleted); -CREATE INDEX ix_images_is_public ON images (is_public); - -INSERT INTO images (id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted) -SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted -FROM images_backup; - -DROP TABLE images_backup; diff --git a/glance/db/sqlalchemy/migrate_repo/versions/004_add_checksum.py b/glance/db/sqlalchemy/migrate_repo/versions/004_add_checksum.py deleted file mode 100644 index ea70fe21..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/004_add_checksum.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import * # noqa - -from glance.db.sqlalchemy.migrate_repo.schema import ( - Boolean, DateTime, Integer, String, Text, from_migration_import) # noqa - - -def get_images_table(meta): - """ - Returns the Table object for the images table that - corresponds to the images table definition of this version. - """ - images = Table('images', - meta, - Column('id', Integer(), primary_key=True, nullable=False), - Column('name', String(255)), - Column('disk_format', String(20)), - Column('container_format', String(20)), - Column('size', Integer()), - Column('status', String(30), nullable=False), - Column('is_public', - Boolean(), - nullable=False, - default=False, - index=True), - Column('location', Text()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime()), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - Column('checksum', String(32)), - mysql_engine='InnoDB', - extend_existing=True) - - return images - - -def get_image_properties_table(meta): - """ - No changes to the image properties table from 002... - """ - (define_image_properties_table,) = from_migration_import( - '002_add_image_properties_table', ['define_image_properties_table']) - - image_properties = define_image_properties_table(meta) - return image_properties - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - images = get_images_table(meta) - - checksum = Column('checksum', String(32)) - checksum.create(images) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/005_size_big_integer.py b/glance/db/sqlalchemy/migrate_repo/versions/005_size_big_integer.py deleted file mode 100644 index b3d86169..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/005_size_big_integer.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import * # noqa - -from glance.db.sqlalchemy.migrate_repo.schema import ( - Boolean, DateTime, BigInteger, Integer, String, - Text, from_migration_import) # noqa - - -def get_images_table(meta): - """ - Returns the Table object for the images table that - corresponds to the images table definition of this version. - """ - images = Table('images', - meta, - Column('id', Integer(), primary_key=True, nullable=False), - Column('name', String(255)), - Column('disk_format', String(20)), - Column('container_format', String(20)), - Column('size', BigInteger()), - Column('status', String(30), nullable=False), - Column('is_public', - Boolean(), - nullable=False, - default=False, - index=True), - Column('location', Text()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime()), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) - - return images - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - # No changes to SQLite stores are necessary, since - # there is no BIG INTEGER type in SQLite. 
Unfortunately, - # running the Python 005_size_big_integer.py migration script - # on a SQLite datastore results in an error in the sa-migrate - # code that does the workarounds for SQLite not having - # ALTER TABLE MODIFY COLUMN ability - - dialect = migrate_engine.url.get_dialect().name - - if not dialect.startswith('sqlite'): - (get_images_table,) = from_migration_import( - '003_add_disk_format', ['get_images_table']) - - images = get_images_table(meta) - images.columns['size'].alter(type=BigInteger()) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/006_key_to_name.py b/glance/db/sqlalchemy/migrate_repo/versions/006_key_to_name.py deleted file mode 100644 index 2ec99e96..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/006_key_to_name.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import * # noqa - -from glance.db.sqlalchemy.migrate_repo.schema import from_migration_import - - -def get_images_table(meta): - """ - No changes to the image properties table from 002... 
- """ - (get_images_table,) = from_migration_import( - '004_add_checksum', ['get_images_table']) - - images = get_images_table(meta) - return images - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - (get_image_properties_table,) = from_migration_import( - '004_add_checksum', ['get_image_properties_table']) - image_properties = get_image_properties_table(meta) - - if migrate_engine.name == "ibm_db_sa": - # NOTE(dperaza) ibm db2 does not allow ALTER INDEX so we will drop - # the index, rename the column, then re-create the index - sql_commands = [ - """ALTER TABLE image_properties DROP UNIQUE - ix_image_properties_image_id_key;""", - """ALTER TABLE image_properties RENAME COLUMN \"key\" to name;""", - """ALTER TABLE image_properties ADD CONSTRAINT - ix_image_properties_image_id_name UNIQUE(image_id, name);""", - ] - for command in sql_commands: - meta.bind.execute(command) - else: - index = Index('ix_image_properties_image_id_key', - image_properties.c.image_id, - image_properties.c.key) - index.rename('ix_image_properties_image_id_name') - - image_properties = get_image_properties_table(meta) - image_properties.columns['key'].alter(name="name") diff --git a/glance/db/sqlalchemy/migrate_repo/versions/006_mysql_upgrade.sql b/glance/db/sqlalchemy/migrate_repo/versions/006_mysql_upgrade.sql deleted file mode 100644 index e30e90a5..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/006_mysql_upgrade.sql +++ /dev/null @@ -1,11 +0,0 @@ --- --- This file is necessary because MySQL does not support --- renaming indexes. 
--- -DROP INDEX ix_image_properties_image_id_key ON image_properties; - --- Rename the `key` column to `name` -ALTER TABLE image_properties -CHANGE COLUMN `key` name VARCHAR(255) NOT NULL; - -CREATE UNIQUE INDEX ix_image_properties_image_id_name ON image_properties (image_id, name); diff --git a/glance/db/sqlalchemy/migrate_repo/versions/006_sqlite_upgrade.sql b/glance/db/sqlalchemy/migrate_repo/versions/006_sqlite_upgrade.sql deleted file mode 100644 index 108d1ee9..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/006_sqlite_upgrade.sql +++ /dev/null @@ -1,44 +0,0 @@ --- --- This is necessary because SQLite does not support --- RENAME INDEX or ALTER TABLE CHANGE COLUMN. --- -CREATE TEMPORARY TABLE image_properties_backup ( - id INTEGER NOT NULL, - image_id INTEGER NOT NULL, - name VARCHAR(255) NOT NULL, - value TEXT, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - PRIMARY KEY (id) -); - -INSERT INTO image_properties_backup -SELECT id, image_id, key, value, created_at, updated_at, deleted_at, deleted -FROM image_properties; - -DROP TABLE image_properties; - -CREATE TABLE image_properties ( - id INTEGER NOT NULL, - image_id INTEGER NOT NULL, - name VARCHAR(255) NOT NULL, - value TEXT, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - PRIMARY KEY (id), - CHECK (deleted IN (0, 1)), - UNIQUE (image_id, name), - FOREIGN KEY(image_id) REFERENCES images (id) -); -CREATE INDEX ix_image_properties_name ON image_properties (name); -CREATE INDEX ix_image_properties_deleted ON image_properties (deleted); - -INSERT INTO image_properties (id, image_id, name, value, created_at, updated_at, deleted_at, deleted) -SELECT id, image_id, name, value, created_at, updated_at, deleted_at, deleted -FROM image_properties_backup; - -DROP TABLE image_properties_backup; diff --git a/glance/db/sqlalchemy/migrate_repo/versions/007_add_owner.py 
b/glance/db/sqlalchemy/migrate_repo/versions/007_add_owner.py deleted file mode 100644 index e43136be..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/007_add_owner.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import * # noqa - -from glance.db.sqlalchemy.migrate_repo.schema import ( - Boolean, DateTime, BigInteger, Integer, String, - Text) # noqa - - -def get_images_table(meta): - """ - Returns the Table object for the images table that corresponds to - the images table definition of this version. 
- """ - images = Table('images', - meta, - Column('id', Integer(), primary_key=True, nullable=False), - Column('name', String(255)), - Column('disk_format', String(20)), - Column('container_format', String(20)), - Column('size', BigInteger()), - Column('status', String(30), nullable=False), - Column('is_public', - Boolean(), - nullable=False, - default=False, - index=True), - Column('location', Text()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime()), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - Column('checksum', String(32)), - Column('owner', String(255)), - mysql_engine='InnoDB', - extend_existing=True) - - return images - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - images = get_images_table(meta) - - owner = Column('owner', String(255)) - owner.create(images) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/008_add_image_members_table.py b/glance/db/sqlalchemy/migrate_repo/versions/008_add_image_members_table.py deleted file mode 100644 index 2172e105..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/008_add_image_members_table.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import * # noqa - -from glance.db.sqlalchemy.migrate_repo.schema import ( - Boolean, DateTime, Integer, String, create_tables, - from_migration_import) # noqa - - -def get_images_table(meta): - """ - No changes to the images table from 007... - """ - (get_images_table,) = from_migration_import( - '007_add_owner', ['get_images_table']) - - images = get_images_table(meta) - return images - - -def get_image_members_table(meta): - images = get_images_table(meta) # noqa - - image_members = Table('image_members', - meta, - Column('id', - Integer(), - primary_key=True, - nullable=False), - Column('image_id', - Integer(), - ForeignKey('images.id'), - nullable=False, - index=True), - Column('member', String(255), nullable=False), - Column('can_share', - Boolean(), - nullable=False, - default=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime()), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - UniqueConstraint('image_id', 'member'), - mysql_charset='utf8', - mysql_engine='InnoDB', - extend_existing=True) - - # DB2: an index has already been created for the UniqueConstraint option - # specified on the Table() statement above. - if meta.bind.name != "ibm_db_sa": - Index('ix_image_members_image_id_member', image_members.c.image_id, - image_members.c.member) - - return image_members - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - tables = [get_image_members_table(meta)] - create_tables(tables) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/009_add_mindisk_and_minram.py b/glance/db/sqlalchemy/migrate_repo/versions/009_add_mindisk_and_minram.py deleted file mode 100644 index 0c45ab76..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/009_add_mindisk_and_minram.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import * # noqa - -from glance.db.sqlalchemy.migrate_repo.schema import ( - Boolean, DateTime, Integer, String, Text) # noqa - - -def get_images_table(meta): - """ - Returns the Table object for the images table that - corresponds to the images table definition of this version. - """ - images = Table('images', - meta, - Column('id', Integer(), primary_key=True, nullable=False), - Column('name', String(255)), - Column('disk_format', String(20)), - Column('container_format', String(20)), - Column('size', Integer()), - Column('status', String(30), nullable=False), - Column('is_public', - Boolean(), - nullable=False, - default=False, - index=True), - Column('location', Text()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime()), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - Column('checksum', String(32)), - Column('owner', String(255)), - Column('min_disk', Integer(), default=0), - Column('min_ram', Integer(), default=0), - mysql_engine='InnoDB', - extend_existing=True) - - return images - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - images = get_images_table(meta) - - min_disk = Column('min_disk', Integer(), default=0) - min_disk.create(images) - - min_ram = Column('min_ram', Integer(), default=0) - min_ram.create(images) diff --git 
a/glance/db/sqlalchemy/migrate_repo/versions/010_default_update_at.py b/glance/db/sqlalchemy/migrate_repo/versions/010_default_update_at.py deleted file mode 100644 index 9d1b1516..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/010_default_update_at.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import * # noqa - -from glance.db.sqlalchemy.migrate_repo.schema import from_migration_import - - -def get_images_table(meta): - """ - No changes to the images table from 008... 
- """ - (get_images_table,) = from_migration_import( - '008_add_image_members_table', ['get_images_table']) - - images = get_images_table(meta) - return images - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - images_table = get_images_table(meta) - - # set updated_at to created_at if equal to None - conn = migrate_engine.connect() - conn.execute( - images_table.update( - images_table.c.updated_at == None, - {images_table.c.updated_at: images_table.c.created_at})) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/011_make_mindisk_and_minram_notnull.py b/glance/db/sqlalchemy/migrate_repo/versions/011_make_mindisk_and_minram_notnull.py deleted file mode 100644 index 120e3adc..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/011_make_mindisk_and_minram_notnull.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sqlalchemy - - -meta = sqlalchemy.MetaData() - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - images = sqlalchemy.Table('images', meta, autoload=True) - images.c.min_disk.alter(nullable=False) - images.c.min_ram.alter(nullable=False) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/011_sqlite_upgrade.sql b/glance/db/sqlalchemy/migrate_repo/versions/011_sqlite_upgrade.sql deleted file mode 100644 index 7f77b4f4..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/011_sqlite_upgrade.sql +++ /dev/null @@ -1,59 +0,0 @@ -CREATE TEMPORARY TABLE images_backup ( - id INTEGER NOT NULL, - name VARCHAR(255), - size INTEGER, - status VARCHAR(30) NOT NULL, - is_public BOOLEAN NOT NULL, - location TEXT, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - disk_format VARCHAR(20), - container_format VARCHAR(20), - checksum VARCHAR(32), - owner VARCHAR(255), - min_disk INTEGER, - min_ram INTEGER, - PRIMARY KEY (id), - CHECK (is_public IN (0, 1)), - CHECK (deleted IN (0, 1)) -); - -INSERT INTO images_backup -SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram -FROM images; - -DROP TABLE images; - -CREATE TABLE images ( - id INTEGER NOT NULL, - name VARCHAR(255), - size INTEGER, - status VARCHAR(30) NOT NULL, - is_public BOOLEAN NOT NULL, - location TEXT, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - disk_format VARCHAR(20), - container_format VARCHAR(20), - checksum VARCHAR(32), - owner VARCHAR(255), - min_disk INTEGER NOT NULL, - min_ram INTEGER NOT NULL, - PRIMARY KEY (id), - CHECK (is_public IN (0, 1)), - CHECK (deleted IN (0, 1)) -); - -CREATE INDEX ix_images_deleted ON images (deleted); -CREATE INDEX ix_images_is_public ON images (is_public); - - -INSERT INTO images -SELECT id, name, size, status, is_public, 
location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram -FROM images_backup; - -DROP TABLE images_backup; diff --git a/glance/db/sqlalchemy/migrate_repo/versions/012_id_to_uuid.py b/glance/db/sqlalchemy/migrate_repo/versions/012_id_to_uuid.py deleted file mode 100644 index 0701f14f..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/012_id_to_uuid.py +++ /dev/null @@ -1,355 +0,0 @@ -# Copyright 2013 IBM Corp. -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -While SQLAlchemy/sqlalchemy-migrate should abstract this correctly, -there are known issues with these libraries so SQLite and non-SQLite -migrations must be done separately. -""" - -import uuid - -import migrate -import sqlalchemy - - -and_ = sqlalchemy.and_ -or_ = sqlalchemy.or_ - - -def upgrade(migrate_engine): - """ - Call the correct dialect-specific upgrade. 
- """ - meta = sqlalchemy.MetaData() - meta.bind = migrate_engine - - t_images = _get_table('images', meta) - t_image_members = _get_table('image_members', meta) - t_image_properties = _get_table('image_properties', meta) - - dialect = migrate_engine.url.get_dialect().name - if dialect == "sqlite": - _upgrade_sqlite(meta, t_images, t_image_members, t_image_properties) - _update_all_ids_to_uuids(t_images, t_image_members, t_image_properties) - elif dialect == "ibm_db_sa": - _upgrade_db2(meta, t_images, t_image_members, t_image_properties) - _update_all_ids_to_uuids(t_images, t_image_members, t_image_properties) - _add_db2_constraints(meta) - else: - _upgrade_other(t_images, t_image_members, t_image_properties, dialect) - - -def _upgrade_sqlite(meta, t_images, t_image_members, t_image_properties): - """ - Upgrade 011 -> 012 with special SQLite-compatible logic. - """ - - sql_commands = [ - """CREATE TABLE images_backup ( - id VARCHAR(36) NOT NULL, - name VARCHAR(255), - size INTEGER, - status VARCHAR(30) NOT NULL, - is_public BOOLEAN NOT NULL, - location TEXT, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - disk_format VARCHAR(20), - container_format VARCHAR(20), - checksum VARCHAR(32), - owner VARCHAR(255), - min_disk INTEGER NOT NULL, - min_ram INTEGER NOT NULL, - PRIMARY KEY (id), - CHECK (is_public IN (0, 1)), - CHECK (deleted IN (0, 1)) - );""", - """INSERT INTO images_backup - SELECT * FROM images;""", - """CREATE TABLE image_members_backup ( - id INTEGER NOT NULL, - image_id VARCHAR(36) NOT NULL, - member VARCHAR(255) NOT NULL, - can_share BOOLEAN NOT NULL, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - PRIMARY KEY (id), - UNIQUE (image_id, member), - CHECK (can_share IN (0, 1)), - CHECK (deleted IN (0, 1)), - FOREIGN KEY(image_id) REFERENCES images (id) - );""", - """INSERT INTO image_members_backup - SELECT * FROM image_members;""", - 
"""CREATE TABLE image_properties_backup ( - id INTEGER NOT NULL, - image_id VARCHAR(36) NOT NULL, - name VARCHAR(255) NOT NULL, - value TEXT, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - PRIMARY KEY (id), - CHECK (deleted IN (0, 1)), - UNIQUE (image_id, name), - FOREIGN KEY(image_id) REFERENCES images (id) - );""", - """INSERT INTO image_properties_backup - SELECT * FROM image_properties;""", - ] - - for command in sql_commands: - meta.bind.execute(command) - - _sqlite_table_swap(meta, t_image_members, t_image_properties, t_images) - - -def _upgrade_db2(meta, t_images, t_image_members, t_image_properties): - """ - Upgrade for DB2. - """ - t_images.c.id.alter(sqlalchemy.String(36), primary_key=True) - - image_members_backup = sqlalchemy.Table( - 'image_members_backup', - meta, - sqlalchemy.Column('id', - sqlalchemy.Integer(), - primary_key=True, - nullable=False), - sqlalchemy.Column('image_id', - sqlalchemy.String(36), - nullable=False, - index=True), - sqlalchemy.Column('member', - sqlalchemy.String(255), - nullable=False), - sqlalchemy.Column('can_share', - sqlalchemy.Boolean(), - nullable=False, - default=False), - sqlalchemy.Column('created_at', - sqlalchemy.DateTime(), - nullable=False), - sqlalchemy.Column('updated_at', - sqlalchemy.DateTime()), - sqlalchemy.Column('deleted_at', - sqlalchemy.DateTime()), - sqlalchemy.Column('deleted', - sqlalchemy.Boolean(), - nullable=False, - default=False, - index=True), - sqlalchemy.UniqueConstraint('image_id', 'member'), - extend_existing=True) - - image_properties_backup = sqlalchemy.Table( - 'image_properties_backup', - meta, - sqlalchemy.Column('id', - sqlalchemy.Integer(), - primary_key=True, - nullable=False), - sqlalchemy.Column('image_id', - sqlalchemy.String(36), - nullable=False, - index=True), - sqlalchemy.Column('name', - sqlalchemy.String(255), - nullable=False), - sqlalchemy.Column('value', - sqlalchemy.Text()), - sqlalchemy.Column('created_at', 
- sqlalchemy.DateTime(), - nullable=False), - sqlalchemy.Column('updated_at', - sqlalchemy.DateTime()), - sqlalchemy.Column('deleted_at', - sqlalchemy.DateTime()), - sqlalchemy.Column('deleted', - sqlalchemy.Boolean(), - nullable=False, - default=False, - index=True), - sqlalchemy.UniqueConstraint( - 'image_id', 'name', - name='ix_image_properties_image_id_name'), - extend_existing=True) - - image_members_backup.create() - image_properties_backup.create() - - sql_commands = [ - """INSERT INTO image_members_backup - SELECT * FROM image_members;""", - """INSERT INTO image_properties_backup - SELECT * FROM image_properties;""", - ] - - for command in sql_commands: - meta.bind.execute(command) - - t_image_members.drop() - t_image_properties.drop() - - image_members_backup.rename(name='image_members') - image_properties_backup.rename(name='image_properties') - - -def _add_db2_constraints(meta): - # Create the foreign keys - sql_commands = [ - """ALTER TABLE image_members ADD CONSTRAINT member_image_id - FOREIGN KEY (image_id) - REFERENCES images (id);""", - """ALTER TABLE image_properties ADD CONSTRAINT property_image_id - FOREIGN KEY (image_id) - REFERENCES images (id);""", - ] - for command in sql_commands: - meta.bind.execute(command) - - -def _upgrade_other(t_images, t_image_members, t_image_properties, dialect): - """ - Upgrade 011 -> 012 with logic for non-SQLite databases. 
- """ - foreign_keys = _get_foreign_keys(t_images, - t_image_members, - t_image_properties, dialect) - - for fk in foreign_keys: - fk.drop() - - t_images.c.id.alter(sqlalchemy.String(36), primary_key=True) - t_image_members.c.image_id.alter(sqlalchemy.String(36)) - t_image_properties.c.image_id.alter(sqlalchemy.String(36)) - - _update_all_ids_to_uuids(t_images, t_image_members, t_image_properties) - - for fk in foreign_keys: - fk.create() - - -def _sqlite_table_swap(meta, t_image_members, t_image_properties, t_images): - t_image_members.drop() - t_image_properties.drop() - t_images.drop() - - meta.bind.execute("ALTER TABLE images_backup " - "RENAME TO images") - meta.bind.execute("ALTER TABLE image_members_backup " - "RENAME TO image_members") - meta.bind.execute("ALTER TABLE image_properties_backup " - "RENAME TO image_properties") - meta.bind.execute("""CREATE INDEX ix_image_properties_deleted - ON image_properties (deleted);""") - meta.bind.execute("""CREATE INDEX ix_image_properties_name - ON image_properties (name);""") - - -def _get_table(table_name, metadata): - """Return a sqlalchemy Table definition with associated metadata.""" - return sqlalchemy.Table(table_name, metadata, autoload=True) - - -def _get_foreign_keys(t_images, t_image_members, t_image_properties, dialect): - """Retrieve and return foreign keys for members/properties tables.""" - foreign_keys = [] - if t_image_members.foreign_keys: - img_members_fk_name = list(t_image_members.foreign_keys)[0].name - if dialect == 'mysql': - fk1 = migrate.ForeignKeyConstraint([t_image_members.c.image_id], - [t_images.c.id], - name=img_members_fk_name) - else: - fk1 = migrate.ForeignKeyConstraint([t_image_members.c.image_id], - [t_images.c.id]) - foreign_keys.append(fk1) - - if t_image_properties.foreign_keys: - img_properties_fk_name = list(t_image_properties.foreign_keys)[0].name - if dialect == 'mysql': - fk2 = migrate.ForeignKeyConstraint([t_image_properties.c.image_id], - [t_images.c.id], - 
name=img_properties_fk_name) - else: - fk2 = migrate.ForeignKeyConstraint([t_image_properties.c.image_id], - [t_images.c.id]) - foreign_keys.append(fk2) - - return foreign_keys - - -def _update_all_ids_to_uuids(t_images, t_image_members, t_image_properties): - """Transition from INTEGER id to VARCHAR(36) id.""" - images = list(t_images.select().execute()) - - for image in images: - old_id = image["id"] - new_id = str(uuid.uuid4()) - - t_images.update().where( - t_images.c.id == old_id).values(id=new_id).execute() - - t_image_members.update().where( - t_image_members.c.image_id == old_id).values( - image_id=new_id).execute() - - t_image_properties.update().where( - t_image_properties.c.image_id == old_id).values( - image_id=new_id).execute() - - t_image_properties.update().where( - and_(or_(t_image_properties.c.name == 'kernel_id', - t_image_properties.c.name == 'ramdisk_id'), - t_image_properties.c.value == old_id)).values( - value=new_id).execute() - - -def _update_all_uuids_to_ids(t_images, t_image_members, t_image_properties): - """Transition from VARCHAR(36) id to INTEGER id.""" - images = list(t_images.select().execute()) - - new_id = 1 - for image in images: - old_id = image["id"] - - t_images.update().where( - t_images.c.id == old_id).values( - id=str(new_id)).execute() - - t_image_members.update().where( - t_image_members.c.image_id == old_id).values( - image_id=str(new_id)).execute() - - t_image_properties.update().where( - t_image_properties.c.image_id == old_id).values( - image_id=str(new_id)).execute() - - t_image_properties.update().where( - and_(or_(t_image_properties.c.name == 'kernel_id', - t_image_properties.c.name == 'ramdisk_id'), - t_image_properties.c.value == old_id)).values( - value=str(new_id)).execute() - - new_id += 1 diff --git a/glance/db/sqlalchemy/migrate_repo/versions/013_add_protected.py b/glance/db/sqlalchemy/migrate_repo/versions/013_add_protected.py deleted file mode 100644 index 29afeef6..00000000 --- 
a/glance/db/sqlalchemy/migrate_repo/versions/013_add_protected.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData, Table, Column, Boolean - - -meta = MetaData() - -protected = Column('protected', Boolean, default=False) - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - - images = Table('images', meta, autoload=True) - images.create_column(protected) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/014_add_image_tags_table.py b/glance/db/sqlalchemy/migrate_repo/versions/014_add_image_tags_table.py deleted file mode 100644 index 84d28a3c..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/014_add_image_tags_table.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import schema - -from glance.db.sqlalchemy.migrate_repo import schema as glance_schema - - -def define_image_tags_table(meta): - # Load the images table so the foreign key can be set up properly - schema.Table('images', meta, autoload=True) - - image_tags = schema.Table('image_tags', - meta, - schema.Column('id', - glance_schema.Integer(), - primary_key=True, - nullable=False), - schema.Column('image_id', - glance_schema.String(36), - schema.ForeignKey('images.id'), - nullable=False), - schema.Column('value', - glance_schema.String(255), - nullable=False), - schema.Column('created_at', - glance_schema.DateTime(), - nullable=False), - schema.Column('updated_at', - glance_schema.DateTime()), - schema.Column('deleted_at', - glance_schema.DateTime()), - schema.Column('deleted', - glance_schema.Boolean(), - nullable=False, - default=False), - mysql_engine='InnoDB', - mysql_charset='utf8') - - schema.Index('ix_image_tags_image_id', - image_tags.c.image_id) - - schema.Index('ix_image_tags_image_id_tag_value', - image_tags.c.image_id, - image_tags.c.value) - - return image_tags - - -def upgrade(migrate_engine): - meta = schema.MetaData() - meta.bind = migrate_engine - tables = [define_image_tags_table(meta)] - glance_schema.create_tables(tables) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py b/glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py deleted file mode 100644 index ad3a31f9..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -from oslo_utils import encodeutils -import six.moves.urllib.parse as urlparse -import sqlalchemy - -from glance.common import exception -from glance.i18n import _, _LE - -LOG = logging.getLogger(__name__) - - -def upgrade(migrate_engine): - migrate_location_credentials(migrate_engine, to_quoted=True) - - -def migrate_location_credentials(migrate_engine, to_quoted): - """ - Migrate location credentials for swift uri's between the quoted - and unquoted forms. - - :param migrate_engine: The configured db engine - :param to_quoted: If True, migrate location credentials from - unquoted to quoted form. If False, do the - reverse. - """ - meta = sqlalchemy.schema.MetaData() - meta.bind = migrate_engine - - images_table = sqlalchemy.Table('images', meta, autoload=True) - - images = list(images_table.select(images_table.c.location.startswith( - 'swift')).execute()) - - for image in images: - try: - fixed_uri = legacy_parse_uri(image['location'], to_quoted) - images_table.update().where( - images_table.c.id == image['id']).values( - location=fixed_uri).execute() - except exception.BadStoreUri as e: - reason = encodeutils.exception_to_unicode(e) - msg = _LE("Invalid store uri for image: %(image_id)s. " - "Details: %(reason)s") % {'image_id': image.id, - 'reason': reason} - LOG.exception(msg) - raise - - -def legacy_parse_uri(uri, to_quote): - """ - Parse URLs. This method fixes an issue where credentials specified - in the URL are interpreted differently in Python 2.6.1+ than prior - versions of Python. 
It also deals with the peculiarity that new-style - Swift URIs have where a username can contain a ':', like so: - - swift://account:user:pass@authurl.com/container/obj - - If to_quoted is True, the uri is assumed to have credentials that - have not been quoted, and the resulting uri will contain quoted - credentials. - - If to_quoted is False, the uri is assumed to have credentials that - have been quoted, and the resulting uri will contain credentials - that have not been quoted. - """ - # Make sure that URIs that contain multiple schemes, such as: - # swift://user:pass@http://authurl.com/v1/container/obj - # are immediately rejected. - if uri.count('://') != 1: - reason = _("URI cannot contain more than one occurrence of a scheme." - "If you have specified a URI like " - "swift://user:pass@http://authurl.com/v1/container/obj" - ", you need to change it to use the swift+http:// scheme, " - "like so: " - "swift+http://user:pass@authurl.com/v1/container/obj") - - raise exception.BadStoreUri(message=reason) - - pieces = urlparse.urlparse(uri) - if pieces.scheme not in ('swift', 'swift+http', 'swift+https'): - raise exception.BadStoreUri(message="Unacceptable scheme: '%s'" % - pieces.scheme) - scheme = pieces.scheme - netloc = pieces.netloc - path = pieces.path.lstrip('/') - if netloc != '': - # > Python 2.6.1 - if '@' in netloc: - creds, netloc = netloc.split('@') - else: - creds = None - else: - # Python 2.6.1 compat - # see lp659445 and Python issue7904 - if '@' in path: - creds, path = path.split('@') - else: - creds = None - netloc = path[0:path.find('/')].strip('/') - path = path[path.find('/'):].strip('/') - if creds: - cred_parts = creds.split(':') - - # User can be account:user, in which case cred_parts[0:2] will be - # the account and user. 
Combine them into a single username of - # account:user - if to_quote: - if len(cred_parts) == 1: - reason = (_("Badly formed credentials '%(creds)s' in Swift " - "URI") % {'creds': creds}) - raise exception.BadStoreUri(message=reason) - elif len(cred_parts) == 3: - user = ':'.join(cred_parts[0:2]) - else: - user = cred_parts[0] - key = cred_parts[-1] - user = user - key = key - else: - if len(cred_parts) != 2: - reason = (_("Badly formed credentials in Swift URI.")) - raise exception.BadStoreUri(message=reason) - user, key = cred_parts - user = urlparse.unquote(user) - key = urlparse.unquote(key) - else: - user = None - key = None - path_parts = path.split('/') - try: - obj = path_parts.pop() - container = path_parts.pop() - if not netloc.startswith('http'): - # push hostname back into the remaining to build full authurl - path_parts.insert(0, netloc) - auth_or_store_url = '/'.join(path_parts) - except IndexError: - reason = _("Badly formed S3 URI: %(uri)s") % {'uri': uri} - raise exception.BadStoreUri(message=reason) - - if auth_or_store_url.startswith('http://'): - auth_or_store_url = auth_or_store_url[len('http://'):] - elif auth_or_store_url.startswith('https://'): - auth_or_store_url = auth_or_store_url[len('https://'):] - - credstring = '' - if user and key: - if to_quote: - quote_user = urlparse.quote(user) - quote_key = urlparse.quote(key) - else: - quote_user = user - quote_key = key - credstring = '%s:%s@' % (quote_user, quote_key) - - auth_or_store_url = auth_or_store_url.strip('/') - container = container.strip('/') - obj = obj.strip('/') - - return '%s://%s%s/%s/%s' % (scheme, credstring, auth_or_store_url, - container, obj) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/016_add_status_image_member.py b/glance/db/sqlalchemy/migrate_repo/versions/016_add_status_image_member.py deleted file mode 100644 index 45d3b72b..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/016_add_status_image_member.py +++ /dev/null @@ -1,28 +0,0 @@ -# 
Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData, Table, Column, String - - -meta = MetaData() - -status = Column('status', String(20), default="pending") - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - - image_members = Table('image_members', meta, autoload=True) - image_members.create_column(status) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py b/glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py deleted file mode 100644 index 0c8b4c49..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -This migration handles migrating encrypted image location values from -the unquoted form to the quoted form. - -If 'metadata_encryption_key' is specified in the config then this -migration performs the following steps for every entry in the images table: -1. Decrypt the location value with the metadata_encryption_key -2. Changes the value to its quoted form -3. Encrypts the new value with the metadata_encryption_key -4. Inserts the new value back into the row - -Fixes bug #1081043 -""" -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -import six.moves.urllib.parse as urlparse -import sqlalchemy - -from glance.common import crypt -from glance.common import exception -from glance.i18n import _, _LE, _LI, _LW - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -CONF.import_opt('metadata_encryption_key', 'glance.common.config') - - -def upgrade(migrate_engine): - migrate_location_credentials(migrate_engine, to_quoted=True) - - -def migrate_location_credentials(migrate_engine, to_quoted): - """ - Migrate location credentials for encrypted swift uri's between the - quoted and unquoted forms. - - :param migrate_engine: The configured db engine - :param to_quoted: If True, migrate location credentials from - unquoted to quoted form. If False, do the - reverse. - """ - if not CONF.metadata_encryption_key: - msg = _LI("'metadata_encryption_key' was not specified in the config" - " file or a config file was not specified. 
This means that" - " this migration is a NOOP.") - LOG.info(msg) - return - - meta = sqlalchemy.schema.MetaData() - meta.bind = migrate_engine - - images_table = sqlalchemy.Table('images', meta, autoload=True) - - images = list(images_table.select().execute()) - - for image in images: - try: - fixed_uri = fix_uri_credentials(image['location'], to_quoted) - images_table.update().where( - images_table.c.id == image['id']).values( - location=fixed_uri).execute() - except exception.Invalid: - msg = _LW("Failed to decrypt location value for image" - " %(image_id)s") % {'image_id': image['id']} - LOG.warn(msg) - except exception.BadStoreUri as e: - reason = encodeutils.exception_to_unicode(e) - msg = _LE("Invalid store uri for image: %(image_id)s. " - "Details: %(reason)s") % {'image_id': image.id, - 'reason': reason} - LOG.exception(msg) - raise - - -def decrypt_location(uri): - return crypt.urlsafe_decrypt(CONF.metadata_encryption_key, uri) - - -def encrypt_location(uri): - return crypt.urlsafe_encrypt(CONF.metadata_encryption_key, uri, 64) - - -def fix_uri_credentials(uri, to_quoted): - """ - Fix the given uri's embedded credentials by round-tripping with - StoreLocation. - - If to_quoted is True, the uri is assumed to have credentials that - have not been quoted, and the resulting uri will contain quoted - credentials. - - If to_quoted is False, the uri is assumed to have credentials that - have been quoted, and the resulting uri will contain credentials - that have not been quoted. - """ - if not uri: - return - try: - decrypted_uri = decrypt_location(uri) - # NOTE (ameade): If a uri is not encrypted or incorrectly encoded then we - # we raise an exception. - except (TypeError, ValueError) as e: - raise exception.Invalid(str(e)) - - return legacy_parse_uri(decrypted_uri, to_quoted) - - -def legacy_parse_uri(uri, to_quote): - """ - Parse URLs. 
This method fixes an issue where credentials specified - in the URL are interpreted differently in Python 2.6.1+ than prior - versions of Python. It also deals with the peculiarity that new-style - Swift URIs have where a username can contain a ':', like so: - - swift://account:user:pass@authurl.com/container/obj - - If to_quoted is True, the uri is assumed to have credentials that - have not been quoted, and the resulting uri will contain quoted - credentials. - - If to_quoted is False, the uri is assumed to have credentials that - have been quoted, and the resulting uri will contain credentials - that have not been quoted. - """ - # Make sure that URIs that contain multiple schemes, such as: - # swift://user:pass@http://authurl.com/v1/container/obj - # are immediately rejected. - if uri.count('://') != 1: - reason = _("URI cannot contain more than one occurrence of a scheme." - "If you have specified a URI like " - "swift://user:pass@http://authurl.com/v1/container/obj" - ", you need to change it to use the swift+http:// scheme, " - "like so: " - "swift+http://user:pass@authurl.com/v1/container/obj") - raise exception.BadStoreUri(message=reason) - - pieces = urlparse.urlparse(uri) - if pieces.scheme not in ('swift', 'swift+http', 'swift+https'): - raise exception.BadStoreUri(message="Unacceptable scheme: '%s'" % - pieces.scheme) - scheme = pieces.scheme - netloc = pieces.netloc - path = pieces.path.lstrip('/') - if netloc != '': - # > Python 2.6.1 - if '@' in netloc: - creds, netloc = netloc.split('@') - else: - creds = None - else: - # Python 2.6.1 compat - # see lp659445 and Python issue7904 - if '@' in path: - creds, path = path.split('@') - else: - creds = None - netloc = path[0:path.find('/')].strip('/') - path = path[path.find('/'):].strip('/') - if creds: - cred_parts = creds.split(':') - - # User can be account:user, in which case cred_parts[0:2] will be - # the account and user. 
Combine them into a single username of - # account:user - if to_quote: - if len(cred_parts) == 1: - reason = (_("Badly formed credentials '%(creds)s' in Swift " - "URI") % {'creds': creds}) - raise exception.BadStoreUri(message=reason) - elif len(cred_parts) == 3: - user = ':'.join(cred_parts[0:2]) - else: - user = cred_parts[0] - key = cred_parts[-1] - user = user - key = key - else: - if len(cred_parts) != 2: - reason = (_("Badly formed credentials in Swift URI.")) - raise exception.BadStoreUri(message=reason) - user, key = cred_parts - user = urlparse.unquote(user) - key = urlparse.unquote(key) - else: - user = None - key = None - path_parts = path.split('/') - try: - obj = path_parts.pop() - container = path_parts.pop() - if not netloc.startswith('http'): - # push hostname back into the remaining to build full authurl - path_parts.insert(0, netloc) - auth_or_store_url = '/'.join(path_parts) - except IndexError: - reason = _("Badly formed S3 URI: %(uri)s") % {'uri': uri} - raise exception.BadStoreUri(message=reason) - - if auth_or_store_url.startswith('http://'): - auth_or_store_url = auth_or_store_url[len('http://'):] - elif auth_or_store_url.startswith('https://'): - auth_or_store_url = auth_or_store_url[len('https://'):] - - credstring = '' - if user and key: - if to_quote: - quote_user = urlparse.quote(user) - quote_key = urlparse.quote(key) - else: - quote_user = user - quote_key = key - credstring = '%s:%s@' % (quote_user, quote_key) - - auth_or_store_url = auth_or_store_url.strip('/') - container = container.strip('/') - obj = obj.strip('/') - - uri = '%s://%s%s/%s/%s' % (scheme, credstring, auth_or_store_url, - container, obj) - return encrypt_location(uri) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/018_add_image_locations_table.py b/glance/db/sqlalchemy/migrate_repo/versions/018_add_image_locations_table.py deleted file mode 100644 index 3ce38c6b..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/018_add_image_locations_table.py +++ 
/dev/null @@ -1,57 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy - -from glance.db.sqlalchemy.migrate_repo import schema - - -def upgrade(migrate_engine): - meta = sqlalchemy.schema.MetaData(migrate_engine) - - # NOTE(bcwaldon): load the images table for the ForeignKey below - sqlalchemy.Table('images', meta, autoload=True) - - image_locations_table = sqlalchemy.Table( - 'image_locations', meta, - sqlalchemy.Column('id', - schema.Integer(), - primary_key=True, - nullable=False), - sqlalchemy.Column('image_id', - schema.String(36), - sqlalchemy.ForeignKey('images.id'), - nullable=False, - index=True), - sqlalchemy.Column('value', - schema.Text(), - nullable=False), - sqlalchemy.Column('created_at', - schema.DateTime(), - nullable=False), - sqlalchemy.Column('updated_at', - schema.DateTime()), - sqlalchemy.Column('deleted_at', - schema.DateTime()), - sqlalchemy.Column('deleted', - schema.Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - schema.create_tables([image_locations_table]) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/019_migrate_image_locations.py b/glance/db/sqlalchemy/migrate_repo/versions/019_migrate_image_locations.py deleted file mode 100644 index 5e7e0136..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/019_migrate_image_locations.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 
2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy - - -def get_images_table(meta): - return sqlalchemy.Table('images', meta, autoload=True) - - -def get_image_locations_table(meta): - return sqlalchemy.Table('image_locations', meta, autoload=True) - - -def upgrade(migrate_engine): - meta = sqlalchemy.schema.MetaData(migrate_engine) - - images_table = get_images_table(meta) - image_locations_table = get_image_locations_table(meta) - - image_records = images_table.select().execute().fetchall() - for image in image_records: - if image.location is not None: - values = { - 'image_id': image.id, - 'value': image.location, - 'created_at': image.created_at, - 'updated_at': image.updated_at, - 'deleted': image.deleted, - 'deleted_at': image.deleted_at, - } - image_locations_table.insert(values=values).execute() diff --git a/glance/db/sqlalchemy/migrate_repo/versions/020_drop_images_table_location.py b/glance/db/sqlalchemy/migrate_repo/versions/020_drop_images_table_location.py deleted file mode 100644 index c4c5b0de..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/020_drop_images_table_location.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy - - -def get_images_table(meta): - return sqlalchemy.Table('images', meta, autoload=True) - - -def upgrade(migrate_engine): - meta = sqlalchemy.schema.MetaData(migrate_engine) - images_table = get_images_table(meta) - images_table.columns['location'].drop() diff --git a/glance/db/sqlalchemy/migrate_repo/versions/021_set_engine_mysql_innodb.py b/glance/db/sqlalchemy/migrate_repo/versions/021_set_engine_mysql_innodb.py deleted file mode 100644 index 3891d597..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/021_set_engine_mysql_innodb.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import MetaData - -tables = ['image_locations'] - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - if migrate_engine.name == "mysql": - d = migrate_engine.execute("SHOW TABLE STATUS WHERE Engine!='InnoDB';") - for row in d.fetchall(): - table_name = row[0] - if table_name in tables: - migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" % - table_name) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/022_image_member_index.py b/glance/db/sqlalchemy/migrate_repo/versions/022_image_member_index.py deleted file mode 100644 index f39de5f9..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/022_image_member_index.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import re - -from migrate.changeset import UniqueConstraint -from oslo_db import exception as db_exception -from sqlalchemy import MetaData, Table -from sqlalchemy.exc import OperationalError, ProgrammingError - - -NEW_KEYNAME = 'image_members_image_id_member_deleted_at_key' -ORIGINAL_KEYNAME_RE = re.compile('image_members_image_id.*_key') - - -def upgrade(migrate_engine): - image_members = _get_image_members_table(migrate_engine) - - if migrate_engine.name in ('mysql', 'postgresql'): - try: - UniqueConstraint('image_id', - name=_get_original_keyname(migrate_engine.name), - table=image_members).drop() - except (OperationalError, ProgrammingError, db_exception.DBError): - UniqueConstraint('image_id', - name=_infer_original_keyname(image_members), - table=image_members).drop() - UniqueConstraint('image_id', - 'member', - 'deleted_at', - name=NEW_KEYNAME, - table=image_members).create() - - -def _get_image_members_table(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - return Table('image_members', meta, autoload=True) - - -def _get_original_keyname(db): - return {'mysql': 'image_id', - 'postgresql': 'image_members_image_id_member_key'}[db] - - -def _infer_original_keyname(table): - for i in table.indexes: - if ORIGINAL_KEYNAME_RE.match(i.name): - return i.name diff --git a/glance/db/sqlalchemy/migrate_repo/versions/023_placeholder.py b/glance/db/sqlalchemy/migrate_repo/versions/023_placeholder.py deleted file mode 100644 index 43f3c572..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/023_placeholder.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -def upgrade(migrate_engine): - pass diff --git a/glance/db/sqlalchemy/migrate_repo/versions/024_placeholder.py b/glance/db/sqlalchemy/migrate_repo/versions/024_placeholder.py deleted file mode 100644 index 43f3c572..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/024_placeholder.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -def upgrade(migrate_engine): - pass diff --git a/glance/db/sqlalchemy/migrate_repo/versions/025_placeholder.py b/glance/db/sqlalchemy/migrate_repo/versions/025_placeholder.py deleted file mode 100644 index 43f3c572..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/025_placeholder.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -def upgrade(migrate_engine): - pass diff --git a/glance/db/sqlalchemy/migrate_repo/versions/026_add_location_storage_information.py b/glance/db/sqlalchemy/migrate_repo/versions/026_add_location_storage_information.py deleted file mode 100644 index 881d8bb4..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/026_add_location_storage_information.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sqlalchemy - -from glance.db.sqlalchemy.migrate_repo import schema - - -def upgrade(migrate_engine): - meta = sqlalchemy.schema.MetaData() - meta.bind = migrate_engine - - image_locations_table = sqlalchemy.Table('image_locations', - meta, - autoload=True) - - meta_data = sqlalchemy.Column('meta_data', - schema.PickleType(), - default={}) - meta_data.create(image_locations_table) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/027_checksum_index.py b/glance/db/sqlalchemy/migrate_repo/versions/027_checksum_index.py deleted file mode 100644 index 1db7d2e3..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/027_checksum_index.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2013 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData, Table, Index - -INDEX_NAME = 'checksum_image_idx' - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - images = Table('images', meta, autoload=True) - - index = Index(INDEX_NAME, images.c.checksum) - index.create(migrate_engine) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/028_owner_index.py b/glance/db/sqlalchemy/migrate_repo/versions/028_owner_index.py deleted file mode 100644 index f8b24c90..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/028_owner_index.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2013 Rackspace Hosting -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData, Table, Index - -INDEX_NAME = 'owner_image_idx' - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - images = Table('images', meta, autoload=True) - - index = Index(INDEX_NAME, images.c.owner) - index.create(migrate_engine) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/029_location_meta_data_pickle_to_string.py b/glance/db/sqlalchemy/migrate_repo/versions/029_location_meta_data_pickle_to_string.py deleted file mode 100644 index 41e04632..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/029_location_meta_data_pickle_to_string.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2013 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import pickle - -import sqlalchemy -from sqlalchemy import Table, Column # noqa -from glance.db.sqlalchemy import models - - -def upgrade(migrate_engine): - meta = sqlalchemy.schema.MetaData(migrate_engine) - image_locations = Table('image_locations', meta, autoload=True) - new_meta_data = Column('storage_meta_data', models.JSONEncodedDict, - default={}) - new_meta_data.create(image_locations) - - noe = pickle.dumps({}) - s = sqlalchemy.sql.select([image_locations]).where( - image_locations.c.meta_data != noe) - conn = migrate_engine.connect() - res = conn.execute(s) - - for row in res: - meta_data = row['meta_data'] - x = pickle.loads(meta_data) - if x != {}: - stmt = image_locations.update().where( - image_locations.c.id == row['id']).values(storage_meta_data=x) - conn.execute(stmt) - conn.close() - image_locations.columns['meta_data'].drop() - image_locations.columns['storage_meta_data'].alter(name='meta_data') diff --git a/glance/db/sqlalchemy/migrate_repo/versions/030_add_tasks_table.py b/glance/db/sqlalchemy/migrate_repo/versions/030_add_tasks_table.py deleted file mode 100644 index 4f03100f..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/030_add_tasks_table.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy.schema import (Column, MetaData, Table, Index) - -from glance.db.sqlalchemy.migrate_repo.schema import ( - Boolean, DateTime, String, Text, create_tables) # noqa - - -def define_tasks_table(meta): - tasks = Table('tasks', - meta, - Column('id', String(36), primary_key=True, nullable=False), - Column('type', String(30), nullable=False), - Column('status', String(30), nullable=False), - Column('owner', String(255), nullable=False), - Column('input', Text()), # json blob - Column('result', Text()), # json blob - Column('message', Text()), - Column('expires_at', DateTime(), nullable=True), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime()), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - Index('ix_tasks_type', tasks.c.type) - Index('ix_tasks_status', tasks.c.status) - Index('ix_tasks_owner', tasks.c.owner) - Index('ix_tasks_deleted', tasks.c.deleted) - Index('ix_tasks_updated_at', tasks.c.updated_at) - - return tasks - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - tables = [define_tasks_table(meta)] - create_tables(tables) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/031_remove_duplicated_locations.py b/glance/db/sqlalchemy/migrate_repo/versions/031_remove_duplicated_locations.py deleted file mode 100644 index f8846215..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/031_remove_duplicated_locations.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy -from sqlalchemy import func -from sqlalchemy import orm -from sqlalchemy import sql -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = sqlalchemy.schema.MetaData(migrate_engine) - image_locations = Table('image_locations', meta, autoload=True) - - if migrate_engine.name == "ibm_db_sa": - il = orm.aliased(image_locations) - # NOTE(wenchma): Get all duplicated rows. - qry = (sql.select([il.c.id]) - .where(il.c.id > (sql.select([func.min(image_locations.c.id)]) - .where(image_locations.c.image_id == il.c.image_id) - .where(image_locations.c.value == il.c.value) - .where(image_locations.c.meta_data == il.c.meta_data) - .where(image_locations.c.deleted == False))) - .where(il.c.deleted == False) - .execute() - ) - - for row in qry: - stmt = (image_locations.delete() - .where(image_locations.c.id == row[0]) - .where(image_locations.c.deleted == False)) - stmt.execute() - - else: - session = orm.sessionmaker(bind=migrate_engine)() - - # NOTE(flaper87): Lets group by - # image_id, location and metadata. - grp = [image_locations.c.image_id, - image_locations.c.value, - image_locations.c.meta_data] - - # NOTE(flaper87): Get all duplicated rows - qry = (session.query(*grp) - .filter(image_locations.c.deleted == False) - .group_by(*grp) - .having(func.count() > 1)) - - for row in qry: - # NOTE(flaper87): Not the fastest way to do it. - # This is the best way to do it since sqlalchemy - # has a bug around delete + limit. 
- s = (sql.select([image_locations.c.id]) - .where(image_locations.c.image_id == row[0]) - .where(image_locations.c.value == row[1]) - .where(image_locations.c.meta_data == row[2]) - .where(image_locations.c.deleted == False) - .limit(1).execute()) - stmt = (image_locations.delete() - .where(image_locations.c.id == s.first()[0])) - stmt.execute() - - session.close() diff --git a/glance/db/sqlalchemy/migrate_repo/versions/032_add_task_info_table.py b/glance/db/sqlalchemy/migrate_repo/versions/032_add_task_info_table.py deleted file mode 100644 index a5ce70a6..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/032_add_task_info_table.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2013 Rackspace -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy.schema import (Column, ForeignKey, MetaData, Table) - -from glance.db.sqlalchemy.migrate_repo.schema import (String, - Text, - create_tables) # noqa - -TASKS_MIGRATE_COLUMNS = ['input', 'message', 'result'] - - -def define_task_info_table(meta): - Table('tasks', meta, autoload=True) - # NOTE(nikhil): input and result are stored as text in the DB. - # SQLAlchemy marshals the data to/from JSON using custom type - # JSONEncodedDict. It uses simplejson underneath. 
- task_info = Table('task_info', - meta, - Column('task_id', String(36), - ForeignKey('tasks.id'), - primary_key=True, - nullable=False), - Column('input', Text()), - Column('result', Text()), - Column('message', Text()), - mysql_engine='InnoDB', - mysql_charset='utf8') - - return task_info - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - tables = [define_task_info_table(meta)] - create_tables(tables) - - tasks_table = Table('tasks', meta, autoload=True) - task_info_table = Table('task_info', meta, autoload=True) - - tasks = tasks_table.select().execute().fetchall() - for task in tasks: - values = { - 'task_id': task.id, - 'input': task.input, - 'result': task.result, - 'message': task.message, - } - task_info_table.insert(values=values).execute() - - for col_name in TASKS_MIGRATE_COLUMNS: - tasks_table.columns[col_name].drop() diff --git a/glance/db/sqlalchemy/migrate_repo/versions/033_add_location_status.py b/glance/db/sqlalchemy/migrate_repo/versions/033_add_location_status.py deleted file mode 100644 index 798cc4b9..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/033_add_location_status.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import six -import sqlalchemy - -from glance.db.sqlalchemy.migrate_repo import schema - - -def upgrade(migrate_engine): - meta = sqlalchemy.schema.MetaData() - meta.bind = migrate_engine - - images_table = sqlalchemy.Table('images', meta, autoload=True) - image_locations_table = sqlalchemy.Table('image_locations', meta, - autoload=True) - - # Create 'status' column for image_locations table - status = sqlalchemy.Column('status', schema.String(30), - server_default='active', nullable=False) - status.create(image_locations_table) - - # Set 'status' column initial value for image_locations table - mapping = {'active': 'active', 'pending_delete': 'pending_delete', - 'deleted': 'deleted', 'killed': 'deleted'} - for src, dst in six.iteritems(mapping): - subq = sqlalchemy.sql.select([images_table.c.id]).where( - images_table.c.status == src) - image_locations_table.update(values={'status': dst}).where( - image_locations_table.c.image_id.in_(subq)).execute() diff --git a/glance/db/sqlalchemy/migrate_repo/versions/034_add_virtual_size.py b/glance/db/sqlalchemy/migrate_repo/versions/034_add_virtual_size.py deleted file mode 100644 index a80ae5d0..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/034_add_virtual_size.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sqlalchemy - - -def upgrade(migrate_engine): - meta = sqlalchemy.MetaData() - meta.bind = migrate_engine - - images = sqlalchemy.Table('images', meta, autoload=True) - virtual_size = sqlalchemy.Column('virtual_size', - sqlalchemy.BigInteger) - images.create_column(virtual_size) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/035_add_metadef_tables.py b/glance/db/sqlalchemy/migrate_repo/versions/035_add_metadef_tables.py deleted file mode 100644 index f7dd2501..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/035_add_metadef_tables.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sqlalchemy -from sqlalchemy.schema import ( - Column, ForeignKey, Index, MetaData, Table, UniqueConstraint) # noqa - -from glance.common import timeutils -from glance.db.sqlalchemy.migrate_repo.schema import ( - Boolean, DateTime, Integer, String, Text, create_tables) # noqa - - -RESOURCE_TYPES = [u'OS::Glance::Image', u'OS::Cinder::Volume', - u'OS::Nova::Flavor', u'OS::Nova::Aggregate', - u'OS::Nova::Server'] - - -def _get_metadef_resource_types_table(meta): - return sqlalchemy.Table('metadef_resource_types', meta, autoload=True) - - -def _populate_resource_types(resource_types_table): - now = timeutils.utcnow() - for resource_type in RESOURCE_TYPES: - values = { - 'name': resource_type, - 'protected': True, - 'created_at': now, - 'updated_at': now - } - resource_types_table.insert(values=values).execute() - - -def define_metadef_namespaces_table(meta): - - # NOTE: For DB2 if UniqueConstraint is used when creating a table - # an index will automatically be created. So, for DB2 specify the - # index name up front. If not DB2 then create the Index. 
- _constr_kwargs = {} - if meta.bind.name == 'ibm_db_sa': - _constr_kwargs['name'] = 'ix_namespaces_namespace' - - namespaces = Table('metadef_namespaces', - meta, - Column('id', Integer(), primary_key=True, - nullable=False), - Column('namespace', String(80), nullable=False), - Column('display_name', String(80)), - Column('description', Text()), - Column('visibility', String(32)), - Column('protected', Boolean()), - Column('owner', String(255), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime()), - UniqueConstraint('namespace', **_constr_kwargs), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - if meta.bind.name != 'ibm_db_sa': - Index('ix_namespaces_namespace', namespaces.c.namespace) - - return namespaces - - -def define_metadef_objects_table(meta): - - _constr_kwargs = {} - if meta.bind.name == 'ibm_db_sa': - _constr_kwargs['name'] = 'ix_objects_namespace_id_name' - - objects = Table('metadef_objects', - meta, - Column('id', Integer(), primary_key=True, nullable=False), - Column('namespace_id', Integer(), - ForeignKey('metadef_namespaces.id'), - nullable=False), - Column('name', String(80), nullable=False), - Column('description', Text()), - Column('required', Text()), - Column('schema', Text(), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime()), - UniqueConstraint('namespace_id', 'name', - **_constr_kwargs), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - if meta.bind.name != 'ibm_db_sa': - Index('ix_objects_namespace_id_name', - objects.c.namespace_id, - objects.c.name) - - return objects - - -def define_metadef_properties_table(meta): - - _constr_kwargs = {} - if meta.bind.name == 'ibm_db_sa': - _constr_kwargs['name'] = 'ix_metadef_properties_namespace_id_name' - - metadef_properties = Table( - 'metadef_properties', - meta, - Column('id', Integer(), primary_key=True, nullable=False), - 
Column('namespace_id', Integer(), ForeignKey('metadef_namespaces.id'), - nullable=False), - Column('name', String(80), nullable=False), - Column('schema', Text(), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime()), - UniqueConstraint('namespace_id', 'name', **_constr_kwargs), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - if meta.bind.name != 'ibm_db_sa': - Index('ix_metadef_properties_namespace_id_name', - metadef_properties.c.namespace_id, - metadef_properties.c.name) - - return metadef_properties - - -def define_metadef_resource_types_table(meta): - - _constr_kwargs = {} - if meta.bind.name == 'ibm_db_sa': - _constr_kwargs['name'] = 'ix_metadef_resource_types_name' - - metadef_res_types = Table( - 'metadef_resource_types', - meta, - Column('id', Integer(), primary_key=True, nullable=False), - Column('name', String(80), nullable=False), - Column('protected', Boolean(), nullable=False, default=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime()), - UniqueConstraint('name', **_constr_kwargs), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - if meta.bind.name != 'ibm_db_sa': - Index('ix_metadef_resource_types_name', - metadef_res_types.c.name) - - return metadef_res_types - - -def define_metadef_namespace_resource_types_table(meta): - - _constr_kwargs = {} - if meta.bind.name == 'ibm_db_sa': - _constr_kwargs['name'] = 'ix_metadef_ns_res_types_res_type_id_ns_id' - - metadef_associations = Table( - 'metadef_namespace_resource_types', - meta, - Column('resource_type_id', Integer(), - ForeignKey('metadef_resource_types.id'), - primary_key=True, nullable=False), - Column('namespace_id', Integer(), - ForeignKey('metadef_namespaces.id'), - primary_key=True, nullable=False), - Column('properties_target', String(80)), - Column('prefix', String(80)), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', 
DateTime()), - UniqueConstraint('resource_type_id', 'namespace_id', - **_constr_kwargs), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - if meta.bind.name != 'ibm_db_sa': - Index('ix_metadef_ns_res_types_res_type_id_ns_id', - metadef_associations.c.resource_type_id, - metadef_associations.c.namespace_id) - - return metadef_associations - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - tables = [define_metadef_namespaces_table(meta), - define_metadef_objects_table(meta), - define_metadef_properties_table(meta), - define_metadef_resource_types_table(meta), - define_metadef_namespace_resource_types_table(meta)] - create_tables(tables) - - resource_types_table = _get_metadef_resource_types_table(meta) - _populate_resource_types(resource_types_table) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/036_rename_metadef_schema_columns.py b/glance/db/sqlalchemy/migrate_repo/versions/036_rename_metadef_schema_columns.py deleted file mode 100644 index 79bced6a..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/036_rename_metadef_schema_columns.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy.schema import MetaData -from sqlalchemy.schema import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - metadef_objects = Table('metadef_objects', meta, autoload=True) - metadef_objects.c.schema.alter(name='json_schema') - - metadef_properties = Table('metadef_properties', meta, autoload=True) - metadef_properties.c.schema.alter(name='json_schema') diff --git a/glance/db/sqlalchemy/migrate_repo/versions/037_add_changes_to_satisfy_models.py b/glance/db/sqlalchemy/migrate_repo/versions/037_add_changes_to_satisfy_models.py deleted file mode 100644 index 0eba9665..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/037_add_changes_to_satisfy_models.py +++ /dev/null @@ -1,84 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sqlalchemy -from sqlalchemy import Table, Index, UniqueConstraint -from sqlalchemy.schema import (AddConstraint, DropConstraint, - ForeignKeyConstraint) -from sqlalchemy import sql -from sqlalchemy import update - - -def upgrade(migrate_engine): - meta = sqlalchemy.MetaData() - meta.bind = migrate_engine - - if migrate_engine.name not in ['mysql', 'postgresql']: - return - - image_properties = Table('image_properties', meta, autoload=True) - image_members = Table('image_members', meta, autoload=True) - images = Table('images', meta, autoload=True) - - # We have to ensure that we doesn't have `nulls` values since we are going - # to set nullable=False - migrate_engine.execute( - update(image_members) - .where(image_members.c.status == sql.expression.null()) - .values(status='pending')) - - migrate_engine.execute( - update(images) - .where(images.c.protected == sql.expression.null()) - .values(protected=sql.expression.false())) - - image_members.c.status.alter(nullable=False, server_default='pending') - images.c.protected.alter( - nullable=False, server_default=sql.expression.false()) - - if migrate_engine.name == 'postgresql': - Index('ix_image_properties_image_id_name', - image_properties.c.image_id, - image_properties.c.name).drop() - - # We have different names of this constraint in different versions of - # postgresql. Since we have only one constraint on this table, we can - # get it in the following way. 
- name = migrate_engine.execute( - """SELECT conname - FROM pg_constraint - WHERE conrelid = - (SELECT oid - FROM pg_class - WHERE relname LIKE 'image_properties') - AND contype = 'u';""").scalar() - - constraint = UniqueConstraint(image_properties.c.image_id, - image_properties.c.name, - name='%s' % name) - migrate_engine.execute(DropConstraint(constraint)) - - constraint = UniqueConstraint(image_properties.c.image_id, - image_properties.c.name, - name='ix_image_properties_image_id_name') - migrate_engine.execute(AddConstraint(constraint)) - - images.c.id.alter(server_default=None) - if migrate_engine.name == 'mysql': - constraint = UniqueConstraint(image_properties.c.image_id, - image_properties.c.name, - name='image_id') - migrate_engine.execute(DropConstraint(constraint)) - image_locations = Table('image_locations', meta, autoload=True) - if len(image_locations.foreign_keys) == 0: - migrate_engine.execute(AddConstraint(ForeignKeyConstraint( - [image_locations.c.image_id], [images.c.id]))) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/037_sqlite_upgrade.sql b/glance/db/sqlalchemy/migrate_repo/versions/037_sqlite_upgrade.sql deleted file mode 100644 index c7444158..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/037_sqlite_upgrade.sql +++ /dev/null @@ -1,159 +0,0 @@ -UPDATE images SET protected = 0 WHERE protected is NULL; -UPDATE image_members SET status = 'pending' WHERE status is NULL; - -CREATE TEMPORARY TABLE images_backup ( - id VARCHAR(36) NOT NULL, - name VARCHAR(255), - size INTEGER, - status VARCHAR(30) NOT NULL, - is_public BOOLEAN NOT NULL, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - disk_format VARCHAR(20), - container_format VARCHAR(20), - checksum VARCHAR(32), - owner VARCHAR(255), - min_disk INTEGER, - min_ram INTEGER, - protected BOOLEAN NOT NULL DEFAULT 0, - virtual_size INTEGER, - PRIMARY KEY (id), - CHECK (is_public IN (0, 1)), - CHECK (deleted IN (0, 1)) -); - 
-INSERT INTO images_backup - SELECT id, name, size, status, is_public, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram, protected, virtual_size - FROM images; - -DROP TABLE images; - -CREATE TABLE images ( - id VARCHAR(36) NOT NULL, - name VARCHAR(255), - size INTEGER, - status VARCHAR(30) NOT NULL, - is_public BOOLEAN NOT NULL, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - disk_format VARCHAR(20), - container_format VARCHAR(20), - checksum VARCHAR(32), - owner VARCHAR(255), - min_disk INTEGER NOT NULL, - min_ram INTEGER NOT NULL, - protected BOOLEAN NOT NULL DEFAULT 0, - virtual_size INTEGER, - PRIMARY KEY (id), - CHECK (is_public IN (0, 1)), - CHECK (deleted IN (0, 1)) -); - -CREATE INDEX ix_images_deleted ON images (deleted); -CREATE INDEX ix_images_is_public ON images (is_public); -CREATE INDEX owner_image_idx ON images (owner); -CREATE INDEX checksum_image_idx ON images (checksum); - - -INSERT INTO images - SELECT id, name, size, status, is_public, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram, protected, virtual_size - FROM images_backup; - -DROP TABLE images_backup; - -CREATE TEMPORARY TABLE image_members_backup ( - id INTEGER NOT NULL, - image_id VARCHAR(36) NOT NULL, - member VARCHAR(255) NOT NULL, - can_share BOOLEAN NOT NULL, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - status VARCHAR(20) NOT NULL DEFAULT 'pending', - PRIMARY KEY (id), - UNIQUE (image_id, member), - CHECK (can_share IN (0, 1)), - CHECK (deleted IN (0, 1)), - FOREIGN KEY(image_id) REFERENCES images (id) -); - -INSERT INTO image_members_backup - SELECT id, image_id, member, can_share, created_at, updated_at, deleted_at, deleted, status - FROM image_members; - -DROP TABLE image_members; - -CREATE TABLE image_members ( - id INTEGER NOT NULL, - 
image_id VARCHAR(36) NOT NULL, - member VARCHAR(255) NOT NULL, - can_share BOOLEAN NOT NULL, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - status VARCHAR(20) NOT NULL DEFAULT 'pending', - PRIMARY KEY (id), - UNIQUE (image_id, member), - CHECK (can_share IN (0, 1)), - CHECK (deleted IN (0, 1)), - FOREIGN KEY(image_id) REFERENCES images (id), - CONSTRAINT image_members_image_id_member_deleted_at_key UNIQUE (image_id, member, deleted_at) -); - -CREATE INDEX ix_image_members_deleted ON image_members (deleted); -CREATE INDEX ix_image_members_image_id ON image_members (image_id); -CREATE INDEX ix_image_members_image_id_member ON image_members (image_id, member); - -INSERT INTO image_members - SELECT id, image_id, member, can_share, created_at, updated_at, deleted_at, deleted, status - FROM image_members_backup; - -DROP TABLE image_members_backup; - -CREATE TEMPORARY TABLE image_properties_backup ( - id INTEGER NOT NULL, - image_id VARCHAR(36) NOT NULL, - name VARCHAR(255) NOT NULL, - value TEXT, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - PRIMARY KEY (id) -); - -INSERT INTO image_properties_backup - SELECT id, image_id, name, value, created_at, updated_at, deleted_at, deleted - FROM image_properties; - -DROP TABLE image_properties; - -CREATE TABLE image_properties ( - id INTEGER NOT NULL, - image_id VARCHAR(36) NOT NULL, - name VARCHAR(255) NOT NULL, - value TEXT, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - PRIMARY KEY (id), - CHECK (deleted IN (0, 1)), - FOREIGN KEY(image_id) REFERENCES images (id), - CONSTRAINT ix_image_properties_image_id_name UNIQUE (image_id, name) -); - -CREATE INDEX ix_image_properties_deleted ON image_properties (deleted); -CREATE INDEX ix_image_properties_image_id ON image_properties (image_id); - -INSERT INTO image_properties (id, image_id, name, value, 
created_at, updated_at, deleted_at, deleted) - SELECT id, image_id, name, value, created_at, updated_at, deleted_at, deleted - FROM image_properties_backup; - -DROP TABLE image_properties_backup; diff --git a/glance/db/sqlalchemy/migrate_repo/versions/038_add_metadef_tags_table.py b/glance/db/sqlalchemy/migrate_repo/versions/038_add_metadef_tags_table.py deleted file mode 100644 index fced9f1e..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/038_add_metadef_tags_table.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy.schema import ( - Column, Index, MetaData, Table, UniqueConstraint) # noqa - -from glance.db.sqlalchemy.migrate_repo.schema import ( - DateTime, Integer, String, create_tables) # noqa - - -def define_metadef_tags_table(meta): - _constr_kwargs = {} - metadef_tags = Table('metadef_tags', - meta, - Column('id', Integer(), primary_key=True, - nullable=False), - Column('namespace_id', Integer(), - nullable=False), - Column('name', String(80), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime()), - UniqueConstraint('namespace_id', 'name', - **_constr_kwargs), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=False) - - if meta.bind.name != 'ibm_db_sa': - Index('ix_tags_namespace_id_name', - metadef_tags.c.namespace_id, - metadef_tags.c.name) - - return metadef_tags - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - tables = [define_metadef_tags_table(meta)] - create_tables(tables) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/039_add_changes_to_satisfy_models_metadef.py b/glance/db/sqlalchemy/migrate_repo/versions/039_add_changes_to_satisfy_models_metadef.py deleted file mode 100644 index 0466f3ad..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/039_add_changes_to_satisfy_models_metadef.py +++ /dev/null @@ -1,196 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import migrate -import sqlalchemy -from sqlalchemy import inspect -from sqlalchemy import (Table, Index, UniqueConstraint) -from sqlalchemy.schema import (DropConstraint) - - -def _change_db2_unique_constraint(operation_type, constraint_name, *columns): - constraint = migrate.UniqueConstraint(*columns, name=constraint_name) - - operation = getattr(constraint, operation_type) - operation() - - -def upgrade(migrate_engine): - meta = sqlalchemy.MetaData() - meta.bind = migrate_engine - inspector = inspect(migrate_engine) - - metadef_namespaces = Table('metadef_namespaces', meta, autoload=True) - metadef_properties = Table('metadef_properties', meta, autoload=True) - metadef_objects = Table('metadef_objects', meta, autoload=True) - metadef_ns_res_types = Table('metadef_namespace_resource_types', - meta, autoload=True) - metadef_resource_types = Table('metadef_resource_types', meta, - autoload=True) - metadef_tags = Table('metadef_tags', meta, autoload=True) - - constraints = [('ix_namespaces_namespace', - [metadef_namespaces.c.namespace]), - ('ix_objects_namespace_id_name', - [metadef_objects.c.namespace_id, - metadef_objects.c.name]), - ('ix_metadef_properties_namespace_id_name', - [metadef_properties.c.namespace_id, - metadef_properties.c.name])] - metadef_tags_constraints = inspector.get_unique_constraints('metadef_tags') - for constraint in metadef_tags_constraints: - if set(constraint['column_names']) == set(['namespace_id', 'name']): - constraints.append((constraint['name'], - [metadef_tags.c.namespace_id, - metadef_tags.c.name])) - if meta.bind.name == "ibm_db_sa": - # For db2, the following constraints need to be dropped first, - # otherwise the index like ix_metadef_ns_res_types_namespace_id - # will fail to create. These constraints will be added back at - # the end. It should not affect the origional logic for other - # database backends. 
- for (constraint_name, cols) in constraints: - _change_db2_unique_constraint('drop', constraint_name, *cols) - else: - Index('ix_namespaces_namespace', metadef_namespaces.c.namespace).drop() - - Index('ix_objects_namespace_id_name', metadef_objects.c.namespace_id, - metadef_objects.c.name).drop() - - Index('ix_metadef_properties_namespace_id_name', - metadef_properties.c.namespace_id, - metadef_properties.c.name).drop() - - fkc = migrate.ForeignKeyConstraint([metadef_tags.c.namespace_id], - [metadef_namespaces.c.id]) - fkc.create() - - # `migrate` module removes unique constraint after adding - # foreign key to the table in sqlite. - # The reason of this issue is that it isn't possible to add fkc to - # existing table in sqlite. Instead of this we should recreate the table - # with needed fkc in the declaration. Migrate package provide us with such - # possibility, but unfortunately it recreates the table without - # constraints. Create unique constraint manually. - if migrate_engine.name == 'sqlite' and len( - inspector.get_unique_constraints('metadef_tags')) == 0: - uc = migrate.UniqueConstraint(metadef_tags.c.namespace_id, - metadef_tags.c.name) - uc.create() - - if meta.bind.name != "ibm_db_sa": - Index('ix_tags_namespace_id_name', metadef_tags.c.namespace_id, - metadef_tags.c.name).drop() - - Index('ix_metadef_tags_name', metadef_tags.c.name).create() - - Index('ix_metadef_tags_namespace_id', metadef_tags.c.namespace_id, - metadef_tags.c.name).create() - - if migrate_engine.name == 'mysql': - # We need to drop some foreign keys first because unique constraints - # that we want to delete depend on them. So drop the fk and recreate - # it again after unique constraint deletion. 
- fkc = migrate.ForeignKeyConstraint([metadef_properties.c.namespace_id], - [metadef_namespaces.c.id], - name='metadef_properties_ibfk_1') - fkc.drop() - constraint = UniqueConstraint(metadef_properties.c.namespace_id, - metadef_properties.c.name, - name='namespace_id') - migrate_engine.execute(DropConstraint(constraint)) - fkc.create() - - fkc = migrate.ForeignKeyConstraint([metadef_objects.c.namespace_id], - [metadef_namespaces.c.id], - name='metadef_objects_ibfk_1') - fkc.drop() - constraint = UniqueConstraint(metadef_objects.c.namespace_id, - metadef_objects.c.name, - name='namespace_id') - migrate_engine.execute(DropConstraint(constraint)) - fkc.create() - - constraint = UniqueConstraint(metadef_ns_res_types.c.resource_type_id, - metadef_ns_res_types.c.namespace_id, - name='resource_type_id') - migrate_engine.execute(DropConstraint(constraint)) - - constraint = UniqueConstraint(metadef_namespaces.c.namespace, - name='namespace') - migrate_engine.execute(DropConstraint(constraint)) - - constraint = UniqueConstraint(metadef_resource_types.c.name, - name='name') - migrate_engine.execute(DropConstraint(constraint)) - - if migrate_engine.name == 'postgresql': - met_obj_index_name = ( - inspector.get_unique_constraints('metadef_objects')[0]['name']) - constraint = UniqueConstraint( - metadef_objects.c.namespace_id, - metadef_objects.c.name, - name=met_obj_index_name) - migrate_engine.execute(DropConstraint(constraint)) - - met_prop_index_name = ( - inspector.get_unique_constraints('metadef_properties')[0]['name']) - constraint = UniqueConstraint( - metadef_properties.c.namespace_id, - metadef_properties.c.name, - name=met_prop_index_name) - migrate_engine.execute(DropConstraint(constraint)) - - metadef_namespaces_name = ( - inspector.get_unique_constraints( - 'metadef_namespaces')[0]['name']) - constraint = UniqueConstraint( - metadef_namespaces.c.namespace, - name=metadef_namespaces_name) - migrate_engine.execute(DropConstraint(constraint)) - - 
metadef_resource_types_name = (inspector.get_unique_constraints( - 'metadef_resource_types')[0]['name']) - constraint = UniqueConstraint( - metadef_resource_types.c.name, - name=metadef_resource_types_name) - migrate_engine.execute(DropConstraint(constraint)) - - constraint = UniqueConstraint( - metadef_tags.c.namespace_id, - metadef_tags.c.name, - name='metadef_tags_namespace_id_name_key') - migrate_engine.execute(DropConstraint(constraint)) - - Index('ix_metadef_ns_res_types_namespace_id', - metadef_ns_res_types.c.namespace_id).create() - - Index('ix_metadef_namespaces_namespace', - metadef_namespaces.c.namespace).create() - - Index('ix_metadef_namespaces_owner', metadef_namespaces.c.owner).create() - - Index('ix_metadef_objects_name', metadef_objects.c.name).create() - - Index('ix_metadef_objects_namespace_id', - metadef_objects.c.namespace_id).create() - - Index('ix_metadef_properties_name', metadef_properties.c.name).create() - - Index('ix_metadef_properties_namespace_id', - metadef_properties.c.namespace_id).create() - - if meta.bind.name == "ibm_db_sa": - # For db2, add these constraints back. It should not affect the - # origional logic for other database backends. - for (constraint_name, cols) in constraints: - _change_db2_unique_constraint('create', constraint_name, *cols) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/040_add_changes_to_satisfy_metadefs_tags.py b/glance/db/sqlalchemy/migrate_repo/versions/040_add_changes_to_satisfy_metadefs_tags.py deleted file mode 100644 index 1c52e901..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/040_add_changes_to_satisfy_metadefs_tags.py +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import sqlalchemy -from sqlalchemy import (Table, Index) - - -def upgrade(migrate_engine): - if migrate_engine.name == 'mysql': - meta = sqlalchemy.MetaData() - meta.bind = migrate_engine - metadef_tags = Table('metadef_tags', meta, autoload=True) - Index('namespace_id', metadef_tags.c.namespace_id, - metadef_tags.c.name).drop() diff --git a/glance/db/sqlalchemy/migrate_repo/versions/041_add_artifact_tables.py b/glance/db/sqlalchemy/migrate_repo/versions/041_add_artifact_tables.py deleted file mode 100644 index 71ce32c7..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/041_add_artifact_tables.py +++ /dev/null @@ -1,212 +0,0 @@ -# Copyright (c) 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy.schema import (Column, ForeignKey, Index, MetaData, Table) - - -from glance.db.sqlalchemy.migrate_repo.schema import ( - BigInteger, Boolean, DateTime, Integer, Numeric, String, Text, - create_tables) # noqa - - -def define_artifacts_table(meta): - artifacts = Table('artifacts', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('name', String(255), nullable=False), - Column('type_name', String(255), nullable=False), - Column('type_version_prefix', BigInteger(), - nullable=False), - Column('type_version_suffix', String(255)), - Column('type_version_meta', String(255)), - Column('version_prefix', BigInteger(), nullable=False), - Column('version_suffix', String(255)), - Column('version_meta', String(255)), - Column('description', Text()), - Column('visibility', String(32), nullable=False), - Column('state', String(32), nullable=False), - Column('owner', String(255), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), - nullable=False), - Column('deleted_at', DateTime()), - Column('published_at', DateTime()), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - Index('ix_artifact_name_and_version', artifacts.c.name, - artifacts.c.version_prefix, artifacts.c.version_suffix) - Index('ix_artifact_type', artifacts.c.type_name, - artifacts.c.type_version_prefix, artifacts.c.type_version_suffix) - Index('ix_artifact_state', artifacts.c.state) - Index('ix_artifact_owner', artifacts.c.owner) - Index('ix_artifact_visibility', artifacts.c.visibility) - - return artifacts - - -def define_artifact_tags_table(meta): - artifact_tags = Table('artifact_tags', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('artifact_id', String(36), - ForeignKey('artifacts.id'), nullable=False), - Column('value', String(255), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), - 
nullable=False), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - Index('ix_artifact_tags_artifact_id', artifact_tags.c.artifact_id) - Index('ix_artifact_tags_artifact_id_tag_value', - artifact_tags.c.artifact_id, artifact_tags.c.value) - - return artifact_tags - - -def define_artifact_dependencies_table(meta): - artifact_dependencies = Table('artifact_dependencies', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('artifact_source', String(36), - ForeignKey('artifacts.id'), - nullable=False), - Column('artifact_dest', String(36), - ForeignKey('artifacts.id'), - nullable=False), - Column('artifact_origin', String(36), - ForeignKey('artifacts.id'), - nullable=False), - Column('is_direct', Boolean(), - nullable=False), - Column('position', Integer()), - Column('name', String(36)), - Column('created_at', DateTime(), - nullable=False), - Column('updated_at', DateTime(), - nullable=False), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - - Index('ix_artifact_dependencies_source_id', - artifact_dependencies.c.artifact_source) - Index('ix_artifact_dependencies_dest_id', - artifact_dependencies.c.artifact_dest), - Index('ix_artifact_dependencies_origin_id', - artifact_dependencies.c.artifact_origin) - Index('ix_artifact_dependencies_direct_dependencies', - artifact_dependencies.c.artifact_source, - artifact_dependencies.c.is_direct) - return artifact_dependencies - - -def define_artifact_blobs_table(meta): - artifact_blobs = Table('artifact_blobs', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('artifact_id', String(36), - ForeignKey('artifacts.id'), - nullable=False), - Column('size', BigInteger(), nullable=False), - Column('checksum', String(32)), - Column('name', String(255), nullable=False), - Column('item_key', String(329)), - Column('position', Integer()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), - 
nullable=False), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - Index('ix_artifact_blobs_artifact_id', - artifact_blobs.c.artifact_id) - Index('ix_artifact_blobs_name', - artifact_blobs.c.name) - return artifact_blobs - - -def define_artifact_properties_table(meta): - artifact_properties = Table('artifact_properties', - meta, - Column('id', String(36), - primary_key=True, - nullable=False), - Column('artifact_id', String(36), - ForeignKey('artifacts.id'), - nullable=False), - Column('name', String(255), - nullable=False), - Column('string_value', String(255)), - Column('int_value', Integer()), - Column('numeric_value', Numeric()), - Column('bool_value', Boolean()), - Column('text_value', Text()), - Column('created_at', DateTime(), - nullable=False), - Column('updated_at', DateTime(), - nullable=False), - Column('position', Integer()), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - Index('ix_artifact_properties_artifact_id', - artifact_properties.c.artifact_id) - Index('ix_artifact_properties_name', artifact_properties.c.name) - return artifact_properties - - -def define_artifact_blob_locations_table(meta): - artifact_blob_locations = Table('artifact_blob_locations', - meta, - Column('id', String(36), - primary_key=True, - nullable=False), - Column('blob_id', String(36), - ForeignKey('artifact_blobs.id'), - nullable=False), - Column('value', Text(), nullable=False), - Column('created_at', DateTime(), - nullable=False), - Column('updated_at', DateTime(), - nullable=False), - Column('position', Integer()), - Column('status', String(36), - nullable=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - extend_existing=True) - Index('ix_artifact_blob_locations_blob_id', - artifact_blob_locations.c.blob_id) - - return artifact_blob_locations - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - tables = [define_artifacts_table(meta), - define_artifact_tags_table(meta), - 
define_artifact_properties_table(meta), - define_artifact_blobs_table(meta), - define_artifact_blob_locations_table(meta), - define_artifact_dependencies_table(meta)] - create_tables(tables) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/042_add_changes_to_reinstall_unique_metadef_constraints.py b/glance/db/sqlalchemy/migrate_repo/versions/042_add_changes_to_reinstall_unique_metadef_constraints.py deleted file mode 100644 index af7863a3..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/042_add_changes_to_reinstall_unique_metadef_constraints.py +++ /dev/null @@ -1,442 +0,0 @@ - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import migrate -import sqlalchemy -from sqlalchemy import (func, Index, inspect, orm, String, Table, type_coerce) - - -# The _upgrade...get_duplicate() def's are separate functions to -# accommodate sqlite which locks the database against updates as long as -# db_recs is active. -# In addition, sqlite doesn't support the function 'concat' between -# Strings and Integers, so, the updating of records is also adjusted. 
-def _upgrade_metadef_namespaces_get_duplicates(migrate_engine): - meta = sqlalchemy.schema.MetaData(migrate_engine) - metadef_namespaces = Table('metadef_namespaces', meta, autoload=True) - - session = orm.sessionmaker(bind=migrate_engine)() - db_recs = (session.query(func.min(metadef_namespaces.c.id), - metadef_namespaces.c.namespace) - .group_by(metadef_namespaces.c.namespace) - .having(func.count(metadef_namespaces.c.namespace) > 1)) - dbrecs = [] - for row in db_recs: - dbrecs.append({'id': row[0], 'namespace': row[1]}) - session.close() - - return dbrecs - - -def _upgrade_metadef_objects_get_duplicates(migrate_engine): - meta = sqlalchemy.schema.MetaData(migrate_engine) - metadef_objects = Table('metadef_objects', meta, autoload=True) - - session = orm.sessionmaker(bind=migrate_engine)() - db_recs = (session.query(func.min(metadef_objects.c.id), - metadef_objects.c.namespace_id, - metadef_objects.c.name) - .group_by(metadef_objects.c.namespace_id, - metadef_objects.c.name) - .having(func.count() > 1)) - dbrecs = [] - for row in db_recs: - dbrecs.append({'id': row[0], 'namespace_id': row[1], 'name': row[2]}) - session.close() - - return dbrecs - - -def _upgrade_metadef_properties_get_duplicates(migrate_engine): - meta = sqlalchemy.schema.MetaData(migrate_engine) - metadef_properties = Table('metadef_properties', meta, autoload=True) - - session = orm.sessionmaker(bind=migrate_engine)() - db_recs = (session.query(func.min(metadef_properties.c.id), - metadef_properties.c.namespace_id, - metadef_properties.c.name) - .group_by(metadef_properties.c.namespace_id, - metadef_properties.c.name) - .having(func.count() > 1)) - dbrecs = [] - for row in db_recs: - dbrecs.append({'id': row[0], 'namespace_id': row[1], 'name': row[2]}) - session.close() - - return dbrecs - - -def _upgrade_metadef_tags_get_duplicates(migrate_engine): - meta = sqlalchemy.schema.MetaData(migrate_engine) - metadef_tags = Table('metadef_tags', meta, autoload=True) - - session = 
orm.sessionmaker(bind=migrate_engine)() - db_recs = (session.query(func.min(metadef_tags.c.id), - metadef_tags.c.namespace_id, - metadef_tags.c.name) - .group_by(metadef_tags.c.namespace_id, - metadef_tags.c.name) - .having(func.count() > 1)) - dbrecs = [] - for row in db_recs: - dbrecs.append({'id': row[0], 'namespace_id': row[1], 'name': row[2]}) - session.close() - - return dbrecs - - -def _upgrade_metadef_resource_types_get_duplicates(migrate_engine): - meta = sqlalchemy.schema.MetaData(migrate_engine) - metadef_resource_types = Table('metadef_resource_types', meta, - autoload=True) - - session = orm.sessionmaker(bind=migrate_engine)() - db_recs = (session.query(func.min(metadef_resource_types.c.id), - metadef_resource_types.c.name) - .group_by(metadef_resource_types.c.name) - .having(func.count(metadef_resource_types.c.name) > 1)) - dbrecs = [] - for row in db_recs: - dbrecs.append({'id': row[0], 'name': row[1]}) - session.close() - - return dbrecs - - -def _upgrade_data(migrate_engine): - # Rename duplicates to be unique. 
- meta = sqlalchemy.schema.MetaData(migrate_engine) - - # ORM tables - metadef_namespaces = Table('metadef_namespaces', meta, autoload=True) - metadef_objects = Table('metadef_objects', meta, autoload=True) - metadef_properties = Table('metadef_properties', meta, autoload=True) - metadef_tags = Table('metadef_tags', meta, autoload=True) - metadef_resource_types = Table('metadef_resource_types', meta, - autoload=True) - - # Fix duplicate metadef_namespaces - # Update the non-first record(s) with an unique namespace value - dbrecs = _upgrade_metadef_namespaces_get_duplicates(migrate_engine) - for row in dbrecs: - s = (metadef_namespaces.update() - .where(metadef_namespaces.c.id > row['id']) - .where(metadef_namespaces.c.namespace == row['namespace']) - ) - if migrate_engine.name == 'sqlite': - s = (s.values(namespace=(row['namespace'] + '-DUPL-' + - type_coerce(metadef_namespaces.c.id, - String)), - display_name=(row['namespace'] + '-DUPL-' + - type_coerce(metadef_namespaces.c.id, - String)))) - else: - s = s.values(namespace=func.concat(row['namespace'], - '-DUPL-', - metadef_namespaces.c.id), - display_name=func.concat(row['namespace'], - '-DUPL-', - metadef_namespaces.c.id)) - s.execute() - - # Fix duplicate metadef_objects - dbrecs = _upgrade_metadef_objects_get_duplicates(migrate_engine) - for row in dbrecs: - s = (metadef_objects.update() - .where(metadef_objects.c.id > row['id']) - .where(metadef_objects.c.namespace_id == row['namespace_id']) - .where(metadef_objects.c.name == str(row['name'])) - ) - if migrate_engine.name == 'sqlite': - s = (s.values(name=(row['name'] + '-DUPL-' - + type_coerce(metadef_objects.c.id, String)))) - else: - s = s.values(name=func.concat(row['name'], '-DUPL-', - metadef_objects.c.id)) - s.execute() - - # Fix duplicate metadef_properties - dbrecs = _upgrade_metadef_properties_get_duplicates(migrate_engine) - for row in dbrecs: - s = (metadef_properties.update() - .where(metadef_properties.c.id > row['id']) - 
.where(metadef_properties.c.namespace_id == row['namespace_id']) - .where(metadef_properties.c.name == str(row['name'])) - ) - if migrate_engine.name == 'sqlite': - s = (s.values(name=(row['name'] + '-DUPL-' + - type_coerce(metadef_properties.c.id, String))) - ) - else: - s = s.values(name=func.concat(row['name'], '-DUPL-', - metadef_properties.c.id)) - s.execute() - - # Fix duplicate metadef_tags - dbrecs = _upgrade_metadef_tags_get_duplicates(migrate_engine) - for row in dbrecs: - s = (metadef_tags.update() - .where(metadef_tags.c.id > row['id']) - .where(metadef_tags.c.namespace_id == row['namespace_id']) - .where(metadef_tags.c.name == str(row['name'])) - ) - if migrate_engine.name == 'sqlite': - s = (s.values(name=(row['name'] + '-DUPL-' + - type_coerce(metadef_tags.c.id, String))) - ) - else: - s = s.values(name=func.concat(row['name'], '-DUPL-', - metadef_tags.c.id)) - s.execute() - - # Fix duplicate metadef_resource_types - dbrecs = _upgrade_metadef_resource_types_get_duplicates(migrate_engine) - for row in dbrecs: - s = (metadef_resource_types.update() - .where(metadef_resource_types.c.id > row['id']) - .where(metadef_resource_types.c.name == str(row['name'])) - ) - if migrate_engine.name == 'sqlite': - s = (s.values(name=(row['name'] + '-DUPL-' + - type_coerce(metadef_resource_types.c.id, - String))) - ) - else: - s = s.values(name=func.concat(row['name'], '-DUPL-', - metadef_resource_types.c.id)) - s.execute() - - -def _update_sqlite_namespace_id_name_constraint(metadef, metadef_namespaces, - new_constraint_name, - new_fk_name): - migrate.UniqueConstraint( - metadef.c.namespace_id, metadef.c.name).drop() - migrate.UniqueConstraint( - metadef.c.namespace_id, metadef.c.name, - name=new_constraint_name).create() - migrate.ForeignKeyConstraint( - [metadef.c.namespace_id], - [metadef_namespaces.c.id], - name=new_fk_name).create() - - -def _drop_unique_constraint_if_exists(inspector, table_name, metadef): - name = _get_unique_constraint_name(inspector, - 
table_name, - ['namespace_id', 'name']) - if name: - migrate.UniqueConstraint(metadef.c.namespace_id, - metadef.c.name, - name=name).drop() - - -def _drop_index_with_fk_constraint(metadef, metadef_namespaces, - index_name, - fk_old_name, fk_new_name): - - fkc = migrate.ForeignKeyConstraint([metadef.c.namespace_id], - [metadef_namespaces.c.id], - name=fk_old_name) - fkc.drop() - - if index_name: - Index(index_name, metadef.c.namespace_id).drop() - - # Rename the fk for consistency across all db's - fkc = migrate.ForeignKeyConstraint([metadef.c.namespace_id], - [metadef_namespaces.c.id], - name=fk_new_name) - fkc.create() - - -def _get_unique_constraint_name(inspector, table_name, columns): - constraints = inspector.get_unique_constraints(table_name) - for constraint in constraints: - if set(constraint['column_names']) == set(columns): - return constraint['name'] - return None - - -def _get_fk_constraint_name(inspector, table_name, columns): - constraints = inspector.get_foreign_keys(table_name) - for constraint in constraints: - if set(constraint['constrained_columns']) == set(columns): - return constraint['name'] - return None - - -def upgrade(migrate_engine): - - _upgrade_data(migrate_engine) - - meta = sqlalchemy.MetaData() - meta.bind = migrate_engine - inspector = inspect(migrate_engine) - - # ORM tables - metadef_namespaces = Table('metadef_namespaces', meta, autoload=True) - metadef_objects = Table('metadef_objects', meta, autoload=True) - metadef_properties = Table('metadef_properties', meta, autoload=True) - metadef_tags = Table('metadef_tags', meta, autoload=True) - metadef_ns_res_types = Table('metadef_namespace_resource_types', - meta, autoload=True) - metadef_resource_types = Table('metadef_resource_types', meta, - autoload=True) - - # Drop the bad, non-unique indices. - if migrate_engine.name == 'sqlite': - # For sqlite: - # Only after the unique constraints have been added should the indices - # be dropped. 
If done the other way, sqlite complains during - # constraint adding/dropping that the index does/does not exist. - # Note: The _get_unique_constraint_name, _get_fk_constraint_name - # return None for constraints that do in fact exist. Also, - # get_index_names returns names, but, the names can not be used with - # the Index(name, blah).drop() command, so, putting sqlite into - # it's own section. - - # Objects - _update_sqlite_namespace_id_name_constraint( - metadef_objects, metadef_namespaces, - 'uq_metadef_objects_namespace_id_name', - 'metadef_objects_fk_1') - - # Properties - _update_sqlite_namespace_id_name_constraint( - metadef_properties, metadef_namespaces, - 'uq_metadef_properties_namespace_id_name', - 'metadef_properties_fk_1') - - # Tags - _update_sqlite_namespace_id_name_constraint( - metadef_tags, metadef_namespaces, - 'uq_metadef_tags_namespace_id_name', - 'metadef_tags_fk_1') - - # Namespaces - migrate.UniqueConstraint( - metadef_namespaces.c.namespace).drop() - migrate.UniqueConstraint( - metadef_namespaces.c.namespace, - name='uq_metadef_namespaces_namespace').create() - - # ResourceTypes - migrate.UniqueConstraint( - metadef_resource_types.c.name).drop() - migrate.UniqueConstraint( - metadef_resource_types.c.name, - name='uq_metadef_resource_types_name').create() - - # Now drop the bad indices - Index('ix_metadef_objects_namespace_id', - metadef_objects.c.namespace_id, - metadef_objects.c.name).drop() - Index('ix_metadef_properties_namespace_id', - metadef_properties.c.namespace_id, - metadef_properties.c.name).drop() - Index('ix_metadef_tags_namespace_id', - metadef_tags.c.namespace_id, - metadef_tags.c.name).drop() - else: - # First drop the bad non-unique indices. - # To do that (for mysql), must first drop foreign key constraints - # BY NAME and then drop the bad indices. - # Finally, re-create the foreign key constraints with a consistent - # name. - - # DB2 still has unique constraints, but, they are badly named. 
- # Drop them, they will be recreated at the final step. - name = _get_unique_constraint_name(inspector, 'metadef_namespaces', - ['namespace']) - if name: - migrate.UniqueConstraint(metadef_namespaces.c.namespace, - name=name).drop() - _drop_unique_constraint_if_exists(inspector, 'metadef_objects', - metadef_objects) - _drop_unique_constraint_if_exists(inspector, 'metadef_properties', - metadef_properties) - _drop_unique_constraint_if_exists(inspector, 'metadef_tags', - metadef_tags) - name = _get_unique_constraint_name(inspector, 'metadef_resource_types', - ['name']) - if name: - migrate.UniqueConstraint(metadef_resource_types.c.name, - name=name).drop() - - # Objects - _drop_index_with_fk_constraint( - metadef_objects, metadef_namespaces, - 'ix_metadef_objects_namespace_id', - _get_fk_constraint_name( - inspector, 'metadef_objects', ['namespace_id']), - 'metadef_objects_fk_1') - - # Properties - _drop_index_with_fk_constraint( - metadef_properties, metadef_namespaces, - 'ix_metadef_properties_namespace_id', - _get_fk_constraint_name( - inspector, 'metadef_properties', ['namespace_id']), - 'metadef_properties_fk_1') - - # Tags - _drop_index_with_fk_constraint( - metadef_tags, metadef_namespaces, - 'ix_metadef_tags_namespace_id', - _get_fk_constraint_name( - inspector, 'metadef_tags', ['namespace_id']), - 'metadef_tags_fk_1') - - # Drop Others without fk constraints. - Index('ix_metadef_namespaces_namespace', - metadef_namespaces.c.namespace).drop() - - # The next two don't exist in ibm_db_sa, but, drop them everywhere else. 
- if migrate_engine.name != 'ibm_db_sa': - Index('ix_metadef_resource_types_name', - metadef_resource_types.c.name).drop() - # Not needed due to primary key on same columns - Index('ix_metadef_ns_res_types_res_type_id_ns_id', - metadef_ns_res_types.c.resource_type_id, - metadef_ns_res_types.c.namespace_id).drop() - - # Now, add back the dropped indexes as unique constraints - if migrate_engine.name != 'sqlite': - # Namespaces - migrate.UniqueConstraint( - metadef_namespaces.c.namespace, - name='uq_metadef_namespaces_namespace').create() - - # Objects - migrate.UniqueConstraint( - metadef_objects.c.namespace_id, - metadef_objects.c.name, - name='uq_metadef_objects_namespace_id_name').create() - - # Properties - migrate.UniqueConstraint( - metadef_properties.c.namespace_id, - metadef_properties.c.name, - name='uq_metadef_properties_namespace_id_name').create() - - # Tags - migrate.UniqueConstraint( - metadef_tags.c.namespace_id, - metadef_tags.c.name, - name='uq_metadef_tags_namespace_id_name').create() - - # Resource Types - migrate.UniqueConstraint( - metadef_resource_types.c.name, - name='uq_metadef_resource_types_name').create() diff --git a/glance/db/sqlalchemy/migrate_repo/versions/043_add_image_created_updated_idx.py b/glance/db/sqlalchemy/migrate_repo/versions/043_add_image_created_updated_idx.py deleted file mode 100644 index e953b611..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/043_add_image_created_updated_idx.py +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - - -from sqlalchemy import MetaData, Table, Index - -CREATED_AT_INDEX = 'created_at_image_idx' -UPDATED_AT_INDEX = 'updated_at_image_idx' - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - images = Table('images', meta, autoload=True) - - created_index = Index(CREATED_AT_INDEX, images.c.created_at) - created_index.create(migrate_engine) - updated_index = Index(UPDATED_AT_INDEX, images.c.updated_at) - updated_index.create(migrate_engine) diff --git a/glance/db/sqlalchemy/migrate_repo/versions/044_update_metadef_os_nova_server.py b/glance/db/sqlalchemy/migrate_repo/versions/044_update_metadef_os_nova_server.py deleted file mode 100644 index f7490621..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/044_update_metadef_os_nova_server.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) 2016 Hewlett Packard Enterprise Software, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from sqlalchemy import MetaData, Table - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - resource_types_table = Table('metadef_resource_types', meta, autoload=True) - - resource_types_table.update(values={'name': 'OS::Nova::Server'}).where( - resource_types_table.c.name == 'OS::Nova::Instance').execute() diff --git a/glance/db/sqlalchemy/migrate_repo/versions/045_add_visibility.py b/glance/db/sqlalchemy/migrate_repo/versions/045_add_visibility.py deleted file mode 100644 index c2724ab9..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/045_add_visibility.py +++ /dev/null @@ -1,51 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import Column, Enum, Index, MetaData, Table, select, not_, and_ -from sqlalchemy.engine import reflection - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - - images = Table('images', meta, autoload=True) - - enum = Enum('private', 'public', 'shared', 'community', metadata=meta, - name='image_visibility') - enum.create() - - images.create_column(Column('visibility', enum, nullable=False, - server_default='shared')) - visibility_index = Index('visibility_image_idx', images.c.visibility) - visibility_index.create(migrate_engine) - - images.update(values={'visibility': 'public'}).where( - images.c.is_public).execute() - - image_members = Table('image_members', meta, autoload=True) - - # NOTE(dharinic): Mark all the non-public images as 'private' first - images.update().values(visibility='private').where( - not_(images.c.is_public)).execute() - # NOTE(dharinic): Identify 'shared' images from the above - images.update().values(visibility='shared').where(and_( - images.c.visibility == 'private', images.c.id.in_(select( - [image_members.c.image_id]).distinct().where( - not_(image_members.c.deleted))))).execute() - - insp = reflection.Inspector.from_engine(migrate_engine) - for index in insp.get_indexes('images'): - if 'ix_images_is_public' == index['name']: - Index('ix_images_is_public', images.c.is_public).drop() - break - - images.c.is_public.drop() diff --git a/glance/db/sqlalchemy/migrate_repo/versions/045_sqlite_upgrade.sql b/glance/db/sqlalchemy/migrate_repo/versions/045_sqlite_upgrade.sql deleted file mode 100644 index 0e848cce..00000000 --- a/glance/db/sqlalchemy/migrate_repo/versions/045_sqlite_upgrade.sql +++ /dev/null @@ -1,162 +0,0 @@ -CREATE TEMPORARY TABLE images_backup ( - id VARCHAR(36) NOT NULL, - name VARCHAR(255), - size INTEGER, - status VARCHAR(30) NOT NULL, - is_public BOOLEAN NOT NULL, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - disk_format 
VARCHAR(20), - container_format VARCHAR(20), - checksum VARCHAR(32), - owner VARCHAR(255), - min_disk INTEGER NOT NULL, - min_ram INTEGER NOT NULL, - protected BOOLEAN DEFAULT 0 NOT NULL, - virtual_size INTEGER, - PRIMARY KEY (id), - CHECK (is_public IN (0, 1)), - CHECK (deleted IN (0, 1)), - CHECK (protected IN (0, 1)) -); - -INSERT INTO images_backup - SELECT id, - name, - size, - status, - is_public, - created_at, - updated_at, - deleted_at, - deleted, - disk_format, - container_format, - checksum, - owner, - min_disk, - min_ram, - protected, - virtual_size - FROM images; - -DROP TABLE images; - -CREATE TABLE images ( - id VARCHAR(36) NOT NULL, - name VARCHAR(255), - size INTEGER, - status VARCHAR(30) NOT NULL, - created_at DATETIME NOT NULL, - updated_at DATETIME, - deleted_at DATETIME, - deleted BOOLEAN NOT NULL, - disk_format VARCHAR(20), - container_format VARCHAR(20), - checksum VARCHAR(32), - owner VARCHAR(255), - min_disk INTEGER NOT NULL, - min_ram INTEGER NOT NULL, - protected BOOLEAN DEFAULT 0 NOT NULL, - virtual_size INTEGER, - visibility VARCHAR(9) DEFAULT 'shared' NOT NULL, - PRIMARY KEY (id), - CHECK (deleted IN (0, 1)), - CHECK (protected IN (0, 1)), - CONSTRAINT image_visibility CHECK (visibility IN ('private', 'public', 'shared', 'community')) -); - -CREATE INDEX checksum_image_idx ON images (checksum); -CREATE INDEX visibility_image_idx ON images (visibility); -CREATE INDEX ix_images_deleted ON images (deleted); -CREATE INDEX owner_image_idx ON images (owner); -CREATE INDEX created_at_image_idx ON images (created_at); -CREATE INDEX updated_at_image_idx ON images (updated_at); - --- Copy over all the 'public' rows - -INSERT INTO images ( - id, - name, - size, - status, - created_at, - updated_at, - deleted_at, - deleted, - disk_format, - container_format, - checksum, - owner, - min_disk, - min_ram, - protected, - virtual_size - ) - SELECT id, - name, - size, - status, - created_at, - updated_at, - deleted_at, - deleted, - disk_format, - 
container_format, - checksum, - owner, - min_disk, - min_ram, - protected, - virtual_size - FROM images_backup - WHERE is_public=1; - - -UPDATE images SET visibility='public'; - --- Now copy over the 'private' rows - -INSERT INTO images ( - id, - name, - size, - status, - created_at, - updated_at, - deleted_at, - deleted, - disk_format, - container_format, - checksum, - owner, - min_disk, - min_ram, - protected, - virtual_size - ) - SELECT id, - name, - size, - status, - created_at, - updated_at, - deleted_at, - deleted, - disk_format, - container_format, - checksum, - owner, - min_disk, - min_ram, - protected, - virtual_size - FROM images_backup - WHERE is_public=0; - -UPDATE images SET visibility='private' WHERE visibility='shared'; -UPDATE images SET visibility='shared' WHERE visibility='private' AND id IN (SELECT DISTINCT image_id FROM image_members WHERE deleted != 1); - -DROP TABLE images_backup; diff --git a/glance/db/sqlalchemy/migrate_repo/versions/__init__.py b/glance/db/sqlalchemy/migrate_repo/versions/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/db/sqlalchemy/models.py b/glance/db/sqlalchemy/models.py deleted file mode 100644 index cc1b6eb5..00000000 --- a/glance/db/sqlalchemy/models.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -SQLAlchemy models for glance data -""" - -import uuid - -from oslo_db.sqlalchemy import models -from oslo_serialization import jsonutils -from sqlalchemy import BigInteger -from sqlalchemy import Boolean -from sqlalchemy import Column -from sqlalchemy import DateTime -from sqlalchemy import Enum -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import ForeignKey -from sqlalchemy import Index -from sqlalchemy import Integer -from sqlalchemy.orm import backref, relationship -from sqlalchemy import sql -from sqlalchemy import String -from sqlalchemy import Text -from sqlalchemy.types import TypeDecorator -from sqlalchemy import UniqueConstraint - -from glance.common import timeutils - - -BASE = declarative_base() - - -class JSONEncodedDict(TypeDecorator): - """Represents an immutable structure as a json-encoded string""" - - impl = Text - - def process_bind_param(self, value, dialect): - if value is not None: - value = jsonutils.dumps(value) - return value - - def process_result_value(self, value, dialect): - if value is not None: - value = jsonutils.loads(value) - return value - - -class GlanceBase(models.ModelBase, models.TimestampMixin): - """Base class for Glance Models.""" - - __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'} - __table_initialized__ = False - __protected_attributes__ = set([ - "created_at", "updated_at", "deleted_at", "deleted"]) - - def save(self, session=None): - from glance.db.sqlalchemy import api as db_api - super(GlanceBase, self).save(session or db_api.get_session()) - - created_at = Column(DateTime, default=lambda: timeutils.utcnow(), - nullable=False) - # TODO(vsergeyev): Column `updated_at` have no default value in - # OpenStack common code. We should decide, is this value - # required and make changes in oslo (if required) or - # in glance (if not). 
- updated_at = Column(DateTime, default=lambda: timeutils.utcnow(), - nullable=True, onupdate=lambda: timeutils.utcnow()) - # TODO(boris-42): Use SoftDeleteMixin instead of deleted Column after - # migration that provides UniqueConstraints and change - # type of this column. - deleted_at = Column(DateTime) - deleted = Column(Boolean, nullable=False, default=False) - - def delete(self, session=None): - """Delete this object.""" - self.deleted = True - self.deleted_at = timeutils.utcnow() - self.save(session=session) - - def keys(self): - return self.__dict__.keys() - - def values(self): - return self.__dict__.values() - - def items(self): - return self.__dict__.items() - - def to_dict(self): - d = self.__dict__.copy() - # NOTE(flaper87): Remove - # private state instance - # It is not serializable - # and causes CircularReference - d.pop("_sa_instance_state") - return d - - -class Image(BASE, GlanceBase): - """Represents an image in the datastore.""" - __tablename__ = 'images' - __table_args__ = (Index('checksum_image_idx', 'checksum'), - Index('visibility_image_idx', 'visibility'), - Index('ix_images_deleted', 'deleted'), - Index('owner_image_idx', 'owner'), - Index('created_at_image_idx', 'created_at'), - Index('updated_at_image_idx', 'updated_at')) - - id = Column(String(36), primary_key=True, - default=lambda: str(uuid.uuid4())) - name = Column(String(255)) - disk_format = Column(String(20)) - container_format = Column(String(20)) - size = Column(BigInteger().with_variant(Integer, "sqlite")) - virtual_size = Column(BigInteger().with_variant(Integer, "sqlite")) - status = Column(String(30), nullable=False) - visibility = Column(Enum('private', 'public', 'shared', 'community', - name='image_visibility'), nullable=False, - server_default='shared') - checksum = Column(String(32)) - min_disk = Column(Integer, nullable=False, default=0) - min_ram = Column(Integer, nullable=False, default=0) - owner = Column(String(255)) - protected = Column(Boolean, nullable=False, 
default=False, - server_default=sql.expression.false()) - - -class ImageProperty(BASE, GlanceBase): - """Represents an image properties in the datastore.""" - __tablename__ = 'image_properties' - __table_args__ = (Index('ix_image_properties_image_id', 'image_id'), - Index('ix_image_properties_deleted', 'deleted'), - UniqueConstraint('image_id', - 'name', - name='ix_image_properties_' - 'image_id_name'),) - - id = Column(Integer, primary_key=True) - image_id = Column(String(36), ForeignKey('images.id'), - nullable=False) - image = relationship(Image, backref=backref('properties')) - - name = Column(String(255), nullable=False) - value = Column(Text) - - -class ImageTag(BASE, GlanceBase): - """Represents an image tag in the datastore.""" - __tablename__ = 'image_tags' - __table_args__ = (Index('ix_image_tags_image_id', 'image_id'), - Index('ix_image_tags_image_id_tag_value', - 'image_id', - 'value'),) - - id = Column(Integer, primary_key=True, nullable=False) - image_id = Column(String(36), ForeignKey('images.id'), nullable=False) - image = relationship(Image, backref=backref('tags')) - value = Column(String(255), nullable=False) - - -class ImageLocation(BASE, GlanceBase): - """Represents an image location in the datastore.""" - __tablename__ = 'image_locations' - __table_args__ = (Index('ix_image_locations_image_id', 'image_id'), - Index('ix_image_locations_deleted', 'deleted'),) - - id = Column(Integer, primary_key=True, nullable=False) - image_id = Column(String(36), ForeignKey('images.id'), nullable=False) - image = relationship(Image, backref=backref('locations')) - value = Column(Text(), nullable=False) - meta_data = Column(JSONEncodedDict(), default={}) - status = Column(String(30), server_default='active', nullable=False) - - -class ImageMember(BASE, GlanceBase): - """Represents an image members in the datastore.""" - __tablename__ = 'image_members' - unique_constraint_key_name = 'image_members_image_id_member_deleted_at_key' - __table_args__ = 
(Index('ix_image_members_deleted', 'deleted'), - Index('ix_image_members_image_id', 'image_id'), - Index('ix_image_members_image_id_member', - 'image_id', - 'member'), - UniqueConstraint('image_id', - 'member', - 'deleted_at', - name=unique_constraint_key_name),) - - id = Column(Integer, primary_key=True) - image_id = Column(String(36), ForeignKey('images.id'), - nullable=False) - image = relationship(Image, backref=backref('members')) - - member = Column(String(255), nullable=False) - can_share = Column(Boolean, nullable=False, default=False) - status = Column(String(20), nullable=False, default="pending", - server_default='pending') - - -class Task(BASE, GlanceBase): - """Represents an task in the datastore""" - __tablename__ = 'tasks' - __table_args__ = (Index('ix_tasks_type', 'type'), - Index('ix_tasks_status', 'status'), - Index('ix_tasks_owner', 'owner'), - Index('ix_tasks_deleted', 'deleted'), - Index('ix_tasks_updated_at', 'updated_at')) - - id = Column(String(36), primary_key=True, - default=lambda: str(uuid.uuid4())) - type = Column(String(30), nullable=False) - status = Column(String(30), nullable=False) - owner = Column(String(255), nullable=False) - expires_at = Column(DateTime, nullable=True) - - -class TaskInfo(BASE, models.ModelBase): - """Represents task info in the datastore""" - __tablename__ = 'task_info' - - task_id = Column(String(36), - ForeignKey('tasks.id'), - primary_key=True, - nullable=False) - - task = relationship(Task, backref=backref('info', uselist=False)) - - # NOTE(nikhil): input and result are stored as text in the DB. - # SQLAlchemy marshals the data to/from JSON using custom type - # JSONEncodedDict. It uses simplejson underneath. 
- input = Column(JSONEncodedDict()) - result = Column(JSONEncodedDict()) - message = Column(Text) - - -def register_models(engine): - """Create database tables for all models with the given engine.""" - models = (Image, ImageProperty, ImageMember) - for model in models: - model.metadata.create_all(engine) - - -def unregister_models(engine): - """Drop database tables for all models with the given engine.""" - models = (Image, ImageProperty) - for model in models: - model.metadata.drop_all(engine) diff --git a/glance/db/sqlalchemy/models_metadef.py b/glance/db/sqlalchemy/models_metadef.py deleted file mode 100644 index 3d822c7c..00000000 --- a/glance/db/sqlalchemy/models_metadef.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -SQLAlchemy models for glance metadata schema -""" - -from oslo_db.sqlalchemy import models -from sqlalchemy import Boolean -from sqlalchemy import Column -from sqlalchemy import DateTime -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import ForeignKey -from sqlalchemy import Index -from sqlalchemy import Integer -from sqlalchemy.orm import relationship -from sqlalchemy import String -from sqlalchemy import Text -from sqlalchemy import UniqueConstraint - -from glance.common import timeutils -from glance.db.sqlalchemy.models import JSONEncodedDict - - -class DictionaryBase(models.ModelBase): - metadata = None - - def to_dict(self): - d = {} - for c in self.__table__.columns: - d[c.name] = self[c.name] - return d - - -BASE_DICT = declarative_base(cls=DictionaryBase) - - -class GlanceMetadefBase(models.TimestampMixin): - """Base class for Glance Metadef Models.""" - - __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'} - __table_initialized__ = False - __protected_attributes__ = set(["created_at", "updated_at"]) - - created_at = Column(DateTime, default=lambda: timeutils.utcnow(), - nullable=False) - # TODO(wko): Column `updated_at` have no default value in - # OpenStack common code. We should decide, is this value - # required and make changes in oslo (if required) or - # in glance (if not). 
- updated_at = Column(DateTime, default=lambda: timeutils.utcnow(), - nullable=True, onupdate=lambda: timeutils.utcnow()) - - -class MetadefNamespace(BASE_DICT, GlanceMetadefBase): - """Represents a metadata-schema namespace in the datastore.""" - __tablename__ = 'metadef_namespaces' - __table_args__ = (UniqueConstraint('namespace', - name='uq_metadef_namespaces' - '_namespace'), - Index('ix_metadef_namespaces_owner', 'owner') - ) - - id = Column(Integer, primary_key=True, nullable=False) - namespace = Column(String(80), nullable=False) - display_name = Column(String(80)) - description = Column(Text()) - visibility = Column(String(32)) - protected = Column(Boolean) - owner = Column(String(255), nullable=False) - - -class MetadefObject(BASE_DICT, GlanceMetadefBase): - """Represents a metadata-schema object in the datastore.""" - __tablename__ = 'metadef_objects' - __table_args__ = (UniqueConstraint('namespace_id', 'name', - name='uq_metadef_objects_namespace_id' - '_name'), - Index('ix_metadef_objects_name', 'name') - ) - - id = Column(Integer, primary_key=True, nullable=False) - namespace_id = Column(Integer(), ForeignKey('metadef_namespaces.id'), - nullable=False) - name = Column(String(80), nullable=False) - description = Column(Text()) - required = Column(Text()) - json_schema = Column(JSONEncodedDict(), default={}, nullable=False) - - -class MetadefProperty(BASE_DICT, GlanceMetadefBase): - """Represents a metadata-schema namespace-property in the datastore.""" - __tablename__ = 'metadef_properties' - __table_args__ = (UniqueConstraint('namespace_id', 'name', - name='uq_metadef_properties_namespace' - '_id_name'), - Index('ix_metadef_properties_name', 'name') - ) - - id = Column(Integer, primary_key=True, nullable=False) - namespace_id = Column(Integer(), ForeignKey('metadef_namespaces.id'), - nullable=False) - name = Column(String(80), nullable=False) - json_schema = Column(JSONEncodedDict(), default={}, nullable=False) - - -class 
MetadefNamespaceResourceType(BASE_DICT, GlanceMetadefBase): - """Represents a metadata-schema namespace-property in the datastore.""" - __tablename__ = 'metadef_namespace_resource_types' - __table_args__ = (Index('ix_metadef_ns_res_types_namespace_id', - 'namespace_id'), - ) - - resource_type_id = Column(Integer, - ForeignKey('metadef_resource_types.id'), - primary_key=True, nullable=False) - namespace_id = Column(Integer, ForeignKey('metadef_namespaces.id'), - primary_key=True, nullable=False) - properties_target = Column(String(80)) - prefix = Column(String(80)) - - -class MetadefResourceType(BASE_DICT, GlanceMetadefBase): - """Represents a metadata-schema resource type in the datastore.""" - __tablename__ = 'metadef_resource_types' - __table_args__ = (UniqueConstraint('name', - name='uq_metadef_resource_types_name'), - ) - - id = Column(Integer, primary_key=True, nullable=False) - name = Column(String(80), nullable=False) - protected = Column(Boolean, nullable=False, default=False) - - associations = relationship( - "MetadefNamespaceResourceType", - primaryjoin=id == MetadefNamespaceResourceType.resource_type_id) - - -class MetadefTag(BASE_DICT, GlanceMetadefBase): - """Represents a metadata-schema tag in the data store.""" - __tablename__ = 'metadef_tags' - __table_args__ = (UniqueConstraint('namespace_id', 'name', - name='uq_metadef_tags_namespace_id' - '_name'), - Index('ix_metadef_tags_name', 'name') - ) - - id = Column(Integer, primary_key=True, nullable=False) - namespace_id = Column(Integer(), ForeignKey('metadef_namespaces.id'), - nullable=False) - name = Column(String(80), nullable=False) - - -def register_models(engine): - """Create database tables for all models with the given engine.""" - models = (MetadefNamespace, MetadefObject, MetadefProperty, - MetadefTag, - MetadefResourceType, MetadefNamespaceResourceType) - for model in models: - model.metadata.create_all(engine) - - -def unregister_models(engine): - """Drop database tables for all models 
with the given engine.""" - models = (MetadefObject, MetadefProperty, MetadefNamespaceResourceType, - MetadefTag, - MetadefNamespace, MetadefResourceType) - for model in models: - model.metadata.drop_all(engine) diff --git a/glance/db/utils.py b/glance/db/utils.py deleted file mode 100644 index 30636602..00000000 --- a/glance/db/utils.py +++ /dev/null @@ -1,73 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from glance.common import exception - - -def mutate_image_dict_to_v1(image): - """ - Replaces a v2-style image dictionary's 'visibility' member with the - equivalent v1-style 'is_public' member. - """ - visibility = image.pop('visibility') - is_image_public = 'public' == visibility - image['is_public'] = is_image_public - return image - - -def ensure_image_dict_v2_compliant(image): - """ - Accepts an image dictionary that contains a v1-style 'is_public' member - and returns the equivalent v2-style image dictionary. 
- """ - if ('is_public' in image): - if ('visibility' in image): - msg = _("Specifying both 'visibility' and 'is_public' is not " - "permiitted.") - raise exception.Invalid(msg) - else: - image['visibility'] = ('public' if image.pop('is_public') else - 'shared') - return image - - -def is_image_visible(context, image, image_member_find, status=None): - """Return True if the image is visible in this context.""" - # Is admin == image visible - if context.is_admin: - return True - - # No owner == image visible - if image['owner'] is None: - return True - - # Public or Community visibility == image visible - if image['visibility'] in ['public', 'community']: - return True - - # Perform tests based on whether we have an owner - if context.owner is not None: - if context.owner == image['owner']: - return True - - # Figure out if this image is shared with that tenant - - if 'shared' == image['visibility']: - members = image_member_find(context, - image_id=image['id'], - member=context.owner, - status=status) - if members: - return True - - # Private image - return False diff --git a/glance/domain/__init__.py b/glance/domain/__init__.py deleted file mode 100644 index c6d5ded2..00000000 --- a/glance/domain/__init__.py +++ /dev/null @@ -1,681 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -import datetime -import uuid - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import importutils -import six - -from glance.common import exception -from glance.common import timeutils -from glance.i18n import _, _LE, _LI, _LW - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -CONF.import_opt('task_executor', 'glance.common.config', group='task') - - -_delayed_delete_imported = False - - -def _import_delayed_delete(): - # glance_store (indirectly) imports glance.domain therefore we can't put - # the CONF.import_opt outside - we have to do it in a convoluted/indirect - # way! - global _delayed_delete_imported - if not _delayed_delete_imported: - CONF.import_opt('delayed_delete', 'glance_store') - _delayed_delete_imported = True - - -class ImageFactory(object): - _readonly_properties = ['created_at', 'updated_at', 'status', 'checksum', - 'size', 'virtual_size'] - _reserved_properties = ['owner', 'locations', 'deleted', 'deleted_at', - 'direct_url', 'self', 'file', 'schema'] - - def _check_readonly(self, kwargs): - for key in self._readonly_properties: - if key in kwargs: - raise exception.ReadonlyProperty(property=key) - - def _check_unexpected(self, kwargs): - if kwargs: - msg = _('new_image() got unexpected keywords %s') - raise TypeError(msg % kwargs.keys()) - - def _check_reserved(self, properties): - if properties is not None: - for key in self._reserved_properties: - if key in properties: - raise exception.ReservedProperty(property=key) - - def new_image(self, image_id=None, name=None, visibility='shared', - min_disk=0, min_ram=0, protected=False, owner=None, - disk_format=None, container_format=None, - extra_properties=None, tags=None, **other_args): - extra_properties = extra_properties or {} - self._check_readonly(other_args) - self._check_unexpected(other_args) - self._check_reserved(extra_properties) - - if image_id is None: - image_id = str(uuid.uuid4()) - 
created_at = timeutils.utcnow() - updated_at = created_at - status = 'queued' - - return Image(image_id=image_id, name=name, status=status, - created_at=created_at, updated_at=updated_at, - visibility=visibility, min_disk=min_disk, - min_ram=min_ram, protected=protected, - owner=owner, disk_format=disk_format, - container_format=container_format, - extra_properties=extra_properties, tags=tags or []) - - -class Image(object): - - valid_state_targets = { - # Each key denotes a "current" state for the image. Corresponding - # values list the valid states to which we can jump from that "current" - # state. - # NOTE(flwang): In v2, we are deprecating the 'killed' status, so it's - # allowed to restore image from 'saving' to 'queued' so that upload - # can be retried. - 'queued': ('saving', 'importing', 'active', 'deleted'), - 'saving': ('active', 'killed', 'deleted', 'queued'), - 'importing': ('active', 'deleted', 'queued'), - 'active': ('pending_delete', 'deleted', 'deactivated'), - 'killed': ('deleted',), - 'pending_delete': ('deleted',), - 'deleted': (), - 'deactivated': ('active', 'deleted'), - } - - def __init__(self, image_id, status, created_at, updated_at, **kwargs): - self.image_id = image_id - self.status = status - self.created_at = created_at - self.updated_at = updated_at - self.name = kwargs.pop('name', None) - self.visibility = kwargs.pop('visibility', 'shared') - self.min_disk = kwargs.pop('min_disk', 0) - self.min_ram = kwargs.pop('min_ram', 0) - self.protected = kwargs.pop('protected', False) - self.locations = kwargs.pop('locations', []) - self.checksum = kwargs.pop('checksum', None) - self.owner = kwargs.pop('owner', None) - self._disk_format = kwargs.pop('disk_format', None) - self._container_format = kwargs.pop('container_format', None) - self.size = kwargs.pop('size', None) - self.virtual_size = kwargs.pop('virtual_size', None) - extra_properties = kwargs.pop('extra_properties', {}) - self.extra_properties = ExtraProperties(extra_properties) - 
self.tags = kwargs.pop('tags', []) - if kwargs: - message = _("__init__() got unexpected keyword argument '%s'") - raise TypeError(message % list(kwargs.keys())[0]) - - @property - def status(self): - return self._status - - @status.setter - def status(self, status): - has_status = hasattr(self, '_status') - if has_status: - if status not in self.valid_state_targets[self._status]: - kw = {'cur_status': self._status, 'new_status': status} - e = exception.InvalidImageStatusTransition(**kw) - LOG.debug(e) - raise e - - if self._status == 'queued' and status in ('saving', - 'active', - 'importing'): - missing = [k for k in ['disk_format', 'container_format'] - if not getattr(self, k)] - if len(missing) > 0: - if len(missing) == 1: - msg = _('Property %s must be set prior to ' - 'saving data.') - else: - msg = _('Properties %s must be set prior to ' - 'saving data.') - raise ValueError(msg % ', '.join(missing)) - # NOTE(flwang): Image size should be cleared as long as the image - # status is updated to 'queued' - if status == 'queued': - self.size = None - self.virtual_size = None - self._status = status - - @property - def visibility(self): - return self._visibility - - @visibility.setter - def visibility(self, visibility): - if visibility not in ('community', 'public', 'private', 'shared'): - raise ValueError(_('Visibility must be one of "community", ' - '"public", "private", or "shared"')) - self._visibility = visibility - - @property - def tags(self): - return self._tags - - @tags.setter - def tags(self, value): - self._tags = set(value) - - @property - def container_format(self): - return self._container_format - - @container_format.setter - def container_format(self, value): - if hasattr(self, '_container_format') and self.status != 'queued': - msg = _("Attribute container_format can be only replaced " - "for a queued image.") - raise exception.Forbidden(message=msg) - self._container_format = value - - @property - def disk_format(self): - return self._disk_format 
- - @disk_format.setter - def disk_format(self, value): - if hasattr(self, '_disk_format') and self.status != 'queued': - msg = _("Attribute disk_format can be only replaced " - "for a queued image.") - raise exception.Forbidden(message=msg) - self._disk_format = value - - @property - def min_disk(self): - return self._min_disk - - @min_disk.setter - def min_disk(self, value): - if value and value < 0: - extra_msg = _('Cannot be a negative value') - raise exception.InvalidParameterValue(value=value, - param='min_disk', - extra_msg=extra_msg) - self._min_disk = value - - @property - def min_ram(self): - return self._min_ram - - @min_ram.setter - def min_ram(self, value): - if value and value < 0: - extra_msg = _('Cannot be a negative value') - raise exception.InvalidParameterValue(value=value, - param='min_ram', - extra_msg=extra_msg) - self._min_ram = value - - def delete(self): - if self.protected: - raise exception.ProtectedImageDelete(image_id=self.image_id) - if CONF.delayed_delete and self.locations: - self.status = 'pending_delete' - else: - self.status = 'deleted' - - def deactivate(self): - if self.status == 'active': - self.status = 'deactivated' - elif self.status == 'deactivated': - # Noop if already deactive - pass - else: - LOG.debug("Not allowed to deactivate image in status '%s'", - self.status) - msg = (_("Not allowed to deactivate image in status '%s'") - % self.status) - raise exception.Forbidden(message=msg) - - def reactivate(self): - if self.status == 'deactivated': - self.status = 'active' - elif self.status == 'active': - # Noop if already active - pass - else: - LOG.debug("Not allowed to reactivate image in status '%s'", - self.status) - msg = (_("Not allowed to reactivate image in status '%s'") - % self.status) - raise exception.Forbidden(message=msg) - - def get_data(self, *args, **kwargs): - raise NotImplementedError() - - def set_data(self, data, size=None): - raise NotImplementedError() - - -class 
ExtraProperties(collections.MutableMapping, dict): - - def __getitem__(self, key): - return dict.__getitem__(self, key) - - def __setitem__(self, key, value): - return dict.__setitem__(self, key, value) - - def __delitem__(self, key): - return dict.__delitem__(self, key) - - def __eq__(self, other): - if isinstance(other, ExtraProperties): - return dict(self).__eq__(dict(other)) - elif isinstance(other, dict): - return dict(self).__eq__(other) - else: - return False - - def __ne__(self, other): - return not self.__eq__(other) - - def __len__(self): - return dict(self).__len__() - - def keys(self): - return dict(self).keys() - - -class ImageMembership(object): - - def __init__(self, image_id, member_id, created_at, updated_at, - id=None, status=None): - self.id = id - self.image_id = image_id - self.member_id = member_id - self.created_at = created_at - self.updated_at = updated_at - self.status = status - - @property - def status(self): - return self._status - - @status.setter - def status(self, status): - if status not in ('pending', 'accepted', 'rejected'): - msg = _('Status must be "pending", "accepted" or "rejected".') - raise ValueError(msg) - self._status = status - - -class ImageMemberFactory(object): - - def new_image_member(self, image, member_id): - created_at = timeutils.utcnow() - updated_at = created_at - - return ImageMembership(image_id=image.image_id, member_id=member_id, - created_at=created_at, updated_at=updated_at, - status='pending') - - -class Task(object): - _supported_task_type = ('import', 'api_image_import') - - _supported_task_status = ('pending', 'processing', 'success', 'failure') - - def __init__(self, task_id, task_type, status, owner, - expires_at, created_at, updated_at, - task_input, result, message): - - if task_type not in self._supported_task_type: - raise exception.InvalidTaskType(task_type) - - if status not in self._supported_task_status: - raise exception.InvalidTaskStatus(status) - - self.task_id = task_id - self._status = 
status - self.type = task_type - self.owner = owner - self.expires_at = expires_at - # NOTE(nikhil): We use '_time_to_live' to determine how long a - # task should live from the time it succeeds or fails. - task_time_to_live = CONF.task.task_time_to_live - self._time_to_live = datetime.timedelta(hours=task_time_to_live) - self.created_at = created_at - self.updated_at = updated_at - self.task_input = task_input - self.result = result - self.message = message - - @property - def status(self): - return self._status - - @property - def message(self): - return self._message - - @message.setter - def message(self, message): - if message: - self._message = six.text_type(message) - else: - self._message = six.text_type('') - - def _validate_task_status_transition(self, cur_status, new_status): - valid_transitions = { - 'pending': ['processing', 'failure'], - 'processing': ['success', 'failure'], - 'success': [], - 'failure': [], - } - - if new_status in valid_transitions[cur_status]: - return True - else: - return False - - def _set_task_status(self, new_status): - if self._validate_task_status_transition(self.status, new_status): - self._status = new_status - LOG.info(_LI("Task [%(task_id)s] status changing from " - "%(cur_status)s to %(new_status)s"), - {'task_id': self.task_id, 'cur_status': self.status, - 'new_status': new_status}) - self._status = new_status - else: - LOG.error(_LE("Task [%(task_id)s] status failed to change from " - "%(cur_status)s to %(new_status)s"), - {'task_id': self.task_id, 'cur_status': self.status, - 'new_status': new_status}) - raise exception.InvalidTaskStatusTransition( - cur_status=self.status, - new_status=new_status - ) - - def begin_processing(self): - new_status = 'processing' - self._set_task_status(new_status) - - def succeed(self, result): - new_status = 'success' - self.result = result - self._set_task_status(new_status) - self.expires_at = timeutils.utcnow() + self._time_to_live - - def fail(self, message): - new_status = 
'failure' - self.message = message - self._set_task_status(new_status) - self.expires_at = timeutils.utcnow() + self._time_to_live - - def run(self, executor): - executor.begin_processing(self.task_id) - - -class TaskStub(object): - - def __init__(self, task_id, task_type, status, owner, - expires_at, created_at, updated_at): - self.task_id = task_id - self._status = status - self.type = task_type - self.owner = owner - self.expires_at = expires_at - self.created_at = created_at - self.updated_at = updated_at - - @property - def status(self): - return self._status - - -class TaskFactory(object): - - def new_task(self, task_type, owner, - task_input=None, **kwargs): - task_id = str(uuid.uuid4()) - status = 'pending' - # Note(nikhil): expires_at would be set on the task, only when it - # succeeds or fails. - expires_at = None - created_at = timeutils.utcnow() - updated_at = created_at - return Task( - task_id, - task_type, - status, - owner, - expires_at, - created_at, - updated_at, - task_input, - kwargs.get('result'), - kwargs.get('message') - ) - - -class TaskExecutorFactory(object): - eventlet_deprecation_warned = False - - def __init__(self, task_repo, image_repo, image_factory): - self.task_repo = task_repo - self.image_repo = image_repo - self.image_factory = image_factory - - def new_task_executor(self, context): - try: - # NOTE(flaper87): Backwards compatibility layer. - # It'll allow us to provide a deprecation path to - # users that are currently consuming the `eventlet` - # executor. - task_executor = CONF.task.task_executor - if task_executor == 'eventlet': - # NOTE(jokke): Making sure we do not log the deprecation - # warning 1000 times or anything crazy like that. - if not TaskExecutorFactory.eventlet_deprecation_warned: - msg = _LW("The `eventlet` executor has been deprecated. 
" - "Use `taskflow` instead.") - LOG.warn(msg) - TaskExecutorFactory.eventlet_deprecation_warned = True - task_executor = 'taskflow' - - executor_cls = ('glance.async.%s_executor.' - 'TaskExecutor' % task_executor) - LOG.debug("Loading %s executor", task_executor) - executor = importutils.import_class(executor_cls) - return executor(context, - self.task_repo, - self.image_repo, - self.image_factory) - except ImportError: - with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Failed to load the %s executor provided " - "in the config.") % CONF.task.task_executor) - - -class MetadefNamespace(object): - - def __init__(self, namespace_id, namespace, display_name, description, - owner, visibility, protected, created_at, updated_at): - self.namespace_id = namespace_id - self.namespace = namespace - self.display_name = display_name - self.description = description - self.owner = owner - self.visibility = visibility or "private" - self.protected = protected or False - self.created_at = created_at - self.updated_at = updated_at - - def delete(self): - if self.protected: - raise exception.ProtectedMetadefNamespaceDelete( - namespace=self.namespace) - - -class MetadefNamespaceFactory(object): - - def new_namespace(self, namespace, owner, **kwargs): - namespace_id = str(uuid.uuid4()) - created_at = timeutils.utcnow() - updated_at = created_at - return MetadefNamespace( - namespace_id, - namespace, - kwargs.get('display_name'), - kwargs.get('description'), - owner, - kwargs.get('visibility'), - kwargs.get('protected'), - created_at, - updated_at - ) - - -class MetadefObject(object): - - def __init__(self, namespace, object_id, name, created_at, updated_at, - required, description, properties): - self.namespace = namespace - self.object_id = object_id - self.name = name - self.created_at = created_at - self.updated_at = updated_at - self.required = required - self.description = description - self.properties = properties - - def delete(self): - if 
self.namespace.protected: - raise exception.ProtectedMetadefObjectDelete(object_name=self.name) - - -class MetadefObjectFactory(object): - - def new_object(self, namespace, name, **kwargs): - object_id = str(uuid.uuid4()) - created_at = timeutils.utcnow() - updated_at = created_at - return MetadefObject( - namespace, - object_id, - name, - created_at, - updated_at, - kwargs.get('required'), - kwargs.get('description'), - kwargs.get('properties') - ) - - -class MetadefResourceType(object): - - def __init__(self, namespace, name, prefix, properties_target, - created_at, updated_at): - self.namespace = namespace - self.name = name - self.prefix = prefix - self.properties_target = properties_target - self.created_at = created_at - self.updated_at = updated_at - - def delete(self): - if self.namespace.protected: - raise exception.ProtectedMetadefResourceTypeAssociationDelete( - resource_type=self.name) - - -class MetadefResourceTypeFactory(object): - - def new_resource_type(self, namespace, name, **kwargs): - created_at = timeutils.utcnow() - updated_at = created_at - return MetadefResourceType( - namespace, - name, - kwargs.get('prefix'), - kwargs.get('properties_target'), - created_at, - updated_at - ) - - -class MetadefProperty(object): - - def __init__(self, namespace, property_id, name, schema): - self.namespace = namespace - self.property_id = property_id - self.name = name - self.schema = schema - - def delete(self): - if self.namespace.protected: - raise exception.ProtectedMetadefNamespacePropDelete( - property_name=self.name) - - -class MetadefPropertyFactory(object): - - def new_namespace_property(self, namespace, name, schema, **kwargs): - property_id = str(uuid.uuid4()) - return MetadefProperty( - namespace, - property_id, - name, - schema - ) - - -class MetadefTag(object): - - def __init__(self, namespace, tag_id, name, created_at, updated_at): - self.namespace = namespace - self.tag_id = tag_id - self.name = name - self.created_at = created_at - 
self.updated_at = updated_at - - def delete(self): - if self.namespace.protected: - raise exception.ProtectedMetadefTagDelete(tag_name=self.name) - - -class MetadefTagFactory(object): - - def new_tag(self, namespace, name, **kwargs): - tag_id = str(uuid.uuid4()) - created_at = timeutils.utcnow() - updated_at = created_at - return MetadefTag( - namespace, - tag_id, - name, - created_at, - updated_at - ) diff --git a/glance/domain/proxy.py b/glance/domain/proxy.py deleted file mode 100644 index 9cc7bfef..00000000 --- a/glance/domain/proxy.py +++ /dev/null @@ -1,580 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -def _proxy(target, attr): - def get_attr(self): - return getattr(getattr(self, target), attr) - - def set_attr(self, value): - return setattr(getattr(self, target), attr, value) - - def del_attr(self): - return delattr(getattr(self, target), attr) - - return property(get_attr, set_attr, del_attr) - - -class Helper(object): - def __init__(self, proxy_class=None, proxy_kwargs=None): - self.proxy_class = proxy_class - self.proxy_kwargs = proxy_kwargs or {} - - def proxy(self, obj): - if obj is None or self.proxy_class is None: - return obj - return self.proxy_class(obj, **self.proxy_kwargs) - - def unproxy(self, obj): - if obj is None or self.proxy_class is None: - return obj - return obj.base - - -class TaskRepo(object): - def __init__(self, base, - task_proxy_class=None, task_proxy_kwargs=None): - self.base = base - self.task_proxy_helper = Helper(task_proxy_class, task_proxy_kwargs) - - def get(self, task_id): - task = self.base.get(task_id) - return self.task_proxy_helper.proxy(task) - - def add(self, task): - self.base.add(self.task_proxy_helper.unproxy(task)) - - def save(self, task): - self.base.save(self.task_proxy_helper.unproxy(task)) - - def remove(self, task): - base_task = self.task_proxy_helper.unproxy(task) - self.base.remove(base_task) - - -class TaskStubRepo(object): - def __init__(self, base, task_stub_proxy_class=None, - task_stub_proxy_kwargs=None): - self.base = base - self.task_stub_proxy_helper = Helper(task_stub_proxy_class, - task_stub_proxy_kwargs) - - def list(self, *args, **kwargs): - tasks = self.base.list(*args, **kwargs) - return [self.task_stub_proxy_helper.proxy(task) for task in tasks] - - -class Repo(object): - def __init__(self, base, item_proxy_class=None, item_proxy_kwargs=None): - self.base = base - self.helper = Helper(item_proxy_class, item_proxy_kwargs) - - def get(self, item_id): - return self.helper.proxy(self.base.get(item_id)) - - def list(self, *args, **kwargs): - items = self.base.list(*args, **kwargs) - return 
[self.helper.proxy(item) for item in items] - - def add(self, item): - base_item = self.helper.unproxy(item) - result = self.base.add(base_item) - return self.helper.proxy(result) - - def save(self, item, from_state=None): - base_item = self.helper.unproxy(item) - result = self.base.save(base_item, from_state=from_state) - return self.helper.proxy(result) - - def remove(self, item): - base_item = self.helper.unproxy(item) - result = self.base.remove(base_item) - return self.helper.proxy(result) - - -class MemberRepo(object): - def __init__(self, image, base, - member_proxy_class=None, member_proxy_kwargs=None): - self.image = image - self.base = base - self.member_proxy_helper = Helper(member_proxy_class, - member_proxy_kwargs) - - def get(self, member_id): - member = self.base.get(member_id) - return self.member_proxy_helper.proxy(member) - - def add(self, member): - self.base.add(self.member_proxy_helper.unproxy(member)) - - def list(self, *args, **kwargs): - members = self.base.list(*args, **kwargs) - return [self.member_proxy_helper.proxy(member) for member - in members] - - def remove(self, member): - base_item = self.member_proxy_helper.unproxy(member) - result = self.base.remove(base_item) - return self.member_proxy_helper.proxy(result) - - def save(self, member, from_state=None): - base_item = self.member_proxy_helper.unproxy(member) - result = self.base.save(base_item, from_state=from_state) - return self.member_proxy_helper.proxy(result) - - -class ImageFactory(object): - def __init__(self, base, proxy_class=None, proxy_kwargs=None): - self.helper = Helper(proxy_class, proxy_kwargs) - self.base = base - - def new_image(self, **kwargs): - return self.helper.proxy(self.base.new_image(**kwargs)) - - -class ImageMembershipFactory(object): - def __init__(self, base, proxy_class=None, proxy_kwargs=None): - self.helper = Helper(proxy_class, proxy_kwargs) - self.base = base - - def new_image_member(self, image, member, **kwargs): - return 
self.helper.proxy(self.base.new_image_member(image, - member, - **kwargs)) - - -class Image(object): - def __init__(self, base, member_repo_proxy_class=None, - member_repo_proxy_kwargs=None): - self.base = base - self.helper = Helper(member_repo_proxy_class, - member_repo_proxy_kwargs) - - name = _proxy('base', 'name') - image_id = _proxy('base', 'image_id') - status = _proxy('base', 'status') - created_at = _proxy('base', 'created_at') - updated_at = _proxy('base', 'updated_at') - visibility = _proxy('base', 'visibility') - min_disk = _proxy('base', 'min_disk') - min_ram = _proxy('base', 'min_ram') - protected = _proxy('base', 'protected') - locations = _proxy('base', 'locations') - checksum = _proxy('base', 'checksum') - owner = _proxy('base', 'owner') - disk_format = _proxy('base', 'disk_format') - container_format = _proxy('base', 'container_format') - size = _proxy('base', 'size') - virtual_size = _proxy('base', 'virtual_size') - extra_properties = _proxy('base', 'extra_properties') - tags = _proxy('base', 'tags') - - def delete(self): - self.base.delete() - - def deactivate(self): - self.base.deactivate() - - def reactivate(self): - self.base.reactivate() - - def set_data(self, data, size=None): - self.base.set_data(data, size) - - def get_data(self, *args, **kwargs): - return self.base.get_data(*args, **kwargs) - - -class ImageMember(object): - def __init__(self, base): - self.base = base - - id = _proxy('base', 'id') - image_id = _proxy('base', 'image_id') - member_id = _proxy('base', 'member_id') - status = _proxy('base', 'status') - created_at = _proxy('base', 'created_at') - updated_at = _proxy('base', 'updated_at') - - -class Task(object): - def __init__(self, base): - self.base = base - - task_id = _proxy('base', 'task_id') - type = _proxy('base', 'type') - status = _proxy('base', 'status') - owner = _proxy('base', 'owner') - expires_at = _proxy('base', 'expires_at') - created_at = _proxy('base', 'created_at') - updated_at = _proxy('base', 
'updated_at') - task_input = _proxy('base', 'task_input') - result = _proxy('base', 'result') - message = _proxy('base', 'message') - - def begin_processing(self): - self.base.begin_processing() - - def succeed(self, result): - self.base.succeed(result) - - def fail(self, message): - self.base.fail(message) - - def run(self, executor): - self.base.run(executor) - - -class TaskStub(object): - def __init__(self, base): - self.base = base - - task_id = _proxy('base', 'task_id') - type = _proxy('base', 'type') - status = _proxy('base', 'status') - owner = _proxy('base', 'owner') - expires_at = _proxy('base', 'expires_at') - created_at = _proxy('base', 'created_at') - updated_at = _proxy('base', 'updated_at') - - -class TaskFactory(object): - def __init__(self, - base, - task_proxy_class=None, - task_proxy_kwargs=None): - self.task_helper = Helper(task_proxy_class, task_proxy_kwargs) - self.base = base - - def new_task(self, **kwargs): - t = self.base.new_task(**kwargs) - return self.task_helper.proxy(t) - - -# Metadef Namespace classes -class MetadefNamespaceRepo(object): - def __init__(self, base, - namespace_proxy_class=None, namespace_proxy_kwargs=None): - self.base = base - self.namespace_proxy_helper = Helper(namespace_proxy_class, - namespace_proxy_kwargs) - - def get(self, namespace): - namespace_obj = self.base.get(namespace) - return self.namespace_proxy_helper.proxy(namespace_obj) - - def add(self, namespace): - self.base.add(self.namespace_proxy_helper.unproxy(namespace)) - - def list(self, *args, **kwargs): - namespaces = self.base.list(*args, **kwargs) - return [self.namespace_proxy_helper.proxy(namespace) for namespace - in namespaces] - - def remove(self, item): - base_item = self.namespace_proxy_helper.unproxy(item) - result = self.base.remove(base_item) - return self.namespace_proxy_helper.proxy(result) - - def remove_objects(self, item): - base_item = self.namespace_proxy_helper.unproxy(item) - result = self.base.remove_objects(base_item) - return 
self.namespace_proxy_helper.proxy(result) - - def remove_properties(self, item): - base_item = self.namespace_proxy_helper.unproxy(item) - result = self.base.remove_properties(base_item) - return self.namespace_proxy_helper.proxy(result) - - def remove_tags(self, item): - base_item = self.namespace_proxy_helper.unproxy(item) - result = self.base.remove_tags(base_item) - return self.namespace_proxy_helper.proxy(result) - - def save(self, item): - base_item = self.namespace_proxy_helper.unproxy(item) - result = self.base.save(base_item) - return self.namespace_proxy_helper.proxy(result) - - -class MetadefNamespace(object): - def __init__(self, base): - self.base = base - - namespace_id = _proxy('base', 'namespace_id') - namespace = _proxy('base', 'namespace') - display_name = _proxy('base', 'display_name') - description = _proxy('base', 'description') - owner = _proxy('base', 'owner') - visibility = _proxy('base', 'visibility') - protected = _proxy('base', 'protected') - created_at = _proxy('base', 'created_at') - updated_at = _proxy('base', 'updated_at') - - def delete(self): - self.base.delete() - - -class MetadefNamespaceFactory(object): - def __init__(self, - base, - meta_namespace_proxy_class=None, - meta_namespace_proxy_kwargs=None): - self.meta_namespace_helper = Helper(meta_namespace_proxy_class, - meta_namespace_proxy_kwargs) - self.base = base - - def new_namespace(self, **kwargs): - t = self.base.new_namespace(**kwargs) - return self.meta_namespace_helper.proxy(t) - - -# Metadef object classes -class MetadefObjectRepo(object): - def __init__(self, base, - object_proxy_class=None, object_proxy_kwargs=None): - self.base = base - self.object_proxy_helper = Helper(object_proxy_class, - object_proxy_kwargs) - - def get(self, namespace, object_name): - meta_object = self.base.get(namespace, object_name) - return self.object_proxy_helper.proxy(meta_object) - - def add(self, meta_object): - self.base.add(self.object_proxy_helper.unproxy(meta_object)) - - def 
list(self, *args, **kwargs): - objects = self.base.list(*args, **kwargs) - return [self.object_proxy_helper.proxy(meta_object) for meta_object - in objects] - - def remove(self, item): - base_item = self.object_proxy_helper.unproxy(item) - result = self.base.remove(base_item) - return self.object_proxy_helper.proxy(result) - - def save(self, item): - base_item = self.object_proxy_helper.unproxy(item) - result = self.base.save(base_item) - return self.object_proxy_helper.proxy(result) - - -class MetadefObject(object): - def __init__(self, base): - self.base = base - namespace = _proxy('base', 'namespace') - object_id = _proxy('base', 'object_id') - name = _proxy('base', 'name') - required = _proxy('base', 'required') - description = _proxy('base', 'description') - properties = _proxy('base', 'properties') - created_at = _proxy('base', 'created_at') - updated_at = _proxy('base', 'updated_at') - - def delete(self): - self.base.delete() - - -class MetadefObjectFactory(object): - def __init__(self, - base, - meta_object_proxy_class=None, - meta_object_proxy_kwargs=None): - self.meta_object_helper = Helper(meta_object_proxy_class, - meta_object_proxy_kwargs) - self.base = base - - def new_object(self, **kwargs): - t = self.base.new_object(**kwargs) - return self.meta_object_helper.proxy(t) - - -# Metadef ResourceType classes -class MetadefResourceTypeRepo(object): - def __init__(self, base, resource_type_proxy_class=None, - resource_type_proxy_kwargs=None): - self.base = base - self.resource_type_proxy_helper = Helper(resource_type_proxy_class, - resource_type_proxy_kwargs) - - def add(self, meta_resource_type): - self.base.add(self.resource_type_proxy_helper.unproxy( - meta_resource_type)) - - def get(self, *args, **kwargs): - resource_type = self.base.get(*args, **kwargs) - return self.resource_type_proxy_helper.proxy(resource_type) - - def list(self, *args, **kwargs): - resource_types = self.base.list(*args, **kwargs) - return 
[self.resource_type_proxy_helper.proxy(resource_type) - for resource_type in resource_types] - - def remove(self, item): - base_item = self.resource_type_proxy_helper.unproxy(item) - result = self.base.remove(base_item) - return self.resource_type_proxy_helper.proxy(result) - - -class MetadefResourceType(object): - def __init__(self, base): - self.base = base - namespace = _proxy('base', 'namespace') - name = _proxy('base', 'name') - prefix = _proxy('base', 'prefix') - properties_target = _proxy('base', 'properties_target') - created_at = _proxy('base', 'created_at') - updated_at = _proxy('base', 'updated_at') - - def delete(self): - self.base.delete() - - -class MetadefResourceTypeFactory(object): - def __init__(self, - base, - resource_type_proxy_class=None, - resource_type_proxy_kwargs=None): - self.resource_type_helper = Helper(resource_type_proxy_class, - resource_type_proxy_kwargs) - self.base = base - - def new_resource_type(self, **kwargs): - t = self.base.new_resource_type(**kwargs) - return self.resource_type_helper.proxy(t) - - -# Metadef namespace property classes -class MetadefPropertyRepo(object): - def __init__(self, base, - property_proxy_class=None, property_proxy_kwargs=None): - self.base = base - self.property_proxy_helper = Helper(property_proxy_class, - property_proxy_kwargs) - - def get(self, namespace, property_name): - property = self.base.get(namespace, property_name) - return self.property_proxy_helper.proxy(property) - - def add(self, property): - self.base.add(self.property_proxy_helper.unproxy(property)) - - def list(self, *args, **kwargs): - properties = self.base.list(*args, **kwargs) - return [self.property_proxy_helper.proxy(property) for property - in properties] - - def remove(self, item): - base_item = self.property_proxy_helper.unproxy(item) - result = self.base.remove(base_item) - return self.property_proxy_helper.proxy(result) - - def save(self, item): - base_item = self.property_proxy_helper.unproxy(item) - result = 
self.base.save(base_item) - return self.property_proxy_helper.proxy(result) - - -class MetadefProperty(object): - def __init__(self, base): - self.base = base - namespace = _proxy('base', 'namespace') - property_id = _proxy('base', 'property_id') - name = _proxy('base', 'name') - schema = _proxy('base', 'schema') - - def delete(self): - self.base.delete() - - -class MetadefPropertyFactory(object): - def __init__(self, - base, - property_proxy_class=None, - property_proxy_kwargs=None): - self.meta_object_helper = Helper(property_proxy_class, - property_proxy_kwargs) - self.base = base - - def new_namespace_property(self, **kwargs): - t = self.base.new_namespace_property(**kwargs) - return self.meta_object_helper.proxy(t) - - -# Metadef tag classes -class MetadefTagRepo(object): - def __init__(self, base, - tag_proxy_class=None, tag_proxy_kwargs=None): - self.base = base - self.tag_proxy_helper = Helper(tag_proxy_class, - tag_proxy_kwargs) - - def get(self, namespace, name): - meta_tag = self.base.get(namespace, name) - return self.tag_proxy_helper.proxy(meta_tag) - - def add(self, meta_tag): - self.base.add(self.tag_proxy_helper.unproxy(meta_tag)) - - def add_tags(self, meta_tags): - tags_list = [] - for meta_tag in meta_tags: - tags_list.append(self.tag_proxy_helper.unproxy(meta_tag)) - self.base.add_tags(tags_list) - - def list(self, *args, **kwargs): - tags = self.base.list(*args, **kwargs) - return [self.tag_proxy_helper.proxy(meta_tag) for meta_tag - in tags] - - def remove(self, item): - base_item = self.tag_proxy_helper.unproxy(item) - result = self.base.remove(base_item) - return self.tag_proxy_helper.proxy(result) - - def save(self, item): - base_item = self.tag_proxy_helper.unproxy(item) - result = self.base.save(base_item) - return self.tag_proxy_helper.proxy(result) - - -class MetadefTag(object): - def __init__(self, base): - self.base = base - - namespace = _proxy('base', 'namespace') - tag_id = _proxy('base', 'tag_id') - name = _proxy('base', 'name') - 
created_at = _proxy('base', 'created_at') - updated_at = _proxy('base', 'updated_at') - - def delete(self): - self.base.delete() - - -class MetadefTagFactory(object): - def __init__(self, - base, - meta_tag_proxy_class=None, - meta_tag_proxy_kwargs=None): - self.meta_tag_helper = Helper(meta_tag_proxy_class, - meta_tag_proxy_kwargs) - self.base = base - - def new_tag(self, **kwargs): - t = self.base.new_tag(**kwargs) - return self.meta_tag_helper.proxy(t) diff --git a/glance/gateway.py b/glance/gateway.py deleted file mode 100644 index c15d4d74..00000000 --- a/glance/gateway.py +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import glance_store - -from glance.api import authorization -from glance.api import policy -from glance.api import property_protections -from glance.common import property_utils -from glance.common import store_utils -import glance.db -import glance.domain -import glance.location -import glance.notifier -import glance.quota - - -class Gateway(object): - def __init__(self, db_api=None, store_api=None, notifier=None, - policy_enforcer=None): - self.db_api = db_api or glance.db.get_api() - self.store_api = store_api or glance_store - self.store_utils = store_utils - self.notifier = notifier or glance.notifier.Notifier() - self.policy = policy_enforcer or policy.Enforcer() - - def get_image_factory(self, context): - image_factory = glance.domain.ImageFactory() - store_image_factory = glance.location.ImageFactoryProxy( - image_factory, context, self.store_api, self.store_utils) - quota_image_factory = glance.quota.ImageFactoryProxy( - store_image_factory, context, self.db_api, self.store_utils) - policy_image_factory = policy.ImageFactoryProxy( - quota_image_factory, context, self.policy) - notifier_image_factory = glance.notifier.ImageFactoryProxy( - policy_image_factory, context, self.notifier) - if property_utils.is_property_protection_enabled(): - property_rules = property_utils.PropertyRules(self.policy) - pif = property_protections.ProtectedImageFactoryProxy( - notifier_image_factory, context, property_rules) - authorized_image_factory = authorization.ImageFactoryProxy( - pif, context) - else: - authorized_image_factory = authorization.ImageFactoryProxy( - notifier_image_factory, context) - return authorized_image_factory - - def get_image_member_factory(self, context): - image_factory = glance.domain.ImageMemberFactory() - quota_image_factory = glance.quota.ImageMemberFactoryProxy( - image_factory, context, self.db_api, self.store_utils) - policy_member_factory = policy.ImageMemberFactoryProxy( - quota_image_factory, context, self.policy) - 
authorized_image_factory = authorization.ImageMemberFactoryProxy( - policy_member_factory, context) - return authorized_image_factory - - def get_repo(self, context): - image_repo = glance.db.ImageRepo(context, self.db_api) - store_image_repo = glance.location.ImageRepoProxy( - image_repo, context, self.store_api, self.store_utils) - quota_image_repo = glance.quota.ImageRepoProxy( - store_image_repo, context, self.db_api, self.store_utils) - policy_image_repo = policy.ImageRepoProxy( - quota_image_repo, context, self.policy) - notifier_image_repo = glance.notifier.ImageRepoProxy( - policy_image_repo, context, self.notifier) - if property_utils.is_property_protection_enabled(): - property_rules = property_utils.PropertyRules(self.policy) - pir = property_protections.ProtectedImageRepoProxy( - notifier_image_repo, context, property_rules) - authorized_image_repo = authorization.ImageRepoProxy( - pir, context) - else: - authorized_image_repo = authorization.ImageRepoProxy( - notifier_image_repo, context) - - return authorized_image_repo - - def get_member_repo(self, image, context): - image_member_repo = glance.db.ImageMemberRepo( - context, self.db_api, image) - store_image_repo = glance.location.ImageMemberRepoProxy( - image_member_repo, image, context, self.store_api) - policy_member_repo = policy.ImageMemberRepoProxy( - store_image_repo, image, context, self.policy) - notifier_member_repo = glance.notifier.ImageMemberRepoProxy( - policy_member_repo, image, context, self.notifier) - authorized_member_repo = authorization.ImageMemberRepoProxy( - notifier_member_repo, image, context) - - return authorized_member_repo - - def get_task_factory(self, context): - task_factory = glance.domain.TaskFactory() - policy_task_factory = policy.TaskFactoryProxy( - task_factory, context, self.policy) - notifier_task_factory = glance.notifier.TaskFactoryProxy( - policy_task_factory, context, self.notifier) - authorized_task_factory = authorization.TaskFactoryProxy( - 
notifier_task_factory, context) - return authorized_task_factory - - def get_task_repo(self, context): - task_repo = glance.db.TaskRepo(context, self.db_api) - policy_task_repo = policy.TaskRepoProxy( - task_repo, context, self.policy) - notifier_task_repo = glance.notifier.TaskRepoProxy( - policy_task_repo, context, self.notifier) - authorized_task_repo = authorization.TaskRepoProxy( - notifier_task_repo, context) - return authorized_task_repo - - def get_task_stub_repo(self, context): - task_stub_repo = glance.db.TaskRepo(context, self.db_api) - policy_task_stub_repo = policy.TaskStubRepoProxy( - task_stub_repo, context, self.policy) - notifier_task_stub_repo = glance.notifier.TaskStubRepoProxy( - policy_task_stub_repo, context, self.notifier) - authorized_task_stub_repo = authorization.TaskStubRepoProxy( - notifier_task_stub_repo, context) - return authorized_task_stub_repo - - def get_task_executor_factory(self, context): - task_repo = self.get_task_repo(context) - image_repo = self.get_repo(context) - image_factory = self.get_image_factory(context) - return glance.domain.TaskExecutorFactory(task_repo, - image_repo, - image_factory) - - def get_metadef_namespace_factory(self, context): - ns_factory = glance.domain.MetadefNamespaceFactory() - policy_ns_factory = policy.MetadefNamespaceFactoryProxy( - ns_factory, context, self.policy) - notifier_ns_factory = glance.notifier.MetadefNamespaceFactoryProxy( - policy_ns_factory, context, self.notifier) - authorized_ns_factory = authorization.MetadefNamespaceFactoryProxy( - notifier_ns_factory, context) - return authorized_ns_factory - - def get_metadef_namespace_repo(self, context): - ns_repo = glance.db.MetadefNamespaceRepo(context, self.db_api) - policy_ns_repo = policy.MetadefNamespaceRepoProxy( - ns_repo, context, self.policy) - notifier_ns_repo = glance.notifier.MetadefNamespaceRepoProxy( - policy_ns_repo, context, self.notifier) - authorized_ns_repo = authorization.MetadefNamespaceRepoProxy( - notifier_ns_repo, 
context) - return authorized_ns_repo - - def get_metadef_object_factory(self, context): - object_factory = glance.domain.MetadefObjectFactory() - policy_object_factory = policy.MetadefObjectFactoryProxy( - object_factory, context, self.policy) - notifier_object_factory = glance.notifier.MetadefObjectFactoryProxy( - policy_object_factory, context, self.notifier) - authorized_object_factory = authorization.MetadefObjectFactoryProxy( - notifier_object_factory, context) - return authorized_object_factory - - def get_metadef_object_repo(self, context): - object_repo = glance.db.MetadefObjectRepo(context, self.db_api) - policy_object_repo = policy.MetadefObjectRepoProxy( - object_repo, context, self.policy) - notifier_object_repo = glance.notifier.MetadefObjectRepoProxy( - policy_object_repo, context, self.notifier) - authorized_object_repo = authorization.MetadefObjectRepoProxy( - notifier_object_repo, context) - return authorized_object_repo - - def get_metadef_resource_type_factory(self, context): - resource_type_factory = glance.domain.MetadefResourceTypeFactory() - policy_resource_type_factory = policy.MetadefResourceTypeFactoryProxy( - resource_type_factory, context, self.policy) - notifier_resource_type_factory = ( - glance.notifier.MetadefResourceTypeFactoryProxy( - policy_resource_type_factory, context, self.notifier) - ) - authorized_resource_type_factory = ( - authorization.MetadefResourceTypeFactoryProxy( - notifier_resource_type_factory, context) - ) - return authorized_resource_type_factory - - def get_metadef_resource_type_repo(self, context): - resource_type_repo = glance.db.MetadefResourceTypeRepo( - context, self.db_api) - policy_object_repo = policy.MetadefResourceTypeRepoProxy( - resource_type_repo, context, self.policy) - notifier_object_repo = glance.notifier.MetadefResourceTypeRepoProxy( - policy_object_repo, context, self.notifier) - authorized_object_repo = authorization.MetadefResourceTypeRepoProxy( - notifier_object_repo, context) - return 
authorized_object_repo - - def get_metadef_property_factory(self, context): - prop_factory = glance.domain.MetadefPropertyFactory() - policy_prop_factory = policy.MetadefPropertyFactoryProxy( - prop_factory, context, self.policy) - notifier_prop_factory = glance.notifier.MetadefPropertyFactoryProxy( - policy_prop_factory, context, self.notifier) - authorized_prop_factory = authorization.MetadefPropertyFactoryProxy( - notifier_prop_factory, context) - return authorized_prop_factory - - def get_metadef_property_repo(self, context): - prop_repo = glance.db.MetadefPropertyRepo(context, self.db_api) - policy_prop_repo = policy.MetadefPropertyRepoProxy( - prop_repo, context, self.policy) - notifier_prop_repo = glance.notifier.MetadefPropertyRepoProxy( - policy_prop_repo, context, self.notifier) - authorized_prop_repo = authorization.MetadefPropertyRepoProxy( - notifier_prop_repo, context) - return authorized_prop_repo - - def get_metadef_tag_factory(self, context): - tag_factory = glance.domain.MetadefTagFactory() - policy_tag_factory = policy.MetadefTagFactoryProxy( - tag_factory, context, self.policy) - notifier_tag_factory = glance.notifier.MetadefTagFactoryProxy( - policy_tag_factory, context, self.notifier) - authorized_tag_factory = authorization.MetadefTagFactoryProxy( - notifier_tag_factory, context) - return authorized_tag_factory - - def get_metadef_tag_repo(self, context): - tag_repo = glance.db.MetadefTagRepo(context, self.db_api) - policy_tag_repo = policy.MetadefTagRepoProxy( - tag_repo, context, self.policy) - notifier_tag_repo = glance.notifier.MetadefTagRepoProxy( - policy_tag_repo, context, self.notifier) - authorized_tag_repo = authorization.MetadefTagRepoProxy( - notifier_tag_repo, context) - return authorized_tag_repo diff --git a/glance/hacking/__init__.py b/glance/hacking/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/hacking/checks.py b/glance/hacking/checks.py deleted file mode 100644 index 48fe0019..00000000 
--- a/glance/hacking/checks.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - -""" -Guidelines for writing new hacking checks - - - Use only for Glance-specific tests. OpenStack general tests - should be submitted to the common 'hacking' module. - - Pick numbers in the range G3xx. Find the current test with - the highest allocated number and then pick the next value. - If nova has an N3xx code for that test, use the same number. - - Keep the test method code in the source file ordered based - on the G3xx value. 
- - List the new rule in the top level HACKING.rst file - - Add test cases for each new rule to glance/tests/test_hacking.py - -""" - - -asse_trueinst_re = re.compile( - r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, " - "(\w|\.|\'|\"|\[|\])+\)\)") -asse_equal_type_re = re.compile( - r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), " - "(\w|\.|\'|\"|\[|\])+\)") -asse_equal_end_with_none_re = re.compile( - r"(.)*assertEqual\((\w|\.|\'|\"|\[|\])+, None\)") -asse_equal_start_with_none_re = re.compile( - r"(.)*assertEqual\(None, (\w|\.|\'|\"|\[|\])+\)") -unicode_func_re = re.compile(r"(\s|\W|^)unicode\(") -dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") - - -def assert_true_instance(logical_line): - """Check for assertTrue(isinstance(a, b)) sentences - - G316 - """ - if asse_trueinst_re.match(logical_line): - yield (0, "G316: assertTrue(isinstance(a, b)) sentences not allowed") - - -def assert_equal_type(logical_line): - """Check for assertEqual(type(A), B) sentences - - G317 - """ - if asse_equal_type_re.match(logical_line): - yield (0, "G317: assertEqual(type(A), B) sentences not allowed") - - -def assert_equal_none(logical_line): - """Check for assertEqual(A, None) or assertEqual(None, A) sentences - - G318 - """ - res = (asse_equal_start_with_none_re.match(logical_line) or - asse_equal_end_with_none_re.match(logical_line)) - if res: - yield (0, "G318: assertEqual(A, None) or assertEqual(None, A) " - "sentences not allowed") - - -def no_translate_debug_logs(logical_line, filename): - dirs = [ - "glance/api", - "glance/cmd", - "glance/common", - "glance/db", - "glance/domain", - "glance/image_cache", - "glance/quota", - "glance/registry", - "glance/store", - "glance/tests", - ] - - if max([name in filename for name in dirs]): - if logical_line.startswith("LOG.debug(_("): - yield(0, "G319: Don't translate debug level logs") - - -def no_direct_use_of_unicode_function(logical_line): - """Check for use of unicode() builtin - - G320 - """ - 
if unicode_func_re.match(logical_line): - yield(0, "G320: Use six.text_type() instead of unicode()") - - -def check_no_contextlib_nested(logical_line): - msg = ("G327: contextlib.nested is deprecated since Python 2.7. See " - "https://docs.python.org/2/library/contextlib.html#contextlib." - "nested for more information.") - if ("with contextlib.nested(" in logical_line or - "with nested(" in logical_line): - yield(0, msg) - - -def dict_constructor_with_list_copy(logical_line): - msg = ("G328: Must use a dict comprehension instead of a dict constructor " - "with a sequence of key-value pairs.") - if dict_constructor_with_list_copy_re.match(logical_line): - yield (0, msg) - - -def check_python3_xrange(logical_line): - if re.search(r"\bxrange\s*\(", logical_line): - yield(0, "G329: Do not use xrange. Use range, or six.moves.range for " - "large loops.") - - -def check_python3_no_iteritems(logical_line): - msg = ("G330: Use six.iteritems() or dict.items() instead of " - "dict.iteritems().") - if re.search(r".*\.iteritems\(\)", logical_line): - yield(0, msg) - - -def check_python3_no_iterkeys(logical_line): - msg = ("G331: Use six.iterkeys() or dict.keys() instead of " - "dict.iterkeys().") - if re.search(r".*\.iterkeys\(\)", logical_line): - yield(0, msg) - - -def check_python3_no_itervalues(logical_line): - msg = ("G332: Use six.itervalues() or dict.values instead of " - "dict.itervalues().") - if re.search(r".*\.itervalues\(\)", logical_line): - yield(0, msg) - - -def factory(register): - register(assert_true_instance) - register(assert_equal_type) - register(assert_equal_none) - register(no_translate_debug_logs) - register(no_direct_use_of_unicode_function) - register(check_no_contextlib_nested) - register(dict_constructor_with_list_copy) - register(check_python3_xrange) - register(check_python3_no_iteritems) - register(check_python3_no_iterkeys) - register(check_python3_no_itervalues) diff --git a/glance/i18n.py b/glance/i18n.py deleted file mode 100644 index 
0a8ac675..00000000 --- a/glance/i18n.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_i18n import * # noqa - -_translators = TranslatorFactory(domain='glance') - -# The primary translation function using the well-known name "_" -_ = _translators.primary - - -# i18n log translation functions are deprecated. While removing the invocations -# requires a lot of reviewing effort, we decide to make it as no-op functions. -def _LI(msg): - return msg - - -def _LW(msg): - return msg - - -def _LE(msg): - return msg - - -def _LC(msg): - return msg diff --git a/glance/image_cache/__init__.py b/glance/image_cache/__init__.py deleted file mode 100644 index 31c33a95..00000000 --- a/glance/image_cache/__init__.py +++ /dev/null @@ -1,442 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -LRU Cache for Image Data -""" - -import hashlib - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils -from oslo_utils import importutils -from oslo_utils import units - -from glance.common import exception -from glance.common import utils -from glance.i18n import _, _LE, _LI, _LW - -LOG = logging.getLogger(__name__) - -image_cache_opts = [ - cfg.StrOpt('image_cache_driver', default='sqlite', - choices=('sqlite', 'xattr'), ignore_case=True, - help=_(""" -The driver to use for image cache management. - -This configuration option provides the flexibility to choose between the -different image-cache drivers available. An image-cache driver is responsible -for providing the essential functions of image-cache like write images to/read -images from cache, track age and usage of cached images, provide a list of -cached images, fetch size of the cache, queue images for caching and clean up -the cache, etc. - -The essential functions of a driver are defined in the base class -``glance.image_cache.drivers.base.Driver``. All image-cache drivers (existing -and prospective) must implement this interface. Currently available drivers -are ``sqlite`` and ``xattr``. These drivers primarily differ in the way they -store the information about cached images: - * The ``sqlite`` driver uses a sqlite database (which sits on every glance - node locally) to track the usage of cached images. - * The ``xattr`` driver uses the extended attributes of files to store this - information. It also requires a filesystem that sets ``atime`` on the files - when accessed. - -Possible values: - * sqlite - * xattr - -Related options: - * None - -""")), - - cfg.IntOpt('image_cache_max_size', default=10 * units.Gi, # 10 GB - min=0, - help=_(""" -The upper limit on cache size, in bytes, after which the cache-pruner cleans -up the image cache. - -NOTE: This is just a threshold for cache-pruner to act upon. 
It is NOT a -hard limit beyond which the image cache would never grow. In fact, depending -on how often the cache-pruner runs and how quickly the cache fills, the image -cache can far exceed the size specified here very easily. Hence, care must be -taken to appropriately schedule the cache-pruner and in setting this limit. - -Glance caches an image when it is downloaded. Consequently, the size of the -image cache grows over time as the number of downloads increases. To keep the -cache size from becoming unmanageable, it is recommended to run the -cache-pruner as a periodic task. When the cache pruner is kicked off, it -compares the current size of image cache and triggers a cleanup if the image -cache grew beyond the size specified here. After the cleanup, the size of -cache is less than or equal to size specified here. - -Possible values: - * Any non-negative integer - -Related options: - * None - -""")), - - cfg.IntOpt('image_cache_stall_time', default=86400, # 24 hours - min=0, - help=_(""" -The amount of time, in seconds, an incomplete image remains in the cache. - -Incomplete images are images for which download is in progress. Please see the -description of configuration option ``image_cache_dir`` for more detail. -Sometimes, due to various reasons, it is possible the download may hang and -the incompletely downloaded image remains in the ``incomplete`` directory. -This configuration option sets a time limit on how long the incomplete images -should remain in the ``incomplete`` directory before they are cleaned up. -Once an incomplete image spends more time than is specified here, it'll be -removed by cache-cleaner on its next run. - -It is recommended to run cache-cleaner as a periodic task on the Glance API -nodes to keep the incomplete images from occupying disk space. - -Possible values: - * Any non-negative integer - -Related options: - * None - -""")), - - cfg.StrOpt('image_cache_dir', - help=_(""" -Base directory for image cache. 
- -This is the location where image data is cached and served out of. All cached -images are stored directly under this directory. This directory also contains -three subdirectories, namely, ``incomplete``, ``invalid`` and ``queue``. - -The ``incomplete`` subdirectory is the staging area for downloading images. An -image is first downloaded to this directory. When the image download is -successful it is moved to the base directory. However, if the download fails, -the partially downloaded image file is moved to the ``invalid`` subdirectory. - -The ``queue``subdirectory is used for queuing images for download. This is -used primarily by the cache-prefetcher, which can be scheduled as a periodic -task like cache-pruner and cache-cleaner, to cache images ahead of their usage. -Upon receiving the request to cache an image, Glance touches a file in the -``queue`` directory with the image id as the file name. The cache-prefetcher, -when running, polls for the files in ``queue`` directory and starts -downloading them in the order they were created. When the download is -successful, the zero-sized file is deleted from the ``queue`` directory. -If the download fails, the zero-sized file remains and it'll be retried the -next time cache-prefetcher runs. - -Possible values: - * A valid path - -Related options: - * ``image_cache_sqlite_db`` - -""")), -] - -CONF = cfg.CONF -CONF.register_opts(image_cache_opts) - - -class ImageCache(object): - - """Provides an LRU cache for image data.""" - - def __init__(self): - self.init_driver() - - def init_driver(self): - """ - Create the driver for the cache - """ - driver_name = CONF.image_cache_driver - driver_module = (__name__ + '.drivers.' + driver_name + '.Driver') - try: - self.driver_class = importutils.import_class(driver_module) - LOG.info(_LI("Image cache loaded driver '%s'."), driver_name) - except ImportError as import_err: - LOG.warn(_LW("Image cache driver " - "'%(driver_name)s' failed to load. 
" - "Got error: '%(import_err)s."), - {'driver_name': driver_name, - 'import_err': import_err}) - - driver_module = __name__ + '.drivers.sqlite.Driver' - LOG.info(_LI("Defaulting to SQLite driver.")) - self.driver_class = importutils.import_class(driver_module) - self.configure_driver() - - def configure_driver(self): - """ - Configure the driver for the cache and, if it fails to configure, - fall back to using the SQLite driver which has no odd dependencies - """ - try: - self.driver = self.driver_class() - self.driver.configure() - except exception.BadDriverConfiguration as config_err: - driver_module = self.driver_class.__module__ - LOG.warn(_LW("Image cache driver " - "'%(driver_module)s' failed to configure. " - "Got error: '%(config_err)s"), - {'driver_module': driver_module, - 'config_err': config_err}) - LOG.info(_LI("Defaulting to SQLite driver.")) - default_module = __name__ + '.drivers.sqlite.Driver' - self.driver_class = importutils.import_class(default_module) - self.driver = self.driver_class() - self.driver.configure() - - def is_cached(self, image_id): - """ - Returns True if the image with the supplied ID has its image - file cached. - - :param image_id: Image ID - """ - return self.driver.is_cached(image_id) - - def is_queued(self, image_id): - """ - Returns True if the image identifier is in our cache queue. - - :param image_id: Image ID - """ - return self.driver.is_queued(image_id) - - def get_cache_size(self): - """ - Returns the total size in bytes of the image cache. - """ - return self.driver.get_cache_size() - - def get_hit_count(self, image_id): - """ - Return the number of hits that an image has - - :param image_id: Opaque image identifier - """ - return self.driver.get_hit_count(image_id) - - def get_cached_images(self): - """ - Returns a list of records about cached images. 
- """ - return self.driver.get_cached_images() - - def delete_all_cached_images(self): - """ - Removes all cached image files and any attributes about the images - and returns the number of cached image files that were deleted. - """ - return self.driver.delete_all_cached_images() - - def delete_cached_image(self, image_id): - """ - Removes a specific cached image file and any attributes about the image - - :param image_id: Image ID - """ - self.driver.delete_cached_image(image_id) - - def delete_all_queued_images(self): - """ - Removes all queued image files and any attributes about the images - and returns the number of queued image files that were deleted. - """ - return self.driver.delete_all_queued_images() - - def delete_queued_image(self, image_id): - """ - Removes a specific queued image file and any attributes about the image - - :param image_id: Image ID - """ - self.driver.delete_queued_image(image_id) - - def prune(self): - """ - Removes all cached image files above the cache's maximum - size. Returns a tuple containing the total number of cached - files removed and the total size of all pruned image files. - """ - max_size = CONF.image_cache_max_size - current_size = self.driver.get_cache_size() - if max_size > current_size: - LOG.debug("Image cache has free space, skipping prune...") - return (0, 0) - - overage = current_size - max_size - LOG.debug("Image cache currently %(overage)d bytes over max " - "size. 
Starting prune to max size of %(max_size)d ", - {'overage': overage, 'max_size': max_size}) - - total_bytes_pruned = 0 - total_files_pruned = 0 - entry = self.driver.get_least_recently_accessed() - while entry and current_size > max_size: - image_id, size = entry - LOG.debug("Pruning '%(image_id)s' to free %(size)d bytes", - {'image_id': image_id, 'size': size}) - self.driver.delete_cached_image(image_id) - total_bytes_pruned = total_bytes_pruned + size - total_files_pruned = total_files_pruned + 1 - current_size = current_size - size - entry = self.driver.get_least_recently_accessed() - - LOG.debug("Pruning finished pruning. " - "Pruned %(total_files_pruned)d and " - "%(total_bytes_pruned)d.", - {'total_files_pruned': total_files_pruned, - 'total_bytes_pruned': total_bytes_pruned}) - return total_files_pruned, total_bytes_pruned - - def clean(self, stall_time=None): - """ - Cleans up any invalid or incomplete cached images. The cache driver - decides what that means... - """ - self.driver.clean(stall_time) - - def queue_image(self, image_id): - """ - This adds a image to be cache to the queue. - - If the image already exists in the queue or has already been - cached, we return False, True otherwise - - :param image_id: Image ID - """ - return self.driver.queue_image(image_id) - - def get_caching_iter(self, image_id, image_checksum, image_iter): - """ - Returns an iterator that caches the contents of an image - while the image contents are read through the supplied - iterator. 
- - :param image_id: Image ID - :param image_checksum: checksum expected to be generated while - iterating over image data - :param image_iter: Iterator that will read image contents - """ - if not self.driver.is_cacheable(image_id): - return image_iter - - LOG.debug("Tee'ing image '%s' into cache", image_id) - - return self.cache_tee_iter(image_id, image_iter, image_checksum) - - def cache_tee_iter(self, image_id, image_iter, image_checksum): - try: - current_checksum = hashlib.md5() - - with self.driver.open_for_write(image_id) as cache_file: - for chunk in image_iter: - try: - cache_file.write(chunk) - finally: - current_checksum.update(chunk) - yield chunk - cache_file.flush() - - if (image_checksum and - image_checksum != current_checksum.hexdigest()): - msg = _("Checksum verification failed. Aborted " - "caching of image '%s'.") % image_id - raise exception.GlanceException(msg) - - except exception.GlanceException as e: - with excutils.save_and_reraise_exception(): - # image_iter has given us bad, (size_checked_iter has found a - # bad length), or corrupt data (checksum is wrong). - LOG.exception(encodeutils.exception_to_unicode(e)) - except Exception as e: - LOG.exception(_LE("Exception encountered while tee'ing " - "image '%(image_id)s' into cache: %(error)s. " - "Continuing with response.") % - {'image_id': image_id, - 'error': encodeutils.exception_to_unicode(e)}) - - # If no checksum provided continue responding even if - # caching failed. - for chunk in image_iter: - yield chunk - - def cache_image_iter(self, image_id, image_iter, image_checksum=None): - """ - Cache an image with supplied iterator. 
- - :param image_id: Image ID - :param image_file: Iterator retrieving image chunks - :param image_checksum: Checksum of image - - :returns: True if image file was cached, False otherwise - """ - if not self.driver.is_cacheable(image_id): - return False - - for chunk in self.get_caching_iter(image_id, image_checksum, - image_iter): - pass - return True - - def cache_image_file(self, image_id, image_file): - """ - Cache an image file. - - :param image_id: Image ID - :param image_file: Image file to cache - - :returns: True if image file was cached, False otherwise - """ - CHUNKSIZE = 64 * units.Mi - - return self.cache_image_iter(image_id, - utils.chunkiter(image_file, CHUNKSIZE)) - - def open_for_read(self, image_id): - """ - Open and yield file for reading the image file for an image - with supplied identifier. - - :note Upon successful reading of the image file, the image's - hit count will be incremented. - - :param image_id: Image ID - """ - return self.driver.open_for_read(image_id) - - def get_image_size(self, image_id): - """ - Return the size of the image file for an image with supplied - identifier. - - :param image_id: Image ID - """ - return self.driver.get_image_size(image_id) - - def get_queued_images(self): - """ - Returns a list of image IDs that are in the queue. The - list should be sorted by the time the image ID was inserted - into the queue. - """ - return self.driver.get_queued_images() diff --git a/glance/image_cache/base.py b/glance/image_cache/base.py deleted file mode 100644 index e26b49de..00000000 --- a/glance/image_cache/base.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from glance.image_cache import ImageCache - - -class CacheApp(object): - - def __init__(self): - self.cache = ImageCache() diff --git a/glance/image_cache/cleaner.py b/glance/image_cache/cleaner.py deleted file mode 100644 index 33ca26d1..00000000 --- a/glance/image_cache/cleaner.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -""" -Cleans up any invalid cache entries -""" - -from glance.image_cache import base - - -class Cleaner(base.CacheApp): - - def run(self): - self.cache.clean() diff --git a/glance/image_cache/client.py b/glance/image_cache/client.py deleted file mode 100644 index 22160152..00000000 --- a/glance/image_cache/client.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslo_serialization import jsonutils as json - -from glance.common import client as base_client -from glance.common import exception -from glance.i18n import _ - - -class CacheClient(base_client.BaseClient): - - DEFAULT_PORT = 9292 - DEFAULT_DOC_ROOT = '/v1' - - def delete_cached_image(self, image_id): - """ - Delete a specified image from the cache - """ - self.do_request("DELETE", "/cached_images/%s" % image_id) - return True - - def get_cached_images(self, **kwargs): - """ - Returns a list of images stored in the image cache. - """ - res = self.do_request("GET", "/cached_images") - data = json.loads(res.read())['cached_images'] - return data - - def get_queued_images(self, **kwargs): - """ - Returns a list of images queued for caching - """ - res = self.do_request("GET", "/queued_images") - data = json.loads(res.read())['queued_images'] - return data - - def delete_all_cached_images(self): - """ - Delete all cached images - """ - res = self.do_request("DELETE", "/cached_images") - data = json.loads(res.read()) - num_deleted = data['num_deleted'] - return num_deleted - - def queue_image_for_caching(self, image_id): - """ - Queue an image for prefetching into cache - """ - self.do_request("PUT", "/queued_images/%s" % image_id) - return True - - def delete_queued_image(self, image_id): - """ - Delete a specified image from the cache queue - """ - self.do_request("DELETE", "/queued_images/%s" % image_id) - return True - - def delete_all_queued_images(self): - """ - Delete all queued images - """ - res = self.do_request("DELETE", 
"/queued_images") - data = json.loads(res.read()) - num_deleted = data['num_deleted'] - return num_deleted - - -def get_client(host, port=None, timeout=None, use_ssl=False, username=None, - password=None, tenant=None, - auth_url=None, auth_strategy=None, - auth_token=None, region=None, - is_silent_upload=False, insecure=False): - """ - Returns a new client Glance client object based on common kwargs. - If an option isn't specified falls back to common environment variable - defaults. - """ - - if auth_url or os.getenv('OS_AUTH_URL'): - force_strategy = 'keystone' - else: - force_strategy = None - - creds = { - 'username': username or - os.getenv('OS_AUTH_USER', os.getenv('OS_USERNAME')), - 'password': password or - os.getenv('OS_AUTH_KEY', os.getenv('OS_PASSWORD')), - 'tenant': tenant or - os.getenv('OS_AUTH_TENANT', os.getenv('OS_TENANT_NAME')), - 'auth_url': auth_url or - os.getenv('OS_AUTH_URL'), - 'strategy': force_strategy or - auth_strategy or - os.getenv('OS_AUTH_STRATEGY', 'noauth'), - 'region': region or - os.getenv('OS_REGION_NAME'), - } - - if creds['strategy'] == 'keystone' and not creds['auth_url']: - msg = _("--os_auth_url option or OS_AUTH_URL environment variable " - "required when keystone authentication strategy is enabled\n") - raise exception.ClientConfigurationError(msg) - - return CacheClient( - host=host, - port=port, - timeout=timeout, - use_ssl=use_ssl, - auth_token=auth_token or - os.getenv('OS_TOKEN'), - creds=creds, - insecure=insecure, - configure_via_auth=False) diff --git a/glance/image_cache/drivers/__init__.py b/glance/image_cache/drivers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/image_cache/drivers/base.py b/glance/image_cache/drivers/base.py deleted file mode 100644 index 40453e22..00000000 --- a/glance/image_cache/drivers/base.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Base attribute driver class -""" - -import os.path - -from oslo_config import cfg -from oslo_log import log as logging - -from glance.common import exception -from glance.common import utils -from glance.i18n import _ - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - - -class Driver(object): - - def configure(self): - """ - Configure the driver to use the stored configuration options - Any store that needs special configuration should implement - this method. If the store was not able to successfully configure - itself, it should raise `exception.BadDriverConfiguration` - """ - # Here we set up the various file-based image cache paths - # that we need in order to find the files in different states - # of cache management. 
- self.set_paths() - - def set_paths(self): - """ - Creates all necessary directories under the base cache directory - """ - - self.base_dir = CONF.image_cache_dir - if self.base_dir is None: - msg = _('Failed to read %s from config') % 'image_cache_dir' - LOG.error(msg) - driver = self.__class__.__module__ - raise exception.BadDriverConfiguration(driver_name=driver, - reason=msg) - - self.incomplete_dir = os.path.join(self.base_dir, 'incomplete') - self.invalid_dir = os.path.join(self.base_dir, 'invalid') - self.queue_dir = os.path.join(self.base_dir, 'queue') - - dirs = [self.incomplete_dir, self.invalid_dir, self.queue_dir] - - for path in dirs: - utils.safe_mkdirs(path) - - def get_cache_size(self): - """ - Returns the total size in bytes of the image cache. - """ - raise NotImplementedError - - def get_cached_images(self): - """ - Returns a list of records about cached images. - - The list of records shall be ordered by image ID and shall look like:: - - [ - { - 'image_id': , - 'hits': INTEGER, - 'last_modified': ISO_TIMESTAMP, - 'last_accessed': ISO_TIMESTAMP, - 'size': INTEGER - }, ... - ] - - """ - return NotImplementedError - - def is_cached(self, image_id): - """ - Returns True if the image with the supplied ID has its image - file cached. - - :param image_id: Image ID - """ - raise NotImplementedError - - def is_cacheable(self, image_id): - """ - Returns True if the image with the supplied ID can have its - image file cached, False otherwise. - - :param image_id: Image ID - """ - raise NotImplementedError - - def is_queued(self, image_id): - """ - Returns True if the image identifier is in our cache queue. - - :param image_id: Image ID - """ - raise NotImplementedError - - def delete_all_cached_images(self): - """ - Removes all cached image files and any attributes about the images - and returns the number of cached image files that were deleted. 
- """ - raise NotImplementedError - - def delete_cached_image(self, image_id): - """ - Removes a specific cached image file and any attributes about the image - - :param image_id: Image ID - """ - raise NotImplementedError - - def delete_all_queued_images(self): - """ - Removes all queued image files and any attributes about the images - and returns the number of queued image files that were deleted. - """ - raise NotImplementedError - - def delete_queued_image(self, image_id): - """ - Removes a specific queued image file and any attributes about the image - - :param image_id: Image ID - """ - raise NotImplementedError - - def queue_image(self, image_id): - """ - Puts an image identifier in a queue for caching. Return True - on successful add to the queue, False otherwise... - - :param image_id: Image ID - """ - - def clean(self, stall_time=None): - """ - Dependent on the driver, clean up and destroy any invalid or incomplete - cached images - """ - raise NotImplementedError - - def get_least_recently_accessed(self): - """ - Return a tuple containing the image_id and size of the least recently - accessed cached file, or None if no cached files. - """ - raise NotImplementedError - - def open_for_write(self, image_id): - """ - Open a file for writing the image file for an image - with supplied identifier. - - :param image_id: Image ID - """ - raise NotImplementedError - - def open_for_read(self, image_id): - """ - Open and yield file for reading the image file for an image - with supplied identifier. 
- - :param image_id: Image ID - """ - raise NotImplementedError - - def get_image_filepath(self, image_id, cache_status='active'): - """ - This crafts an absolute path to a specific entry - - :param image_id: Image ID - :param cache_status: Status of the image in the cache - """ - if cache_status == 'active': - return os.path.join(self.base_dir, str(image_id)) - return os.path.join(self.base_dir, cache_status, str(image_id)) - - def get_image_size(self, image_id): - """ - Return the size of the image file for an image with supplied - identifier. - - :param image_id: Image ID - """ - path = self.get_image_filepath(image_id) - return os.path.getsize(path) - - def get_queued_images(self): - """ - Returns a list of image IDs that are in the queue. The - list should be sorted by the time the image ID was inserted - into the queue. - """ - raise NotImplementedError diff --git a/glance/image_cache/drivers/sqlite.py b/glance/image_cache/drivers/sqlite.py deleted file mode 100644 index 4bed3c75..00000000 --- a/glance/image_cache/drivers/sqlite.py +++ /dev/null @@ -1,508 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Cache driver that uses SQLite to store information about cached images -""" - -from __future__ import absolute_import -from contextlib import contextmanager -import os -import sqlite3 -import stat -import time - -from eventlet import sleep -from eventlet import timeout -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import fileutils - -from glance.common import exception -from glance.i18n import _, _LE, _LI, _LW -from glance.image_cache.drivers import base - -LOG = logging.getLogger(__name__) - -sqlite_opts = [ - cfg.StrOpt('image_cache_sqlite_db', default='cache.db', - help=_(""" -The relative path to sqlite file database that will be used for image cache -management. - -This is a relative path to the sqlite file database that tracks the age and -usage statistics of image cache. The path is relative to image cache base -directory, specified by the configuration option ``image_cache_dir``. - -This is a lightweight database with just one table. - -Possible values: - * A valid relative path to sqlite file database - -Related options: - * ``image_cache_dir`` - -""")), -] - -CONF = cfg.CONF -CONF.register_opts(sqlite_opts) - -DEFAULT_SQL_CALL_TIMEOUT = 2 - - -class SqliteConnection(sqlite3.Connection): - - """ - SQLite DB Connection handler that plays well with eventlet, - slightly modified from Swift's similar code. 
- """ - - def __init__(self, *args, **kwargs): - self.timeout_seconds = kwargs.get('timeout', DEFAULT_SQL_CALL_TIMEOUT) - kwargs['timeout'] = 0 - sqlite3.Connection.__init__(self, *args, **kwargs) - - def _timeout(self, call): - with timeout.Timeout(self.timeout_seconds): - while True: - try: - return call() - except sqlite3.OperationalError as e: - if 'locked' not in str(e): - raise - sleep(0.05) - - def execute(self, *args, **kwargs): - return self._timeout(lambda: sqlite3.Connection.execute( - self, *args, **kwargs)) - - def commit(self): - return self._timeout(lambda: sqlite3.Connection.commit(self)) - - -def dict_factory(cur, row): - return {col[0]: row[idx] for idx, col in enumerate(cur.description)} - - -class Driver(base.Driver): - - """ - Cache driver that uses xattr file tags and requires a filesystem - that has atimes set. - """ - - def configure(self): - """ - Configure the driver to use the stored configuration options - Any store that needs special configuration should implement - this method. If the store was not able to successfully configure - itself, it should raise `exception.BadDriverConfiguration` - """ - super(Driver, self).configure() - - # Create the SQLite database that will hold our cache attributes - self.initialize_db() - - def initialize_db(self): - db = CONF.image_cache_sqlite_db - self.db_path = os.path.join(self.base_dir, db) - lockutils.set_defaults(self.base_dir) - - @lockutils.synchronized('image_cache_db_init', external=True) - def create_db(): - try: - conn = sqlite3.connect(self.db_path, check_same_thread=False, - factory=SqliteConnection) - conn.executescript(""" - CREATE TABLE IF NOT EXISTS cached_images ( - image_id TEXT PRIMARY KEY, - last_accessed REAL DEFAULT 0.0, - last_modified REAL DEFAULT 0.0, - size INTEGER DEFAULT 0, - hits INTEGER DEFAULT 0, - checksum TEXT - ); - """) - conn.close() - except sqlite3.DatabaseError as e: - msg = _("Failed to initialize the image cache database. 
" - "Got error: %s") % e - LOG.error(msg) - raise exception.BadDriverConfiguration(driver_name='sqlite', - reason=msg) - - create_db() - - def get_cache_size(self): - """ - Returns the total size in bytes of the image cache. - """ - sizes = [] - for path in self.get_cache_files(self.base_dir): - if path == self.db_path: - continue - file_info = os.stat(path) - sizes.append(file_info[stat.ST_SIZE]) - return sum(sizes) - - def get_hit_count(self, image_id): - """ - Return the number of hits that an image has. - - :param image_id: Opaque image identifier - """ - if not self.is_cached(image_id): - return 0 - - hits = 0 - with self.get_db() as db: - cur = db.execute("""SELECT hits FROM cached_images - WHERE image_id = ?""", - (image_id,)) - hits = cur.fetchone()[0] - return hits - - def get_cached_images(self): - """ - Returns a list of records about cached images. - """ - LOG.debug("Gathering cached image entries.") - with self.get_db() as db: - cur = db.execute("""SELECT - image_id, hits, last_accessed, last_modified, size - FROM cached_images - ORDER BY image_id""") - cur.row_factory = dict_factory - return [r for r in cur] - - def is_cached(self, image_id): - """ - Returns True if the image with the supplied ID has its image - file cached. - - :param image_id: Image ID - """ - return os.path.exists(self.get_image_filepath(image_id)) - - def is_cacheable(self, image_id): - """ - Returns True if the image with the supplied ID can have its - image file cached, False otherwise. - - :param image_id: Image ID - """ - # Make sure we're not already cached or caching the image - return not (self.is_cached(image_id) or - self.is_being_cached(image_id)) - - def is_being_cached(self, image_id): - """ - Returns True if the image with supplied id is currently - in the process of having its image file cached. 
- - :param image_id: Image ID - """ - path = self.get_image_filepath(image_id, 'incomplete') - return os.path.exists(path) - - def is_queued(self, image_id): - """ - Returns True if the image identifier is in our cache queue. - - :param image_id: Image ID - """ - path = self.get_image_filepath(image_id, 'queue') - return os.path.exists(path) - - def delete_all_cached_images(self): - """ - Removes all cached image files and any attributes about the images - """ - deleted = 0 - with self.get_db() as db: - for path in self.get_cache_files(self.base_dir): - delete_cached_file(path) - deleted += 1 - db.execute("""DELETE FROM cached_images""") - db.commit() - return deleted - - def delete_cached_image(self, image_id): - """ - Removes a specific cached image file and any attributes about the image - - :param image_id: Image ID - """ - path = self.get_image_filepath(image_id) - with self.get_db() as db: - delete_cached_file(path) - db.execute("""DELETE FROM cached_images WHERE image_id = ?""", - (image_id, )) - db.commit() - - def delete_all_queued_images(self): - """ - Removes all queued image files and any attributes about the images - """ - files = [f for f in self.get_cache_files(self.queue_dir)] - for file in files: - fileutils.delete_if_exists(file) - return len(files) - - def delete_queued_image(self, image_id): - """ - Removes a specific queued image file and any attributes about the image - - :param image_id: Image ID - """ - path = self.get_image_filepath(image_id, 'queue') - fileutils.delete_if_exists(path) - - def clean(self, stall_time=None): - """ - Delete any image files in the invalid directory and any - files in the incomplete directory that are older than a - configurable amount of time. 
- """ - self.delete_invalid_files() - - if stall_time is None: - stall_time = CONF.image_cache_stall_time - - now = time.time() - older_than = now - stall_time - self.delete_stalled_files(older_than) - - def get_least_recently_accessed(self): - """ - Return a tuple containing the image_id and size of the least recently - accessed cached file, or None if no cached files. - """ - with self.get_db() as db: - cur = db.execute("""SELECT image_id FROM cached_images - ORDER BY last_accessed LIMIT 1""") - try: - image_id = cur.fetchone()[0] - except TypeError: - # There are no more cached images - return None - - path = self.get_image_filepath(image_id) - try: - file_info = os.stat(path) - size = file_info[stat.ST_SIZE] - except OSError: - size = 0 - return image_id, size - - @contextmanager - def open_for_write(self, image_id): - """ - Open a file for writing the image file for an image - with supplied identifier. - - :param image_id: Image ID - """ - incomplete_path = self.get_image_filepath(image_id, 'incomplete') - - def commit(): - with self.get_db() as db: - final_path = self.get_image_filepath(image_id) - LOG.debug("Fetch finished, moving " - "'%(incomplete_path)s' to '%(final_path)s'", - dict(incomplete_path=incomplete_path, - final_path=final_path)) - os.rename(incomplete_path, final_path) - - # Make sure that we "pop" the image from the queue... 
- if self.is_queued(image_id): - fileutils.delete_if_exists( - self.get_image_filepath(image_id, 'queue')) - - filesize = os.path.getsize(final_path) - now = time.time() - - db.execute("""INSERT INTO cached_images - (image_id, last_accessed, last_modified, hits, size) - VALUES (?, ?, ?, 0, ?)""", - (image_id, now, now, filesize)) - db.commit() - - def rollback(e): - with self.get_db() as db: - if os.path.exists(incomplete_path): - invalid_path = self.get_image_filepath(image_id, 'invalid') - - LOG.warn(_LW("Fetch of cache file failed (%(e)s), rolling " - "back by moving '%(incomplete_path)s' to " - "'%(invalid_path)s'") % - {'e': e, - 'incomplete_path': incomplete_path, - 'invalid_path': invalid_path}) - os.rename(incomplete_path, invalid_path) - - db.execute("""DELETE FROM cached_images - WHERE image_id = ?""", (image_id, )) - db.commit() - - try: - with open(incomplete_path, 'wb') as cache_file: - yield cache_file - except Exception as e: - with excutils.save_and_reraise_exception(): - rollback(e) - else: - commit() - finally: - # if the generator filling the cache file neither raises an - # exception, nor completes fetching all data, neither rollback - # nor commit will have been called, so the incomplete file - # will persist - in that case remove it as it is unusable - # example: ^c from client fetch - if os.path.exists(incomplete_path): - rollback('incomplete fetch') - - @contextmanager - def open_for_read(self, image_id): - """ - Open and yield file for reading the image file for an image - with supplied identifier. - - :param image_id: Image ID - """ - path = self.get_image_filepath(image_id) - with open(path, 'rb') as cache_file: - yield cache_file - now = time.time() - with self.get_db() as db: - db.execute("""UPDATE cached_images - SET hits = hits + 1, last_accessed = ? 
- WHERE image_id = ?""", - (now, image_id)) - db.commit() - - @contextmanager - def get_db(self): - """ - Returns a context manager that produces a database connection that - self-closes and calls rollback if an error occurs while using the - database connection - """ - conn = sqlite3.connect(self.db_path, check_same_thread=False, - factory=SqliteConnection) - conn.row_factory = sqlite3.Row - conn.text_factory = str - conn.execute('PRAGMA synchronous = NORMAL') - conn.execute('PRAGMA count_changes = OFF') - conn.execute('PRAGMA temp_store = MEMORY') - try: - yield conn - except sqlite3.DatabaseError as e: - msg = _LE("Error executing SQLite call. Got error: %s") % e - LOG.error(msg) - conn.rollback() - finally: - conn.close() - - def queue_image(self, image_id): - """ - This adds a image to be cache to the queue. - - If the image already exists in the queue or has already been - cached, we return False, True otherwise - - :param image_id: Image ID - """ - if self.is_cached(image_id): - LOG.info(_LI("Not queueing image '%s'. Already cached."), image_id) - return False - - if self.is_being_cached(image_id): - LOG.info(_LI("Not queueing image '%s'. Already being " - "written to cache"), image_id) - return False - - if self.is_queued(image_id): - LOG.info(_LI("Not queueing image '%s'. Already queued."), image_id) - return False - - path = self.get_image_filepath(image_id, 'queue') - - # Touch the file to add it to the queue - with open(path, "w"): - pass - - return True - - def delete_invalid_files(self): - """ - Removes any invalid cache entries - """ - for path in self.get_cache_files(self.invalid_dir): - fileutils.delete_if_exists(path) - LOG.info(_LI("Removed invalid cache file %s"), path) - - def delete_stalled_files(self, older_than): - """ - Removes any incomplete cache entries older than a - supplied modified time. - - :param older_than: Files written to on or before this timestamp - will be deleted. 
- """ - for path in self.get_cache_files(self.incomplete_dir): - if os.path.getmtime(path) < older_than: - try: - fileutils.delete_if_exists(path) - LOG.info(_LI("Removed stalled cache file %s"), path) - except Exception as e: - msg = (_LW("Failed to delete file %(path)s. " - "Got error: %(e)s"), - dict(path=path, e=e)) - LOG.warn(msg) - - def get_queued_images(self): - """ - Returns a list of image IDs that are in the queue. The - list should be sorted by the time the image ID was inserted - into the queue. - """ - files = [f for f in self.get_cache_files(self.queue_dir)] - items = [] - for path in files: - mtime = os.path.getmtime(path) - items.append((mtime, os.path.basename(path))) - - items.sort() - return [image_id for (modtime, image_id) in items] - - def get_cache_files(self, basepath): - """ - Returns cache files in the supplied directory - - :param basepath: Directory to look in for cache files - """ - for fname in os.listdir(basepath): - path = os.path.join(basepath, fname) - if path != self.db_path and os.path.isfile(path): - yield path - - -def delete_cached_file(path): - LOG.debug("Deleting image cache file '%s'", path) - fileutils.delete_if_exists(path) diff --git a/glance/image_cache/drivers/xattr.py b/glance/image_cache/drivers/xattr.py deleted file mode 100644 index 89a397dd..00000000 --- a/glance/image_cache/drivers/xattr.py +++ /dev/null @@ -1,501 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Cache driver that uses xattr file tags and requires a filesystem -that has atimes set. - -Assumptions -=========== - -1. Cache data directory exists on a filesytem that updates atime on - reads ('noatime' should NOT be set) - -2. Cache data directory exists on a filesystem that supports xattrs. - This is optional, but highly recommended since it allows us to - present ops with useful information pertaining to the cache, like - human readable filenames and statistics. - -3. `glance-prune` is scheduled to run as a periodic job via cron. This - is needed to run the LRU prune strategy to keep the cache size - within the limits set by the config file. - - -Cache Directory Notes -===================== - -The image cache data directory contains the main cache path, where the -active cache entries and subdirectories for handling partial downloads -and errored-out cache images. - -The layout looks like: - -$image_cache_dir/ - entry1 - entry2 - ... - incomplete/ - invalid/ - queue/ -""" - -from __future__ import absolute_import -from contextlib import contextmanager -import errno -import os -import stat -import time - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils -from oslo_utils import fileutils -import six -import xattr - -from glance.common import exception -from glance.i18n import _, _LI -from glance.image_cache.drivers import base - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - - -class Driver(base.Driver): - - """ - Cache driver that uses xattr file tags and requires a filesystem - that has atimes set. - """ - - def configure(self): - """ - Configure the driver to use the stored configuration options - Any store that needs special configuration should implement - this method. 
If the store was not able to successfully configure - itself, it should raise `exception.BadDriverConfiguration` - """ - # Here we set up the various file-based image cache paths - # that we need in order to find the files in different states - # of cache management. - self.set_paths() - - # We do a quick attempt to write a user xattr to a temporary file - # to check that the filesystem is even enabled to support xattrs - image_cache_dir = self.base_dir - fake_image_filepath = os.path.join(image_cache_dir, 'checkme') - with open(fake_image_filepath, 'wb') as fake_file: - fake_file.write(b"XXX") - fake_file.flush() - try: - set_xattr(fake_image_filepath, 'hits', '1') - except IOError as e: - if e.errno == errno.EOPNOTSUPP: - msg = (_("The device housing the image cache directory " - "%(image_cache_dir)s does not support xattr. It is" - " likely you need to edit your fstab and add the " - "user_xattr option to the appropriate line for the" - " device housing the cache directory.") % - {'image_cache_dir': image_cache_dir}) - LOG.error(msg) - raise exception.BadDriverConfiguration(driver_name="xattr", - reason=msg) - else: - # Cleanup after ourselves... - fileutils.delete_if_exists(fake_image_filepath) - - def get_cache_size(self): - """ - Returns the total size in bytes of the image cache. - """ - sizes = [] - for path in get_all_regular_files(self.base_dir): - file_info = os.stat(path) - sizes.append(file_info[stat.ST_SIZE]) - return sum(sizes) - - def get_hit_count(self, image_id): - """ - Return the number of hits that an image has. - - :param image_id: Opaque image identifier - """ - if not self.is_cached(image_id): - return 0 - - path = self.get_image_filepath(image_id) - return int(get_xattr(path, 'hits', default=0)) - - def get_cached_images(self): - """ - Returns a list of records about cached images. 
- """ - LOG.debug("Gathering cached image entries.") - entries = [] - for path in get_all_regular_files(self.base_dir): - image_id = os.path.basename(path) - - entry = {'image_id': image_id} - file_info = os.stat(path) - entry['last_modified'] = file_info[stat.ST_MTIME] - entry['last_accessed'] = file_info[stat.ST_ATIME] - entry['size'] = file_info[stat.ST_SIZE] - entry['hits'] = self.get_hit_count(image_id) - - entries.append(entry) - return entries - - def is_cached(self, image_id): - """ - Returns True if the image with the supplied ID has its image - file cached. - - :param image_id: Image ID - """ - return os.path.exists(self.get_image_filepath(image_id)) - - def is_cacheable(self, image_id): - """ - Returns True if the image with the supplied ID can have its - image file cached, False otherwise. - - :param image_id: Image ID - """ - # Make sure we're not already cached or caching the image - return not (self.is_cached(image_id) or - self.is_being_cached(image_id)) - - def is_being_cached(self, image_id): - """ - Returns True if the image with supplied id is currently - in the process of having its image file cached. - - :param image_id: Image ID - """ - path = self.get_image_filepath(image_id, 'incomplete') - return os.path.exists(path) - - def is_queued(self, image_id): - """ - Returns True if the image identifier is in our cache queue. 
- """ - path = self.get_image_filepath(image_id, 'queue') - return os.path.exists(path) - - def delete_all_cached_images(self): - """ - Removes all cached image files and any attributes about the images - """ - deleted = 0 - for path in get_all_regular_files(self.base_dir): - delete_cached_file(path) - deleted += 1 - return deleted - - def delete_cached_image(self, image_id): - """ - Removes a specific cached image file and any attributes about the image - - :param image_id: Image ID - """ - path = self.get_image_filepath(image_id) - delete_cached_file(path) - - def delete_all_queued_images(self): - """ - Removes all queued image files and any attributes about the images - """ - files = [f for f in get_all_regular_files(self.queue_dir)] - for file in files: - fileutils.delete_if_exists(file) - return len(files) - - def delete_queued_image(self, image_id): - """ - Removes a specific queued image file and any attributes about the image - - :param image_id: Image ID - """ - path = self.get_image_filepath(image_id, 'queue') - fileutils.delete_if_exists(path) - - def get_least_recently_accessed(self): - """ - Return a tuple containing the image_id and size of the least recently - accessed cached file, or None if no cached files. - """ - stats = [] - for path in get_all_regular_files(self.base_dir): - file_info = os.stat(path) - stats.append((file_info[stat.ST_ATIME], # access time - file_info[stat.ST_SIZE], # size in bytes - path)) # absolute path - - if not stats: - return None - - stats.sort() - return os.path.basename(stats[0][2]), stats[0][1] - - @contextmanager - def open_for_write(self, image_id): - """ - Open a file for writing the image file for an image - with supplied identifier. 
- - :param image_id: Image ID - """ - incomplete_path = self.get_image_filepath(image_id, 'incomplete') - - def set_attr(key, value): - set_xattr(incomplete_path, key, value) - - def commit(): - set_attr('hits', 0) - - final_path = self.get_image_filepath(image_id) - LOG.debug("Fetch finished, moving " - "'%(incomplete_path)s' to '%(final_path)s'", - dict(incomplete_path=incomplete_path, - final_path=final_path)) - os.rename(incomplete_path, final_path) - - # Make sure that we "pop" the image from the queue... - if self.is_queued(image_id): - LOG.debug("Removing image '%s' from queue after " - "caching it.", image_id) - fileutils.delete_if_exists( - self.get_image_filepath(image_id, 'queue')) - - def rollback(e): - set_attr('error', encodeutils.exception_to_unicode(e)) - - invalid_path = self.get_image_filepath(image_id, 'invalid') - LOG.debug("Fetch of cache file failed (%(e)s), rolling back by " - "moving '%(incomplete_path)s' to " - "'%(invalid_path)s'", - {'e': encodeutils.exception_to_unicode(e), - 'incomplete_path': incomplete_path, - 'invalid_path': invalid_path}) - os.rename(incomplete_path, invalid_path) - - try: - with open(incomplete_path, 'wb') as cache_file: - yield cache_file - except Exception as e: - with excutils.save_and_reraise_exception(): - rollback(e) - else: - commit() - finally: - # if the generator filling the cache file neither raises an - # exception, nor completes fetching all data, neither rollback - # nor commit will have been called, so the incomplete file - # will persist - in that case remove it as it is unusable - # example: ^c from client fetch - if os.path.exists(incomplete_path): - rollback('incomplete fetch') - - @contextmanager - def open_for_read(self, image_id): - """ - Open and yield file for reading the image file for an image - with supplied identifier. 
- - :param image_id: Image ID - """ - path = self.get_image_filepath(image_id) - with open(path, 'rb') as cache_file: - yield cache_file - path = self.get_image_filepath(image_id) - inc_xattr(path, 'hits', 1) - - def queue_image(self, image_id): - """ - This adds a image to be cache to the queue. - - If the image already exists in the queue or has already been - cached, we return False, True otherwise - - :param image_id: Image ID - """ - if self.is_cached(image_id): - LOG.info(_LI("Not queueing image '%s'. Already cached."), image_id) - return False - - if self.is_being_cached(image_id): - LOG.info(_LI("Not queueing image '%s'. Already being " - "written to cache"), image_id) - return False - - if self.is_queued(image_id): - LOG.info(_LI("Not queueing image '%s'. Already queued."), image_id) - return False - - path = self.get_image_filepath(image_id, 'queue') - LOG.debug("Queueing image '%s'.", image_id) - - # Touch the file to add it to the queue - with open(path, "w"): - pass - - return True - - def get_queued_images(self): - """ - Returns a list of image IDs that are in the queue. The - list should be sorted by the time the image ID was inserted - into the queue. 
- """ - files = [f for f in get_all_regular_files(self.queue_dir)] - items = [] - for path in files: - mtime = os.path.getmtime(path) - items.append((mtime, os.path.basename(path))) - - items.sort() - return [image_id for (modtime, image_id) in items] - - def _reap_old_files(self, dirpath, entry_type, grace=None): - now = time.time() - reaped = 0 - for path in get_all_regular_files(dirpath): - mtime = os.path.getmtime(path) - age = now - mtime - if not grace: - LOG.debug("No grace period, reaping '%(path)s'" - " immediately", {'path': path}) - delete_cached_file(path) - reaped += 1 - elif age > grace: - LOG.debug("Cache entry '%(path)s' exceeds grace period, " - "(%(age)i s > %(grace)i s)", - {'path': path, 'age': age, 'grace': grace}) - delete_cached_file(path) - reaped += 1 - - LOG.info(_LI("Reaped %(reaped)s %(entry_type)s cache entries"), - {'reaped': reaped, 'entry_type': entry_type}) - return reaped - - def reap_invalid(self, grace=None): - """Remove any invalid cache entries - - :param grace: Number of seconds to keep an invalid entry around for - debugging purposes. If None, then delete immediately. - """ - return self._reap_old_files(self.invalid_dir, 'invalid', grace=grace) - - def reap_stalled(self, grace=None): - """Remove any stalled cache entries - - :param grace: Number of seconds to keep an invalid entry around for - debugging purposes. If None, then delete immediately. - """ - return self._reap_old_files(self.incomplete_dir, 'stalled', - grace=grace) - - def clean(self, stall_time=None): - """ - Delete any image files in the invalid directory and any - files in the incomplete directory that are older than a - configurable amount of time. 
- """ - self.reap_invalid() - - if stall_time is None: - stall_time = CONF.image_cache_stall_time - - self.reap_stalled(stall_time) - - -def get_all_regular_files(basepath): - for fname in os.listdir(basepath): - path = os.path.join(basepath, fname) - if os.path.isfile(path): - yield path - - -def delete_cached_file(path): - LOG.debug("Deleting image cache file '%s'", path) - fileutils.delete_if_exists(path) - - -def _make_namespaced_xattr_key(key, namespace='user'): - """ - Create a fully-qualified xattr-key by including the intended namespace. - - Namespacing differs among OSes[1]: - - FreeBSD: user, system - Linux: user, system, trusted, security - MacOS X: not needed - - Mac OS X won't break if we include a namespace qualifier, so, for - simplicity, we always include it. - - -- - [1] http://en.wikipedia.org/wiki/Extended_file_attributes - """ - namespaced_key = ".".join([namespace, key]) - return namespaced_key - - -def get_xattr(path, key, **kwargs): - """Return the value for a particular xattr - - If the key doesn't not exist, or xattrs aren't supported by the file - system then a KeyError will be raised, that is, unless you specify a - default using kwargs. - """ - namespaced_key = _make_namespaced_xattr_key(key) - try: - return xattr.getxattr(path, namespaced_key) - except IOError: - if 'default' in kwargs: - return kwargs['default'] - else: - raise - - -def set_xattr(path, key, value): - """Set the value of a specified xattr. - - If xattrs aren't supported by the file-system, we skip setting the value. - """ - namespaced_key = _make_namespaced_xattr_key(key) - if not isinstance(value, six.binary_type): - value = str(value) - if six.PY3: - value = value.encode('utf-8') - xattr.setxattr(path, namespaced_key, value) - - -def inc_xattr(path, key, n=1): - """ - Increment the value of an xattr (assuming it is an integer). - - BEWARE, this code *does* have a RACE CONDITION, since the - read/update/write sequence is not atomic. 
- - Since the use-case for this function is collecting stats--not critical-- - the benefits of simple, lock-free code out-weighs the possibility of an - occasional hit not being counted. - """ - count = int(get_xattr(path, key)) - count += n - set_xattr(path, key, str(count)) diff --git a/glance/image_cache/prefetcher.py b/glance/image_cache/prefetcher.py deleted file mode 100644 index 0e907033..00000000 --- a/glance/image_cache/prefetcher.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Prefetches images into the Image Cache -""" - -import eventlet -import glance_store -from oslo_log import log as logging - -from glance.common import exception -from glance import context -from glance.i18n import _LI, _LW -from glance.image_cache import base -import glance.registry.client.v1.api as registry - -LOG = logging.getLogger(__name__) - - -class Prefetcher(base.CacheApp): - - def __init__(self): - super(Prefetcher, self).__init__() - registry.configure_registry_client() - registry.configure_registry_admin_creds() - - def fetch_image_into_cache(self, image_id): - ctx = context.RequestContext(is_admin=True, show_deleted=True) - - try: - image_meta = registry.get_image_metadata(ctx, image_id) - if image_meta['status'] != 'active': - LOG.warn(_LW("Image '%s' is not active. 
Not caching.") % - image_id) - return False - - except exception.NotFound: - LOG.warn(_LW("No metadata found for image '%s'") % image_id) - return False - - location = image_meta['location'] - image_data, image_size = glance_store.get_from_backend(location, - context=ctx) - LOG.debug("Caching image '%s'", image_id) - cache_tee_iter = self.cache.cache_tee_iter(image_id, image_data, - image_meta['checksum']) - # Image is tee'd into cache and checksum verified - # as we iterate - list(cache_tee_iter) - return True - - def run(self): - - images = self.cache.get_queued_images() - if not images: - LOG.debug("Nothing to prefetch.") - return True - - num_images = len(images) - LOG.debug("Found %d images to prefetch", num_images) - - pool = eventlet.GreenPool(num_images) - results = pool.imap(self.fetch_image_into_cache, images) - successes = sum([1 for r in results if r is True]) - if successes != num_images: - LOG.warn(_LW("Failed to successfully cache all " - "images in queue.")) - return False - - LOG.info(_LI("Successfully cached all %d images"), num_images) - return True diff --git a/glance/image_cache/pruner.py b/glance/image_cache/pruner.py deleted file mode 100644 index c5d106a5..00000000 --- a/glance/image_cache/pruner.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Prunes the Image Cache -""" - -from glance.image_cache import base - - -class Pruner(base.CacheApp): - - def run(self): - self.cache.prune() diff --git a/glance/locale/de/LC_MESSAGES/glance.po b/glance/locale/de/LC_MESSAGES/glance.po deleted file mode 100644 index 8db0fade..00000000 --- a/glance/locale/de/LC_MESSAGES/glance.po +++ /dev/null @@ -1,2169 +0,0 @@ -# Translations template for glance. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the glance project. -# -# Translators: -# Carsten Duch , 2014 -# Ettore Atalan , 2014 -# Laera Loris , 2013 -# Robert Simai, 2014 -# Andreas Jaeger , 2016. #zanata -# Robert Simai , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: glance 15.0.0.0b3.dev29\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-06-23 20:54+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-12-06 08:09+0000\n" -"Last-Translator: Robert Simai \n" -"Language: de\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" -"Language-Team: German\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "%(cls)s exception was raised in the last rpc call: %(val)s" -msgstr "Eine %(cls)s-Ausnahme ist im letzten RPC-Aufruf aufgetreten: %(val)s" - -#, python-format -msgid "%(m_id)s not found in the member list of the image %(i_id)s." -msgstr "%(m_id)s in der Mitgliedsliste des Abbild %(i_id)s nicht gefunden." - -#, python-format -msgid "%(serv)s (pid %(pid)s) is running..." -msgstr "%(serv)s (pid %(pid)s) läuft..." - -#, python-format -msgid "%(serv)s appears to already be running: %(pid)s" -msgstr "%(serv)s scheint bereits aktiv zu sein: %(pid)s" - -#, python-format -msgid "" -"%(strategy)s is registered as a module twice. %(module)s is not being used." 
-msgstr "" -"%(strategy)s ist als Modul doppelt registriert. %(module)s wird nicht " -"verwendet." - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Could not load the " -"filesystem store" -msgstr "" -"%(task_id)s von %(task_type)s sind nicht ordnungsgemäß konfiguriert. Laden " -"des Dateisystemspeichers nicht möglich" - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Missing work dir: " -"%(work_dir)s" -msgstr "" -"%(task_id)s von %(task_type)s sind nicht ordnungsgemäß konfiguriert. " -"Fehlendes Arbeitsverzeichnis: %(work_dir)s" - -#, python-format -msgid "%(verb)sing %(serv)s" -msgstr "%(verb)sing %(serv)s" - -#, python-format -msgid "%(verb)sing %(serv)s with %(conf)s" -msgstr "%(serv)s mit %(conf)s %(verb)s" - -#, python-format -msgid "" -"%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " -"address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " -"separately from the port (i.e., \"[fe80::a:b:c]:9876\")." -msgstr "" -"%s Geben Sie ein Host:Port-Paar an, wobei 'Host' eine IPv4-Adresse, eine " -"IPv6-Adresse, ein Hostname oder ein vollständig qualifizierter Domänenname " -"ist. Bei Verwendung einer IPv6-Adresse schließen Sie diese in Klammern ein, " -"damit sie vom Port getrennt ist (d. h. \"[fe80::a:b:c]:9876\")." - -#, python-format -msgid "%s can't contain 4 byte unicode characters." -msgstr "%s darf keine 4-Byte-Unicode-Zeichen enthalten." - -#, python-format -msgid "%s is already stopped" -msgstr "%s ist bereits gestoppt" - -#, python-format -msgid "%s is stopped" -msgstr "%s ist gestoppt" - -msgid "" -"--os_auth_url option or OS_AUTH_URL environment variable required when " -"keystone authentication strategy is enabled\n" -msgstr "" -"Option --os_auth_url oder Umgebungsvariable OS_AUTH_URL erforderlich, wenn " -"die Keystone-Authentifizierungsstrategie aktiviert ist\n" - -msgid "A body is not expected with this request." 
-msgstr "Es wird kein Body bei dieser Anforderung erwartet. " - -#, python-format -msgid "" -"A metadata definition object with name=%(object_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"Ein Metadatendefinitionsobjekt namens %(object_name)s ist bereits in " -"Namensbereich %(namespace_name)s nicht gefunden." - -#, python-format -msgid "" -"A metadata definition property with name=%(property_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"Eine Metadatendefinitionseigenschaft namens %(property_name)s ist bereits in " -"Namensbereich %(namespace_name)s vorhanden. " - -#, python-format -msgid "" -"A metadata definition resource-type with name=%(resource_type_name)s already " -"exists." -msgstr "" -"Ein Ressourcentyp %(resource_type_name)s der Metadatendefinition ist bereits " -"vorhanden. " - -msgid "A set of URLs to access the image file kept in external store" -msgstr "URLs für den Zugriff auf die Abbilddatei im externen Speicher" - -msgid "Amount of disk space (in GB) required to boot image." -msgstr "" -"Menge an Plattenspeicher (in GB), die zum Booten des Abbildes erforderlich " -"ist." - -msgid "Amount of ram (in MB) required to boot image." -msgstr "" -"Menge an Arbeitsspeicher (in MB), die zum Booten des Abbildes erforderlich " -"ist." 
- -msgid "An identifier for the image" -msgstr "Eine ID für das Abbild" - -msgid "An identifier for the image member (tenantId)" -msgstr "Eine ID für das Abbildelement (tenantId)" - -msgid "An identifier for the owner of this task" -msgstr "Eine ID für den Eigentümer diesen Tasks" - -msgid "An identifier for the task" -msgstr "Eine ID für die Task" - -msgid "An image file url" -msgstr "URL der Abbilddatei" - -msgid "An image schema url" -msgstr "URL des Abbildschemas" - -msgid "An image self url" -msgstr "'self'-URL für Abbild" - -#, python-format -msgid "An image with identifier %s already exists" -msgstr "Ein Abbild mit ID %s ist bereits vorhanden" - -msgid "An import task exception occurred" -msgstr "Es ist eine Ausnahme bei einer Importtask eingetreten." - -msgid "An object with the same identifier already exists." -msgstr "Ein Objekt mit der gleichen ID ist bereits vorhanden." - -msgid "An object with the same identifier is currently being operated on." -msgstr "An einem Objekt mit dieser ID wird derzeit eine Operation ausgeführt. " - -msgid "An object with the specified identifier was not found." -msgstr "Ein Objekt mit der angegebenen ID wurde nicht gefunden." - -msgid "An unknown exception occurred" -msgstr "Eine unbekannte Ausnahme ist aufgetreten" - -msgid "An unknown task exception occurred" -msgstr "Eine unbekannte Taskausnahme ist aufgetreten" - -#, python-format -msgid "Attempt to upload duplicate image: %s" -msgstr "Versuch doppeltes Abbild hochzuladen: %s" - -msgid "Attempted to update Location field for an image not in queued status." -msgstr "" -"Versuch, Adressfeld für ein Abbild zu aktualisieren, das sich nicht im " -"Warteschlangenmodus befindet." - -#, python-format -msgid "Attribute '%(property)s' is read-only." -msgstr "Attribut '%(property)s' ist schreibgeschützt." - -#, python-format -msgid "Attribute '%(property)s' is reserved." -msgstr "Attribut '%(property)s' ist reserviert." - -#, python-format -msgid "Attribute '%s' is read-only." 
-msgstr "Attribut '%s' ist schreibgeschützt." - -#, python-format -msgid "Attribute '%s' is reserved." -msgstr "Attribut '%s' ist reserviert." - -msgid "Attribute container_format can be only replaced for a queued image." -msgstr "" -"Attribut 'container_format' kann nur durch ein Abbild in der Warteschlange " -"ersetzt werden. " - -msgid "Attribute disk_format can be only replaced for a queued image." -msgstr "" -"Attribut 'disk_format' kann nur durch ein Abbild in der Warteschlange " -"ersetzt werden. " - -#, python-format -msgid "Auth service at URL %(url)s not found." -msgstr "Authentifizierungsservice unter URL %(url)s nicht gefunden." - -#, python-format -msgid "" -"Authentication error - the token may have expired during file upload. " -"Deleting image data for %s." -msgstr "" -"Authentifizierungsfehler: Das Token ist möglicherweise beim Hochladen der " -"Datei abgelaufen. Die Abbilddaten für %s werden gelöscht." - -msgid "Authorization failed." -msgstr "Authorisierung fehlgeschlagen." - -msgid "Available categories:" -msgstr "Verfügbare Kategorien:" - -#, python-format -msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." -msgstr "" -"Falsches \"%s\"-Abfragefilterformat. Verwenden Sie die ISO 8601 DateTime-" -"Notation." - -#, python-format -msgid "Bad Command: %s" -msgstr "Fehlerhaftes Kommando: %s" - -#, python-format -msgid "Bad header: %(header_name)s" -msgstr "Fehlerhafter Header: %(header_name)s" - -#, python-format -msgid "Bad value passed to filter %(filter)s got %(val)s" -msgstr "Falscher an Filter %(filter)s übergebener Wert hat %(val)s abgerufen" - -#, python-format -msgid "Badly formed S3 URI: %(uri)s" -msgstr "Falsches Format der S3 URI: %(uri)s" - -#, python-format -msgid "Badly formed credentials '%(creds)s' in Swift URI" -msgstr "Fehlerhafter Berechtigungsnachweis '%(creds)s' in Swift-URI" - -msgid "Badly formed credentials in Swift URI." -msgstr "Fehlerhafter Berechtigungsnachweis in Swift-URI." 
- -msgid "Body expected in request." -msgstr "Text in Anforderung erwartet." - -msgid "Cannot be a negative value" -msgstr "Darf kein negativer Wert sein" - -msgid "Cannot be a negative value." -msgstr "Darf kein negativer Wert sein." - -#, python-format -msgid "Cannot convert image %(key)s '%(value)s' to an integer." -msgstr "" -"Abbild %(key)s '%(value)s' kann nicht in eine Ganzzahl konvertiert werden. " - -msgid "Cannot remove last location in the image." -msgstr "Die letzte Position im Abbild kann nicht entfernt werden. " - -#, python-format -msgid "Cannot save data for image %(image_id)s: %(error)s" -msgstr "" -"Daten für Abbild %(image_id)s können nicht gespeichert werden: %(error)s" - -msgid "Cannot set locations to empty list." -msgstr "Positionen können nicht auf leere Liste gesetzt werden. " - -msgid "Cannot upload to an unqueued image" -msgstr "" -"Hochladen auf Abbild, das sich nicht in Warteschlange befindet, nicht möglich" - -#, python-format -msgid "Checksum verification failed. Aborted caching of image '%s'." -msgstr "" -"Verifizierung von Kontrollsumme fehlgeschlagen. Zwischenspeichern von Image " -"'%s' abgebrochen." - -msgid "Client disconnected before sending all data to backend" -msgstr "" -"Die Verbindung zum Client wurde beendet, bevor alle Daten zum Backend " -"geschickt wurden" - -msgid "Command not found" -msgstr "Kommando nicht gefunden" - -msgid "Configuration option was not valid" -msgstr "Konfigurationsoption war nicht gültig" - -#, python-format -msgid "Connect error/bad request to Auth service at URL %(url)s." -msgstr "" -"Verbindungsfehler/fehlerhafte Anforderung an Authentifizierungsservice unter " -"URL %(url)s." - -#, python-format -msgid "Constructed URL: %s" -msgstr "Erstellte URL: %s" - -msgid "Container format is not specified." -msgstr "Containerformat wurde nicht angegeben." 
- -msgid "Content-Type must be application/octet-stream" -msgstr "Inhaltstyp muss Anwendungs-/Oktet-Stream sein" - -#, python-format -msgid "Corrupt image download for image %(image_id)s" -msgstr "Fehlerhafter Abbild-Download für Abbild %(image_id)s" - -#, python-format -msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" -msgstr "" -"Keine Bindung an %(host)s:%(port)s möglich nach Versuch über 30 Sekunden" - -msgid "Could not find OVF file in OVA archive file." -msgstr "Es wurde keine OVF-Datei in der OVA-Archivdatei gefunden. " - -#, python-format -msgid "Could not find metadata object %s" -msgstr "Metadatenobjekt %s konnte nicht gefunden werden" - -#, python-format -msgid "Could not find metadata tag %s" -msgstr "Metadatenschlagwort %s konnte nicht gefunden werden" - -#, python-format -msgid "Could not find namespace %s" -msgstr "Namensbereich %s konnte nicht gefunden werden" - -#, python-format -msgid "Could not find property %s" -msgstr "Eigenschaft %s konnte nicht gefunden werden" - -msgid "Could not find required configuration option" -msgstr "Erforderliche Konfigurationsoption konnte nicht gefunden werden" - -#, python-format -msgid "Could not find task %s" -msgstr "Task %s konnte nicht gefunden werden" - -#, python-format -msgid "Could not update image: %s" -msgstr "Abbild konnte nicht aktualisiert werden: %s" - -msgid "Currently, OVA packages containing multiple disk are not supported." -msgstr "Zurzeit werden OVA-Pakete mit mehreren Platten nicht unterstützt. " - -#, python-format -msgid "Data for image_id not found: %s" -msgstr "Daten für image_id nicht gefunden: %s" - -msgid "Data supplied was not valid." -msgstr "Angegebene Daten waren nicht gültig." 
- -msgid "Date and time of image member creation" -msgstr "Datum und Uhrzeit der Erstellung des Abbildelements" - -msgid "Date and time of image registration" -msgstr "Datum und Uhrzeit der Abbildregistrierung " - -msgid "Date and time of last modification of image member" -msgstr "Datum und Uhrzeit der letzten Änderung des Abbildelements" - -msgid "Date and time of namespace creation" -msgstr "Datum und Uhrzeit der Erstellung des Namensbereichs" - -msgid "Date and time of object creation" -msgstr "Datum und Uhrzeit der Objekterstellung" - -msgid "Date and time of resource type association" -msgstr "Datum und Uhrzeit der Ressourcentypzuordnung" - -msgid "Date and time of tag creation" -msgstr "Datum und Uhrzeit der Erstellung des Schlagwortes" - -msgid "Date and time of the last image modification" -msgstr "Datum und Uhrzeit der letzten Abbildänderung" - -msgid "Date and time of the last namespace modification" -msgstr "Datum und Uhrzeit der letzten Änderung des Namensbereichs" - -msgid "Date and time of the last object modification" -msgstr "Datum und Uhrzeit der letzten Objektänderung" - -msgid "Date and time of the last resource type association modification" -msgstr "Datum und Uhrzeit der letzten Änderung der Ressourcentypzuordnung" - -msgid "Date and time of the last tag modification" -msgstr "Datum und Uhrzeit der letzten Schlagwortänderung" - -msgid "Datetime when this resource was created" -msgstr "Datum/Uhrzeit der Erstellung dieser Ressource" - -msgid "Datetime when this resource was updated" -msgstr "Datum/Uhrzeit der Aktualisierung dieser Ressource" - -msgid "Datetime when this resource would be subject to removal" -msgstr "Datum/Uhrzeit, zu dem/der diese Ressource entfernt werden würde" - -#, python-format -msgid "Denying attempt to upload image because it exceeds the quota: %s" -msgstr "" -"Versuch, das Abbild hochzuladen, wird verweigert, weil es das Kontingent " -"überschreitet: %s" - -#, python-format -msgid "Denying attempt to upload image larger 
than %d bytes." -msgstr "" -"Versuch, Abbild hochzuladen, das größer ist als %d Bytes, wird nicht " -"zugelassen." - -msgid "Descriptive name for the image" -msgstr "Beschreibender Name für das Abbild" - -msgid "Disk format is not specified." -msgstr "Plattenformat wurde nicht angegeben." - -#, python-format -msgid "" -"Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" -msgstr "" -"Treiber %(driver_name)s konnte nicht ordnungsgemäß konfiguriert werden. " -"Grund: %(reason)s" - -msgid "" -"Error decoding your request. Either the URL or the request body contained " -"characters that could not be decoded by Glance" -msgstr "" -"Fehler beim Entschlüsseln Ihrer Anforderung. Entweder die URL oder der " -"angeforderte Body enthalten Zeichen, die von Glance nicht entschlüsselt " -"werden konnten. " - -#, python-format -msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" -msgstr "" -"Fehler beim Abrufen der Mitglieder von Abbild %(image_id)s: %(inner_msg)s" - -msgid "Error in store configuration. Adding images to store is disabled." -msgstr "" -"Fehler in Speicherkonfiguration. Hinzufügen von Abbildern zu Speicher ist " -"inaktiviert." - -msgid "Expected a member in the form: {\"member\": \"image_id\"}" -msgstr "" -"Mitglied mit Angabe im folgenden Format erwartet: {\"member\": \"image_id\"}" - -msgid "Expected a status in the form: {\"status\": \"status\"}" -msgstr "" -"Status mit Angabe im folgenden Format erwartet: {\"status\": \"status\"}" - -msgid "External source should not be empty" -msgstr "Externe Quelle darf nicht leer sein." - -#, python-format -msgid "External sources are not supported: '%s'" -msgstr "Externe Quellen werden nicht unterstützt: '%s'" - -#, python-format -msgid "Failed to activate image. Got error: %s" -msgstr "Abbild wurde nicht aktiviert. Fehler: %s" - -#, python-format -msgid "Failed to add image metadata. Got error: %s" -msgstr "Abbildmetadaten wurden nicht hinzugefügt. 
Fehler: %s" - -#, python-format -msgid "Failed to find image %(image_id)s to delete" -msgstr "Zu löschendes Abbild %(image_id)s wurde nicht gefunden" - -#, python-format -msgid "Failed to find image to delete: %s" -msgstr "Zu löschendes Abbild wurde nicht gefunden: %s" - -#, python-format -msgid "Failed to find image to update: %s" -msgstr "Zu aktualisierendes Abbild wurde nicht gefunden: %s" - -#, python-format -msgid "Failed to find resource type %(resourcetype)s to delete" -msgstr "Zu löschender Ressourcentyp %(resourcetype)s wurde nicht gefunden" - -#, python-format -msgid "Failed to initialize the image cache database. Got error: %s" -msgstr "" -"Die Image-Zwischenspeicherdatenbank wurde nicht initialisiert. Fehler: %s" - -#, python-format -msgid "Failed to read %s from config" -msgstr "Fehler beim Lesen von %s aus Konfiguration" - -#, python-format -msgid "Failed to reserve image. Got error: %s" -msgstr "Abbild wurde nicht reserviert. Fehler: %s" - -#, python-format -msgid "Failed to update image metadata. Got error: %s" -msgstr "Abbildmetadaten wurden nicht aktualisiert. Fehler: %s" - -#, python-format -msgid "Failed to upload image %s" -msgstr "Fehler beim Hochladen des Abbildes %s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to HTTP error: " -"%(error)s" -msgstr "" -"Fehler beim Hochladen von Abbilddaten für Abbild %(image_id)s wegen HTTP-" -"Fehler: %(error)s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to internal error: " -"%(error)s" -msgstr "" -"Fehler beim Hochladen der Abbilddaten für das Abbild %(image_id)s auf Grund " -"eines internen Fehlers: %(error)s" - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "Datei %(path)s hat ungültige Sicherungsdatei %(bfile)s. Abbruch." - -msgid "" -"File based imports are not allowed. Please use a non-local source of image " -"data." 
-msgstr "" -"Dateibasierte Importe sind nicht zulässig. Verwenden Sie eine " -"Imagedatenquelle, die nicht lokal ist." - -msgid "Forbidden image access" -msgstr "Unzulässiger Zugriff auf Abbild" - -#, python-format -msgid "Forbidden to delete a %s image." -msgstr "Es ist nicht erlaubt, ein %s Abbild zu löschen." - -#, python-format -msgid "Forbidden to delete image: %s" -msgstr "Löschen von Abbild nicht erlaubt: %s" - -#, python-format -msgid "Forbidden to modify '%(key)s' of %(status)s image." -msgstr "Es ist nicht erlaubt, '%(key)s' des %(status)s-Abbild zu ändern." - -#, python-format -msgid "Forbidden to modify '%s' of image." -msgstr "Ändern von '%s' eines Abbild nicht erlaubt." - -msgid "Forbidden to reserve image." -msgstr "Reservieren von Abbild nicht erlaubt." - -msgid "Forbidden to update deleted image." -msgstr "Aktualisieren von gelöschtem Abbild nicht erlaubt." - -#, python-format -msgid "Forbidden to update image: %s" -msgstr "Aktualisieren von Abbild nicht erlaubt: %s" - -#, python-format -msgid "Forbidden upload attempt: %s" -msgstr "Unerlaubter Uploadversuch: %s" - -#, python-format -msgid "Forbidding request, metadata definition namespace=%s is not visible." -msgstr "" -"Anforderung wird verboten, Metadatendefinitionsnamensbereich %s ist nicht " -"sichtbar. " - -#, python-format -msgid "Forbidding request, task %s is not visible" -msgstr "Anforderung wird nicht zugelassen, Task %s ist nicht sichtbar" - -msgid "Format of the container" -msgstr "Format des Containers" - -msgid "Format of the disk" -msgstr "Format der Festplatte" - -#, python-format -msgid "Host \"%s\" is not valid." -msgstr "Host \"%s\" ist nicht gültig." - -#, python-format -msgid "Host and port \"%s\" is not valid." -msgstr "Host und Port \"%s\" ist nicht gültig." 
- -msgid "" -"Human-readable informative message only included when appropriate (usually " -"on failure)" -msgstr "" -"Informationsnachricht in Klarschrift nur eingeschlossen, wenn zweckdienlich " -"(in der Regel bei einem Fehler)" - -msgid "If true, image will not be deletable." -msgstr "Bei 'true' kann das Abbild nicht gelöscht werden." - -msgid "If true, namespace will not be deletable." -msgstr "Bei 'true' kann der Namensbereich nicht gelöscht werden." - -#, python-format -msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" -msgstr "" -"Abbild %(id)s konnte nicht gelöscht werden, da es verwendet wird: %(exc)s" - -#, python-format -msgid "Image %(id)s not found" -msgstr "Abbild %(id)s nicht gefunden" - -#, python-format -msgid "" -"Image %(image_id)s could not be found after upload. The image may have been " -"deleted during the upload: %(error)s" -msgstr "" -"Abbild %(image_id)s wurde nach dem Upload nicht gefunden. Das Abbild wurde " -"möglicherweise während des Uploads gelöscht: %(error)s" - -#, python-format -msgid "Image %(image_id)s is protected and cannot be deleted." -msgstr "Abbild %(image_id)s ist geschützt und kann nicht gelöscht werden." - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload, cleaning up the chunks uploaded." -msgstr "" -"Abbild %s konnte nach dem Upload nicht gefunden werden. Das Abbild wurde " -"möglicherweise beim Upload gelöscht. Die hochgeladenen Blöcke werden " -"bereinigt." - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload." -msgstr "" -"Abbild %s konnte nach dem Hochladen nicht gefunden werden. Das Abbild ist " -"möglicherweise beim Hochladen gelöscht worden." 
- -#, python-format -msgid "Image %s is deactivated" -msgstr "Abbild %s ist deaktiviert" - -#, python-format -msgid "Image %s is not active" -msgstr "Abbild %s ist nicht aktiv" - -#, python-format -msgid "Image %s not found." -msgstr "Abbild %s nicht gefunden." - -#, python-format -msgid "Image exceeds the storage quota: %s" -msgstr "Das Abbild übersteigt das vorhandene Speicherkontingent: %s" - -msgid "Image id is required." -msgstr "Abbild-ID ist erforderlich." - -msgid "Image is protected" -msgstr "Abbild ist geschützt" - -#, python-format -msgid "Image member limit exceeded for image %(id)s: %(e)s:" -msgstr "Grenzwert für Abbildmitglieder für Abbild %(id)s überschritten: %(e)s:" - -#, python-format -msgid "Image name too long: %d" -msgstr "Abbildname zu lang: %d" - -msgid "Image operation conflicts" -msgstr "Abbildoperationskonflikte" - -#, python-format -msgid "" -"Image status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "" -"Abbild-Statusänderung von %(cur_status)s nach %(new_status)s ist nicht " -"erlaubt" - -#, python-format -msgid "Image storage media is full: %s" -msgstr "Datenträger zum Speichern des Abbildes ist voll: %s" - -#, python-format -msgid "Image tag limit exceeded for image %(id)s: %(e)s:" -msgstr "Grenzwert für Abbildschlagwort für Abbild %(id)s überschritten: %(e)s:" - -#, python-format -msgid "Image upload problem: %s" -msgstr "Problem beim Abbildupload: %s" - -#, python-format -msgid "Image with identifier %s already exists!" -msgstr "Abbild mit ID %s ist bereits vorhanden!" - -#, python-format -msgid "Image with identifier %s has been deleted." -msgstr "Abbild mit ID %s wurde gelöscht. 
" - -#, python-format -msgid "Image with identifier %s not found" -msgstr "Abbild mit ID %s nicht gefunden" - -#, python-format -msgid "Image with the given id %(image_id)s was not found" -msgstr "Abbild mit der angegebenen ID %(image_id)s wurde nicht gefunden" - -#, python-format -msgid "" -"Incorrect auth strategy, expected \"%(expected)s\" but received " -"\"%(received)s\"" -msgstr "" -"Falsche Authentifizierungsstrategie. Erwartet wurde \"%(expected)s\", " -"empfangen wurde jedoch \"%(received)s\"" - -#, python-format -msgid "Incorrect request: %s" -msgstr "Falsche Anforderung: %s" - -#, python-format -msgid "Input does not contain '%(key)s' field" -msgstr "Eingabe enthält nicht das Feld '%(key)s' " - -#, python-format -msgid "Insufficient permissions on image storage media: %s" -msgstr "Nicht ausreichende Berechtigungen auf Abbildspeichermedien: %s" - -#, python-format -msgid "Invalid JSON pointer for this resource: '/%s'" -msgstr "Ungültiger JSON Zeiger für diese Ressource: : '/%s'" - -#, python-format -msgid "Invalid checksum '%s': can't exceed 32 characters" -msgstr "Ungültige Kontrollsumme '%s': Darf 32 Zeichen nicht überschreiten" - -msgid "Invalid configuration in glance-swift conf file." -msgstr "Ungültige Konfiguration in der Glance-Swift-Konfigurationsdatei." - -msgid "Invalid configuration in property protection file." -msgstr "Ungültige Konfiguration in Eigenschaftsschutzdatei. " - -#, python-format -msgid "Invalid container format '%s' for image." -msgstr "Ungültiges Containerformat '%s' für Abbild." - -#, python-format -msgid "Invalid content type %(content_type)s" -msgstr "Ungültiger Inhaltstyp %(content_type)s" - -#, python-format -msgid "Invalid disk format '%s' for image." -msgstr "Ungültiges Plattenformat '%s' für Abbild." - -#, python-format -msgid "Invalid filter value %s. The quote is not closed." -msgstr "Ungültiger Filterwert %s. Das schließende Anführungszeichen fehlt." - -#, python-format -msgid "" -"Invalid filter value %s. 
There is no comma after closing quotation mark." -msgstr "" -"Ungültiger Filterwert %s. Vor dem schließenden Anführungszeichen ist kein " -"Komma." - -#, python-format -msgid "" -"Invalid filter value %s. There is no comma before opening quotation mark." -msgstr "" -"Ungültiger Filterwert %s. Vor dem öffnenden Anführungszeichen ist kein Komma." - -msgid "Invalid image id format" -msgstr "Ungültiges Abbild-ID-Format" - -msgid "Invalid location" -msgstr "Ungültige Position" - -#, python-format -msgid "Invalid location %s" -msgstr "Ungültige Position %s" - -#, python-format -msgid "Invalid location: %s" -msgstr "Ungültiger Ort: %s" - -#, python-format -msgid "" -"Invalid location_strategy option: %(name)s. The valid strategy option(s) " -"is(are): %(strategies)s" -msgstr "" -"Ungültige location_strategy Option: %(name)s. Gültige Optionen sind: " -"%(strategies)s" - -msgid "Invalid locations" -msgstr "Ungültige Positionen" - -#, python-format -msgid "Invalid locations: %s" -msgstr "Unbekannte Stellen: %s" - -msgid "Invalid marker format" -msgstr "Ungültiges Markerformat" - -msgid "Invalid marker. Image could not be found." -msgstr "Ungültiger Marker. Abbild konnte nicht gefunden werden." - -#, python-format -msgid "Invalid membership association: %s" -msgstr "Ungültige Mitgliedschaftszuordnung: %s" - -msgid "" -"Invalid mix of disk and container formats. When setting a disk or container " -"format to one of 'aki', 'ari', or 'ami', the container and disk formats must " -"match." -msgstr "" -"Ungültige Kombination von Platten- und Containerformaten. Beim Festlegen " -"eines Platten- oder Containerformats auf 'aki', 'ari' oder 'ami' müssen die " -"Container- und Plattenformate übereinstimmen." - -#, python-format -msgid "" -"Invalid operation: `%(op)s`. It must be one of the following: %(available)s." -msgstr "" -"Ungültige Operation: '%(op)s'. Es muss eine der folgenden Optionen verwendet " -"werden: %(available)s." - -msgid "Invalid position for adding a location." 
-msgstr "Ungültige Position zum Hinzufügen einer Position." - -msgid "Invalid position for removing a location." -msgstr "Ungültige Stelle zum Entfernen einer Position." - -msgid "Invalid service catalog json." -msgstr "Ungültige Servicekatalog-JSON." - -#, python-format -msgid "Invalid sort direction: %s" -msgstr "Ungültige Sortierrichtung: %s" - -#, python-format -msgid "" -"Invalid sort key: %(sort_key)s. It must be one of the following: " -"%(available)s." -msgstr "" -"Ungültiger Sortierschlüssel: %(sort_key)s. Es muss einer der folgenden sein: " -"%(available)s." - -#, python-format -msgid "Invalid status value: %s" -msgstr "Ungültiger Statuswert: %s" - -#, python-format -msgid "Invalid status: %s" -msgstr "Ungültiger Status: %s" - -#, python-format -msgid "Invalid time format for %s." -msgstr "Ungültiges Zeitformat für %s." - -#, python-format -msgid "Invalid type value: %s" -msgstr "Ungültiger Wert für Typ: %s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition namespace " -"with the same name of %s" -msgstr "" -"Ungültige Aktualisierung. Sie würde zu einer doppelten " -"Metadatendefinitionseigenschaft mit demselben Namen wie %s führen" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Ungültige Aktualisierung. Sie wurde zu einem doppelten " -"Metadatendefinitionsobjekt mit demselben Namen %(name)s im Namensbereich " -"%(namespace_name)s führen." - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Ungültige Aktualisierung. Sie wurde zu einem doppelten " -"Metadatendefinitionsobjekt mit demselben Namen %(name)s im Namensbereich " -"%(namespace_name)s führen." - -#, python-format -msgid "" -"Invalid update. 
It would result in a duplicate metadata definition property " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Ungültige Aktualisierung. Sie würde zu einer doppelten " -"Metadatendefinitionseigenschaft mit demselben Namen %(name)s im " -"Namensbereich %(namespace_name)s führen. " - -#, python-format -msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" -msgstr "Ungültiger Wert '%(value)s' für Parameter '%(param)s': %(extra_msg)s" - -#, python-format -msgid "Invalid value for option %(option)s: %(value)s" -msgstr "Ungültiger Wert für Option %(option)s: %(value)s" - -#, python-format -msgid "Invalid visibility value: %s" -msgstr "Ungültiger Sichtbarkeitswert: %s" - -msgid "It's invalid to provide multiple image sources." -msgstr "Die Angabe von mehreren Abbildquellen ist ungültig." - -msgid "It's not allowed to add locations if locations are invisible." -msgstr "" -"Es ist nicht zulässig, Positionen hinzuzufügen, wenn die Positionen nicht " -"sichtbar sind. " - -msgid "It's not allowed to remove locations if locations are invisible." -msgstr "" -"Es ist nicht zulässig, Positionen zu entfernen, wenn die Positionen nicht " -"sichtbar sind. " - -msgid "It's not allowed to update locations if locations are invisible." -msgstr "" -"Es ist nicht zulässig, Positionen zu aktualisieren, wenn die Positionen " -"nicht sichtbar sind. " - -msgid "List of strings related to the image" -msgstr "Liste mit dem Abbild zugehörigen Zeichenketten" - -msgid "Malformed JSON in request body." -msgstr "Fehlerhafte JSON in Anforderungshauptteil." - -msgid "Maximal age is count of days since epoch." -msgstr "Das maximale Alter entspricht der Anzahl von Tagen seit der Epoche." - -#, python-format -msgid "Maximum redirects (%(redirects)s) was exceeded." -msgstr "Das Maximum an Umleitungen (%(redirects)s) wurde überschritten." 
- -#, python-format -msgid "Member %(member_id)s is duplicated for image %(image_id)s" -msgstr "Mitglied %(member_id)s ist für Abbild %(image_id)s doppelt vorhanden" - -msgid "Member can't be empty" -msgstr "Mitglied darf nicht leer sein" - -msgid "Member to be added not specified" -msgstr "Hinzuzufügendes Element nicht angegeben" - -msgid "Membership could not be found." -msgstr "Mitgliedschaft konnte nicht gefunden werden." - -#, python-format -msgid "" -"Metadata definition namespace %(namespace)s is protected and cannot be " -"deleted." -msgstr "" -"Der Metadatendefinitionsnamensbereich %(namespace)s ist geschützt und kann " -"nicht gelöscht werden." - -#, python-format -msgid "Metadata definition namespace not found for id=%s" -msgstr "Metadatendefinitionsnamensbereich für id=%s nicht gefunden" - -#, python-format -msgid "" -"Metadata definition object %(object_name)s is protected and cannot be " -"deleted." -msgstr "" -"Das Metadatendefinitionsobjekt %(object_name)s ist geschützt und kann nicht " -"gelöscht werden." - -#, python-format -msgid "Metadata definition object not found for id=%s" -msgstr "Metadatendefinitionsobjekt für id=%s nicht gefunden" - -#, python-format -msgid "" -"Metadata definition property %(property_name)s is protected and cannot be " -"deleted." -msgstr "" -"Die Metadatendefinitionseigenschaft %(property_name)s ist geschützt und kann " -"nicht gelöscht werden. " - -#, python-format -msgid "Metadata definition property not found for id=%s" -msgstr "Metadatendefinitionseigenschaft für id=%s nicht gefunden" - -#, python-format -msgid "" -"Metadata definition resource-type %(resource_type_name)s is a seeded-system " -"type and cannot be deleted." -msgstr "" -"Der Ressourcentyp %(resource_type_name)s der Metadatendefinition ist ein " -"Basisdaten-Systemtyp und kann nicht gelöscht werden. " - -#, python-format -msgid "" -"Metadata definition resource-type-association %(resource_type)s is protected " -"and cannot be deleted." 
-msgstr "" -"Die Ressourcentypzuordnung %(resource_type)s der Metadatendefinition ist " -"geschützt und kann nicht gelöscht werden." - -#, python-format -msgid "" -"Metadata definition tag %(tag_name)s is protected and cannot be deleted." -msgstr "" -"Der Metadatendefinitionstag %(tag_name)s ist geschützt und kann nicht " -"gelöscht werden." - -#, python-format -msgid "Metadata definition tag not found for id=%s" -msgstr "Metadatendefinitionstag für id=%s nicht gefunden" - -msgid "Minimal rows limit is 1." -msgstr "Der Wert für die Mindestzeilenanzahl ist 1." - -#, python-format -msgid "Missing required credential: %(required)s" -msgstr "Erforderlicher Berechtigungsnachweis fehlt: %(required)s" - -#, python-format -msgid "" -"Multiple 'image' service matches for region %(region)s. This generally means " -"that a region is required and you have not supplied one." -msgstr "" -"Mehrere 'image'-Serviceübereinstimmungen für Region %(region)s. Dies weist " -"im Allgemeinen darauf hin, dass eine Region erforderlich ist und dass Sie " -"keine angegeben haben." - -msgid "No authenticated user" -msgstr "Kein authentifizierter Benutzer" - -#, python-format -msgid "No image found with ID %s" -msgstr "Es wurde kein Abbild mit der ID %s gefunden" - -#, python-format -msgid "No location found with ID %(loc)s from image %(img)s" -msgstr "Keine Position mit ID %(loc)s von Abbild %(img)s gefunden" - -msgid "No permission to share that image" -msgstr "Keine Berechtigung dieses Abbild freizugeben" - -#, python-format -msgid "Not allowed to create members for image %s." -msgstr "Es ist nicht zulässig, Mitglieder für Abbild %s zu erstellen." - -#, python-format -msgid "Not allowed to deactivate image in status '%s'" -msgstr "Deaktivieren des Abbild im Status '%s' nicht zulässig" - -#, python-format -msgid "Not allowed to delete members for image %s." -msgstr "Es ist nicht zulässig, Mitglieder für Abbild %s zu löschen." 
- -#, python-format -msgid "Not allowed to delete tags for image %s." -msgstr "Es ist nicht zulässig, Schlagwörter für Abbild %s zu löschen." - -#, python-format -msgid "Not allowed to list members for image %s." -msgstr "Es ist nicht zulässig, Mitglieder für Abbild %s aufzulisten." - -#, python-format -msgid "Not allowed to reactivate image in status '%s'" -msgstr "Erneutes Aktivieren des Abbildes im Status '%s' nicht zulässig" - -#, python-format -msgid "Not allowed to update members for image %s." -msgstr "Es ist nicht zulässig, Mitglieder für Abbild %s zu aktualisieren." - -#, python-format -msgid "Not allowed to update tags for image %s." -msgstr "Es ist nicht zulässig, Schlagwörter für Abbild %s zu aktualisieren." - -#, python-format -msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" -msgstr "" -"Hochladen von Abbilddaten für Abbild %(image_id)s nicht zulässig: %(error)s" - -msgid "Number of sort dirs does not match the number of sort keys" -msgstr "" -"Die Anzahl der Sortierverzeichnisse entspricht nicht der Anzahl der " -"Sortierschlüssel" - -msgid "OVA extract is limited to admin" -msgstr "OVA-Extraktion kann nur vom Administrator ausgeführt werden." - -msgid "Old and new sorting syntax cannot be combined" -msgstr "Die alte und die neue Sortiersyntax können nicht kombiniert werden" - -#, python-format -msgid "Operation \"%s\" requires a member named \"value\"." -msgstr "Operation \"%s\" erfordert ein Element mit der Bezeichnung \"value\"." - -msgid "" -"Operation objects must contain exactly one member named \"add\", \"remove\", " -"or \"replace\"." -msgstr "" -"Operationsobjekte müssen genau ein Element mit der Bezeichnung \"add\", " -"\"remove\" oder \"replace\" enthalten." - -msgid "" -"Operation objects must contain only one member named \"add\", \"remove\", or " -"\"replace\"." -msgstr "" -"Operationsobjekte dürfen nur ein Element mit der Bezeichnung \"add\", " -"\"remove\" oder \"replace\" enthalten." 
- -msgid "Operations must be JSON objects." -msgstr "Operationen müssen JSON-Objekte sein." - -#, python-format -msgid "Original locations is not empty: %s" -msgstr "Originalpositionen sind nicht leer: %s" - -msgid "Owner can't be updated by non admin." -msgstr "" -"Eigner kann durch einen Benutzer, der kein Administrator ist, nicht " -"aktualisiert werden." - -msgid "Owner must be specified to create a tag." -msgstr "Der Eigentümer muss zum Erstellen eines Schlagwortes angegeben werden." - -msgid "Owner of the image" -msgstr "Eigentümer des Abbildes" - -msgid "Owner of the namespace." -msgstr "Eigentümer des Namensbereichs. " - -msgid "Param values can't contain 4 byte unicode." -msgstr "Parameterwerte dürfen kein 4-Byte-Unicode enthalten." - -#, python-format -msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." -msgstr "" -"Zeiger `%s` enthält \"~\", das nicht Teil einer erkannten Escapezeichenfolge " -"ist." - -#, python-format -msgid "Pointer `%s` contains adjacent \"/\"." -msgstr "Der Zeiger `%s` enthält ein angrenzendes \"/\"." - -#, python-format -msgid "Pointer `%s` does not contains valid token." -msgstr "Der Zeiger `%s` enthält kein gültiges Token." - -#, python-format -msgid "Pointer `%s` does not start with \"/\"." -msgstr "Zeiger `%s` beginnt nicht mit \"/\"." - -#, python-format -msgid "Pointer `%s` end with \"/\"." -msgstr "Der Zeiger `%s` endet mit einem \"/\"." - -#, python-format -msgid "Port \"%s\" is not valid." -msgstr "Port \"%s\" ist nicht gültig." - -#, python-format -msgid "Process %d not running" -msgstr "Prozess %d wird nicht ausgeführt" - -#, python-format -msgid "Properties %s must be set prior to saving data." -msgstr "Eigenschaften %s müssen vor dem Speichern von Daten festgelegt werden." - -#, python-format -msgid "" -"Property %(property_name)s does not start with the expected resource type " -"association prefix of '%(prefix)s'." 
-msgstr "" -"Eigenschaft %(property_name)s beginnt nicht mit dem erwarteten " -"Zuordnungspräfix für Ressourcentypen '%(prefix)s'." - -#, python-format -msgid "Property %s already present." -msgstr "Eigenschaft %s ist bereits vorhanden." - -#, python-format -msgid "Property %s does not exist." -msgstr "Eigenschaft %s ist nicht vorhanden." - -#, python-format -msgid "Property %s may not be removed." -msgstr "Eigenschaft %s darf nicht entfernt werden." - -#, python-format -msgid "Property %s must be set prior to saving data." -msgstr "Eigenschaft %s muss vor dem Speichern von Daten festgelegt werden." - -#, python-format -msgid "Property '%s' is protected" -msgstr "Eigenschaft '%s' ist geschützt" - -msgid "Property names can't contain 4 byte unicode." -msgstr "Eigenschaftsnamen dürfen kein 4-Byte-Unicode enthalten." - -#, python-format -msgid "" -"Provided image size must match the stored image size. (provided size: " -"%(ps)d, stored size: %(ss)d)" -msgstr "" -"Die angegebene Abbildgröße muss der gespeicherten Abbildgröße entsprechen. " -"(angegebene Größe: %(ps)d, gespeicherte Größe: %(ss)d)" - -#, python-format -msgid "Provided object does not match schema '%(schema)s': %(reason)s" -msgstr "Angegebenes Objekt passt nicht zu Schema '%(schema)s': %(reason)s" - -#, python-format -msgid "Provided status of task is unsupported: %(status)s" -msgstr "Der angegebene Status der Task wird nicht unterstützt: %(status)s" - -#, python-format -msgid "Provided type of task is unsupported: %(type)s" -msgstr "Der angegebene Typ der Task wird nicht unterstützt: %(type)s" - -msgid "Provides a user friendly description of the namespace." -msgstr "" -"Stellt eine benutzerfreundliche Beschreibung des Namensbereichs bereit. " - -msgid "Received invalid HTTP redirect." -msgstr "Ungültige HTTP-Umleitung erhalten." - -#, python-format -msgid "Redirecting to %(uri)s for authorization." -msgstr "Umleitung auf %(uri)s für Autorisierung." 
- -#, python-format -msgid "Registry service can't use %s" -msgstr "Registrierungsdienst kann %s nicht verwenden" - -#, python-format -msgid "Registry was not configured correctly on API server. Reason: %(reason)s" -msgstr "" -"Registrierungsdatenbank wurde nicht ordnungsgemäß auf einem API-Server " -"konfiguriert. Grund: %(reason)s" - -#, python-format -msgid "Reload of %(serv)s not supported" -msgstr "Erneutes Laden von %(serv)s nicht unterstützt" - -#, python-format -msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "%(serv)s (PID %(pid)s) wird mit Signal (%(sig)s) erneut geladen" - -#, python-format -msgid "Removing stale pid file %s" -msgstr "Veraltete PID-Datei %s wird entfernt" - -msgid "Request body must be a JSON array of operation objects." -msgstr "" -"Anforderungshauptteil muss eine JSON-Array mit Operationsobjekten sein." - -msgid "Request must be a list of commands" -msgstr "Die Anfrage muss eine Liste von Kommandos sein" - -#, python-format -msgid "Required store %s is invalid" -msgstr "Der verlangte Speicher %s ist ungültig" - -msgid "" -"Resource type names should be aligned with Heat resource types whenever " -"possible: http://docs.openstack.org/developer/heat/template_guide/openstack." -"html" -msgstr "" -"Ressourcentypennamen sollten möglichst immer an den Heat-Ressourcentypen " -"ausgerichtet werden: http://docs.openstack.org/developer/heat/template_guide/" -"openstack.html" - -msgid "Response from Keystone does not contain a Glance endpoint." -msgstr "Antwort von Keystone enthält keinen Glance-Endpunkt." - -msgid "Scope of image accessibility" -msgstr "Umfang der Abbildzugänglichkeit" - -msgid "Scope of namespace accessibility." -msgstr "Umfang der Zugänglichkeit des Namensbereichs. " - -#, python-format -msgid "Server %(serv)s is stopped" -msgstr "Server %(serv)s wurde gestoppt" - -#, python-format -msgid "Server worker creation failed: %(reason)s." -msgstr "Erstellung von Server-Worker fehlgeschlagen: %(reason)s." 
- -msgid "Signature verification failed" -msgstr "Signaturverifizierung fehlgeschlagen" - -msgid "Size of image file in bytes" -msgstr "Größe der Abbilddatei in Byte " - -msgid "" -"Some resource types allow more than one key / value pair per instance. For " -"example, Cinder allows user and image metadata on volumes. Only the image " -"properties metadata is evaluated by Nova (scheduling or drivers). This " -"property allows a namespace target to remove the ambiguity." -msgstr "" -"Bei manchen Ressourcentypen sind mehrere Schlüssel/Wert-Paare pro Instanz " -"zulässig. Cinder lässt z. B. Benutzer- und Abbildmetadaten für Datenträger " -"zu. Nur die Metadaten der Imageeigenschaften werden von Nova ausgewertet " -"(Planung oder Treiber). Diese Eigenschaft lässt zu, dass ein " -"Namensbereichsziel die Mehrdeutigkeit entfernt. " - -msgid "Sort direction supplied was not valid." -msgstr "Die angegebene Sortierrichtung war nicht gültig. " - -msgid "Sort key supplied was not valid." -msgstr "Der angegebene Sortierschlüssel war nicht gültig. " - -msgid "" -"Specifies the prefix to use for the given resource type. Any properties in " -"the namespace should be prefixed with this prefix when being applied to the " -"specified resource type. Must include prefix separator (e.g. a colon :)." -msgstr "" -"Gibt das Präfix an, das für den angegebenen Ressourcentyp zu verwenden ist. " -"Alle Eigenschaften im Namensbereich sollten dieses Präfix aufweisen, wenn " -"sie auf den angegebenen Ressourcentyp angewendet werden. Muss " -"Präfixtrennzeichen aufweisen (z. B. einen Doppelpunkt :)." - -msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." -msgstr "Status muss \"pending\", \"accepted\" oder \"rejected\" sein." 
- -msgid "Status not specified" -msgstr "Status nicht angegeben" - -msgid "Status of the image" -msgstr "Status des Abbildes" - -#, python-format -msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "" -"Der Statusübergang von %(cur_status)s zu %(new_status)s ist nicht zulässig" - -#, python-format -msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "%(serv)s (PID %(pid)s) wird mit Signal (%(sig)s) gestoppt" - -#, python-format -msgid "Store for image_id not found: %s" -msgstr "Speicher für image_id nicht gefunden: %s" - -#, python-format -msgid "Store for scheme %s not found" -msgstr "Speicher für Schema %s nicht gefunden" - -#, python-format -msgid "" -"Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " -"(%(actual)s) did not match. Setting image status to 'killed'." -msgstr "" -"Angaben für %(attr)s (%(supplied)s) und %(attr)s, die aus dem hochgeladenen " -"Abbild (%(actual)s) generiert wurden, stimmten nicht überein. Abbildstatus " -"wird auf 'killed' gesetzt." - -msgid "Supported values for the 'container_format' image attribute" -msgstr "Unterstützte Werte für das 'container_format' Abbild-Attribut" - -msgid "Supported values for the 'disk_format' image attribute" -msgstr "Unterstützte Werte für das Abbildattribut 'disk_format'" - -#, python-format -msgid "Suppressed respawn as %(serv)s was %(rsn)s." -msgstr "Erneute Generierung wurde unterdrückt, da %(serv)s %(rsn)s war." - -msgid "System SIGHUP signal received." -msgstr "System-SIGHUP-Signal empfangen. " - -#, python-format -msgid "Task '%s' is required" -msgstr "Task '%s' ist erforderlich" - -msgid "Task does not exist" -msgstr "Task ist nicht vorhanden" - -msgid "Task failed due to Internal Error" -msgstr "Task fehlgeschlagen. 
Grund: Interner Fehler" - -msgid "Task was not configured properly" -msgstr "Die Task war nicht ordnungsgemäß konfiguriert" - -#, python-format -msgid "Task with the given id %(task_id)s was not found" -msgstr "Die Task mit der angegebenen ID %(task_id)s wurde nicht gefunden" - -msgid "The \"changes-since\" filter is no longer available on v2." -msgstr "Der Filter \"changes-since\" ist bei Version 2 nicht mehr verfügbar." - -#, python-format -msgid "The CA file you specified %s does not exist" -msgstr "" -"Die von Ihnen angegebene Zertifizierungsstellendatei %s ist nicht vorhanden" - -#, python-format -msgid "" -"The Image %(image_id)s object being created by this task %(task_id)s, is no " -"longer in valid status for further processing." -msgstr "" -"Das Objekt von Abbild %(image_id)s, das von Task %(task_id)s erstellt wurde, " -"befindet sich nicht mehr in einem gültigen Status zur weiteren Verarbeitung." - -msgid "The Store URI was malformed." -msgstr "Die Speicher-URI war fehlerhaft." - -msgid "" -"The URL to the keystone service. If \"use_user_token\" is not in effect and " -"using keystone auth, then URL of keystone can be specified." -msgstr "" -"Die URL des Keystone-Service. Wenn \"use_user_token\" nicht wirksam ist und " -"die Keystone-Authentifizierung verwendet wird, kann die Keystone-URL " -"angegeben werden. " - -msgid "" -"The administrators password. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"Das Administratorkennwort. Wenn \"use_user_token\" nicht wirksam ist, können " -"Berechtigungsnachweise für den Administrator angegeben werden. " - -msgid "" -"The administrators user name. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"Der Administratorname. Wenn \"use_user_token\" nicht wirksam ist, können " -"Berechtigungsnachweise für den Administrator angegeben werden. 
" - -#, python-format -msgid "The cert file you specified %s does not exist" -msgstr "Die von Ihnen angegebene Zertifizierungsdatei %s ist nicht vorhanden" - -msgid "The current status of this task" -msgstr "Der aktuelle Status dieser Task" - -#, python-format -msgid "" -"The device housing the image cache directory %(image_cache_dir)s does not " -"support xattr. It is likely you need to edit your fstab and add the " -"user_xattr option to the appropriate line for the device housing the cache " -"directory." -msgstr "" -"Das Gerät, auf dem sich das Abbild-Zwischenspeicherverzeichnis " -"%(image_cache_dir)s befindet, unterstützt xattr nicht. Wahrscheinlich müssen " -"Sie fstab bearbeiten und die Option user_xattr zur entsprechenden Zeile für " -"das Gerät, auf dem sich das Zwischenspeicherverzeichnis befindet, hinzufügen." - -#, python-format -msgid "" -"The given uri is not valid. Please specify a valid uri from the following " -"list of supported uri %(supported)s" -msgstr "" -"Der angegebene URI ist ungültig. Geben Sie einen gültigen URI aus der " -"folgenden Liste mit unterstützten URIs %(supported)s an." - -#, python-format -msgid "The incoming image is too large: %s" -msgstr "Das eingehende Abbild ist zu groß: %s" - -#, python-format -msgid "The key file you specified %s does not exist" -msgstr "Die von Ihnen angegebene Schlüsseldatei %s ist nicht vorhanden" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image locations. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Der Grenzwert für die zulässige Anzahl an Abbildpositionen wurde " -"überschritten. Versucht: %(attempted)s, Maximum: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image members for this " -"image. Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Der Grenzwert für die zulässige Anzahl an Abbildmitgliedern wurde für dieses " -"Abbild überschritten. 
Versucht: %(attempted)s, Maximum: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Der Grenzwert für die zulässige Anzahl an Abbildeigenschaften wurde " -"überschritten. Versucht: %(attempted)s, Maximum: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(num)s, Maximum: %(quota)s" -msgstr "" -"Der Grenzwert für die zulässige Anzahl an Abbildeigenschaften wurde " -"überschritten. Versucht: %(num)s, Maximum: %(quota)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image tags. Attempted: " -"%(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Der Grenzwert für die zulässige Anzahl an Abbildschlagwörter wurde " -"überschritten. Versucht: %(attempted)s, Maximum: %(maximum)s" - -#, python-format -msgid "The location %(location)s already exists" -msgstr "Die Position %(location)s ist bereits vorhanden" - -#, python-format -msgid "The location data has an invalid ID: %d" -msgstr "Die Position weist eine ungültige ID auf: %d" - -#, python-format -msgid "" -"The metadata definition %(record_type)s with name=%(record_name)s not " -"deleted. Other records still refer to it." -msgstr "" -"Die Metadatendefinition %(record_type)s namens %(record_name)s wurde nicht " -"gelöscht. Andere Datensätze verweisen noch darauf. " - -#, python-format -msgid "The metadata definition namespace=%(namespace_name)s already exists." -msgstr "" -"Der Metadatendefinitionsnamensbereich %(namespace_name)s ist bereits " -"vorhanden. " - -#, python-format -msgid "" -"The metadata definition object with name=%(object_name)s was not found in " -"namespace=%(namespace_name)s." -msgstr "" -"Das Metadatendefinitionsobjekt namens %(object_name)s wurde in Namensbereich " -"%(namespace_name)s nicht gefunden. 
" - -#, python-format -msgid "" -"The metadata definition property with name=%(property_name)s was not found " -"in namespace=%(namespace_name)s." -msgstr "" -"Die Metadatendefinitionseigenschaft namens %(property_name)s wurde nicht in " -"Namensbereich %(namespace_name)s gefunden. " - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s already exists." -msgstr "" -"Die Ressourcentypzuordnung der Metadatendefinition zwischen Ressourcentyp " -"%(resource_type_name)s und Namensbereich %(namespace_name)s ist bereits " -"vorhanden." - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s, was not found." -msgstr "" -"Die Ressourcentypzuordnung der Metadatendefinition zwischen Ressourcentyp " -"%(resource_type_name)s und Namensbereich %(namespace_name)s wurde nicht " -"gefunden." - -#, python-format -msgid "" -"The metadata definition resource-type with name=%(resource_type_name)s, was " -"not found." -msgstr "" -"Der Ressourcentyp %(resource_type_name)s der Metadatendefinition wurde nicht " -"gefunden. " - -#, python-format -msgid "" -"The metadata definition tag with name=%(name)s was not found in namespace=" -"%(namespace_name)s." -msgstr "" -"Der Metadatendefinitionstag namens %(name)s wurde in Namensbereich " -"%(namespace_name)s nicht gefunden." - -msgid "The parameters required by task, JSON blob" -msgstr "Die für die Task erforderlichen Parameter, JSON-Blob-Objekt" - -msgid "The provided image is too large." -msgstr "Das angegebene Abbild ist zu groß." - -msgid "" -"The region for the authentication service. If \"use_user_token\" is not in " -"effect and using keystone auth, then region name can be specified." -msgstr "" -"Die Region für den Authentifizierungsservice. 
Wenn \"use_user_token\" nicht " -"wirksam ist und die Keystone-Authentifizierung verwendet wird, kann der " -"Regionsname angegeben werden. " - -msgid "The request returned 500 Internal Server Error." -msgstr "" -"Die Anforderung hat eine Nachricht vom Typ '500 - interner Serverfehler' " -"zurückgegeben." - -msgid "" -"The request returned 503 Service Unavailable. This generally occurs on " -"service overload or other transient outage." -msgstr "" -"Die Anforderung hat eine Nachricht vom Typ '503 - Service nicht verfügbar' " -"zurückgegeben. Dies geschieht im Allgemeinen bei einer Serviceüberbelastung " -"oder einem anderen temporären Ausfall." - -#, python-format -msgid "" -"The request returned a 302 Multiple Choices. This generally means that you " -"have not included a version indicator in a request URI.\n" -"\n" -"The body of response returned:\n" -"%(body)s" -msgstr "" -"Die Anforderung hat eine Nachricht vom Typ '302 - Mehrere Möglichkeiten' " -"zurückgegeben. Dies weist im Allgemeinen darauf hin, dass Sie bei einem " -"Anfrage-URI keinen Versionsindikator angegeben haben.\n" -"\n" -"Nachrichtentext der zurückgegebenen Antwort:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned a 413 Request Entity Too Large. This generally means " -"that rate limiting or a quota threshold was breached.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"Die Anforderung hat eine Nachricht vom Typ '413 - Anforderungsentität zu " -"groß' zurückgegeben. Dies weist im Allgemeinen darauf hin, dass die " -"Geschwindigkeitsbegrenzung oder ein Kontingentschwellenwert überschritten " -"wurde.\n" -"\n" -"Der Antworttext:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned an unexpected status: %(status)s.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"Die Anforderung hat einen unerwarteten Status zurückgegeben: %(status)s.\n" -"\n" -"Der Antworttext:\n" -"%(body)s" - -msgid "" -"The requested image has been deactivated. 
Image data download is forbidden." -msgstr "" -"Das angeforderte Abbild wurde deaktiviert. Der Download von Abbilddaten ist " -"nicht zulässig. " - -msgid "The result of current task, JSON blob" -msgstr "Das Ergebnis der aktuellen Task, JSON-Blob-Objekt" - -#, python-format -msgid "" -"The size of the data %(image_size)s will exceed the limit. %(remaining)s " -"bytes remaining." -msgstr "" -"Die Größe der Daten, mit denen %(image_size)s den Grenzwert überschreiten " -"wird. %(remaining)s Byte verbleiben." - -#, python-format -msgid "The specified member %s could not be found" -msgstr "Das angegebene Mitglied %s konnte nicht gefunden werden" - -#, python-format -msgid "The specified metadata object %s could not be found" -msgstr "Das angegebene Metadatenobjekt %s konnte nicht gefunden werden" - -#, python-format -msgid "The specified metadata tag %s could not be found" -msgstr "Das angegebene Metadatenschlagwort %s konnte nicht gefunden werden" - -#, python-format -msgid "The specified namespace %s could not be found" -msgstr "Der angegebene Namensbereich %s konnte nicht gefunden werden" - -#, python-format -msgid "The specified property %s could not be found" -msgstr "Die angegebene Eigenschaft %s konnte nicht gefunden werden" - -#, python-format -msgid "The specified resource type %s could not be found " -msgstr "Der angegebene Ressourcentyp %s konnte nicht gefunden werden" - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'" -msgstr "" -"Der Status der Position des gelöschten Abbildes kann nur auf " -"'pending_delete' oder auf 'deleted' gesetzt werden." - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'." -msgstr "" -"Der Status der Position des gelöschten Abbild kann nur auf 'pending_delete' " -"oder auf 'deleted' gesetzt werden." 
- -msgid "The status of this image member" -msgstr "Der Status dieses Abbildelements" - -msgid "" -"The strategy to use for authentication. If \"use_user_token\" is not in " -"effect, then auth strategy can be specified." -msgstr "" -"Die für die Authentifizierung zu verwendende Strategie. Wenn \"use_user_token" -"\" nicht wirksam ist, kann die Authentifizierungsstrategie angegeben werden. " - -#, python-format -msgid "" -"The target member %(member_id)s is already associated with image " -"%(image_id)s." -msgstr "" -"Das Zielmitglied %(member_id)s ist dem Abbild %(image_id)s bereits " -"zugeordnet." - -msgid "" -"The tenant name of the administrative user. If \"use_user_token\" is not in " -"effect, then admin tenant name can be specified." -msgstr "" -"Der Nutzername des Benutzers mit Verwaltungsaufgaben. Wenn \"use_user_token" -"\" nicht wirksam ist, kann der Administratornutzername angegeben werden. " - -msgid "The type of task represented by this content" -msgstr "Der Typ der durch diesen Inhalt dargestellten Task" - -msgid "The unique namespace text." -msgstr "Der eindeutige Text für den Namensbereich. " - -msgid "The user friendly name for the namespace. Used by UI if available." -msgstr "" -"Der benutzerfreundliche Name für den Namensbereich. Wird von der " -"Benutzerschnittstelle verwendet, falls verfügbar. " - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. Error: %(ioe)s" -msgstr "" -"Es ist ein Problem bei %(error_key_name)s %(error_filename)s aufgetreten. " -"Überprüfen Sie dies. Fehler: %(ioe)s" - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. OpenSSL error: %(ce)s" -msgstr "" -"Es ist ein Problem bei %(error_key_name)s %(error_filename)s aufgetreten. " -"Überprüfen Sie dies. OpenSSL-Fehler: %(ce)s" - -#, python-format -msgid "" -"There is a problem with your key pair. 
Please verify that cert " -"%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" -msgstr "" -"Es gibt ein Problem mit Ihrem Schlüsselpaar. Überprüfen Sie, ob das " -"Zertifikat %(cert_file)s und der Schlüssel %(key_file)s zusammengehören. " -"OpenSSL-Fehler %(ce)s" - -msgid "There was an error configuring the client." -msgstr "Fehler bei Konfiguration des Clients." - -msgid "There was an error connecting to a server" -msgstr "Fehler beim Herstellen einer Verbindung zu einem Server." - -msgid "" -"This operation is currently not permitted on Glance Tasks. They are auto " -"deleted after reaching the time based on their expires_at property." -msgstr "" -"Diese Operation ist derzeit bei Glance-Schlagwörtern nicht zulässig. Sie " -"werden bei Erreichen der in der Eigenschaft 'expires_at' festgelegten Zeit " -"automatisch gelöscht." - -msgid "This operation is currently not permitted on Glance images details." -msgstr "Diese Operation ist derzeit bei Glance-Abbilddetails nicht zulässig." - -msgid "" -"Time in hours for which a task lives after, either succeeding or failing" -msgstr "" -"Zeit in Stunden, für die eine Task anschließend aktiv bleibt, entweder bei " -"Erfolg oder bei Fehlschlag" - -msgid "Too few arguments." -msgstr "Zu wenig Argumente" - -msgid "" -"URI cannot contain more than one occurrence of a scheme.If you have " -"specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " -"you need to change it to use the swift+http:// scheme, like so: swift+http://" -"user:pass@authurl.com/v1/container/obj" -msgstr "" -"URI kann nicht mehrere Vorkommen eines Schemas enthalten. Wenn Sie einen URI " -"wie swift://user:pass@http://authurl.com/v1/container/obj angegeben haben, " -"müssen Sie ihn ändern, um das swift+http://-Schema verwenden zu können. 
" -"Beispiel: swift+http://user:pass@authurl.com/v1/container/obj" - -msgid "URL to access the image file kept in external store" -msgstr "URL für den Zugriff auf Abbilddatei in externem Speicher" - -#, python-format -msgid "" -"Unable to create pid file %(pid)s. Running as non-root?\n" -"Falling back to a temp file, you can stop %(service)s service using:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" -msgstr "" -"PID-Datei %(pid)s kann nicht erstellt werden. Wird nicht als Root " -"ausgeführt?\n" -"Es wird auf eine temporäre Datei zurückgegriffen; Sie können den Dienst " -"%(service)s stoppen mithilfe von:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" - -#, python-format -msgid "Unable to filter by unknown operator '%s'." -msgstr "Filtern mit dem unbekannten Operator '%s' nicht möglich." - -msgid "Unable to filter on a range with a non-numeric value." -msgstr "Filtern in einem Bereich mit nicht numerischem Wert nicht möglich." - -msgid "Unable to filter on a unknown operator." -msgstr "Filtern mit einem unbekannten Operator nicht möglich." - -msgid "Unable to filter using the specified operator." -msgstr "Filtern mit dem angegebenen Operator nicht möglich." - -msgid "Unable to filter using the specified range." -msgstr "Filtern mit dem angegebenen Bereich nicht möglich." - -#, python-format -msgid "Unable to find '%s' in JSON Schema change" -msgstr "'%s' kann in JSON-Schemaänderung nicht gefunden werden" - -#, python-format -msgid "" -"Unable to find `op` in JSON Schema change. It must be one of the following: " -"%(available)s." -msgstr "" -"'op' wurde in JSON-Schemaänderung nicht gefunden. Es muss eine der folgenden " -"Optionen verwendet werden: %(available)s." - -msgid "Unable to increase file descriptor limit. Running as non-root?" -msgstr "" -"Grenzwert für Dateideskriptoren kann nicht erhöht werden. Wird nicht als " -"Root ausgeführt?" 
- -#, python-format -msgid "" -"Unable to load %(app_name)s from configuration file %(conf_file)s.\n" -"Got: %(e)r" -msgstr "" -"%(app_name)s kann nicht aus Konfigurationsdatei %(conf_file)s geladen " -"werden.\n" -"Abgerufen: %(e)r" - -#, python-format -msgid "Unable to load schema: %(reason)s" -msgstr "Schema kann nicht geladen werden: %(reason)s" - -#, python-format -msgid "Unable to locate paste config file for %s." -msgstr "Konfigurationsdatei zum Einfügen für %s konnte nicht gefunden werden." - -#, python-format -msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" -msgstr "" -"Hochladen von doppelten Abbilddaten für Abbild %(image_id)s nicht möglich: " -"%(error)s" - -msgid "Unauthorized image access" -msgstr "Unauthorisierter Abbildzugriff" - -msgid "Unexpected body type. Expected list/dict." -msgstr "Unerwarteter Hauptteiltyp. Erwartet wurde list/dict." - -#, python-format -msgid "Unexpected response: %s" -msgstr "Unerwartete Antwort: %s" - -#, python-format -msgid "Unknown auth strategy '%s'" -msgstr "Unbekannte Authentifizierungsstrategie '%s'" - -#, python-format -msgid "Unknown command: %s" -msgstr "Unbekanntes Kommando: %s" - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "Unbekannte Sortierrichtung; muss 'desc' oder 'asc' sein" - -msgid "Unrecognized JSON Schema draft version" -msgstr "Unerkannte JSON-Schemaentwurfsversion" - -msgid "Unrecognized changes-since value" -msgstr "Unerkannter Wert für 'changes-since'" - -#, python-format -msgid "Unsupported sort_dir. Acceptable values: %s" -msgstr "Nicht unterstützter Wert für 'sort_dir'. Zulässige Werte: %s" - -#, python-format -msgid "Unsupported sort_key. Acceptable values: %s" -msgstr "Nicht unterstützter Wert für 'sort_key'. 
Zulässige Werte: %s" - -msgid "Virtual size of image in bytes" -msgstr "Virtuelle Größe des Abbildes in Byte" - -#, python-format -msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" -msgstr "" -"Es wurde 15 Sekunden auf den Abbruch von PID %(pid)s (%(file)s) gewartet; " -"Abbruch" - -msgid "" -"When running server in SSL mode, you must specify both a cert_file and " -"key_file option value in your configuration file" -msgstr "" -"Wenn der Server im SSL-Modus läuft, müssen Sie sowohl für die 'cert_file'- " -"als auch für die 'key_file'-Option in Ihrer Konfigurationsdatei einen Wert " -"angeben" - -msgid "" -"Whether to pass through the user token when making requests to the registry. " -"To prevent failures with token expiration during big files upload, it is " -"recommended to set this parameter to False.If \"use_user_token\" is not in " -"effect, then admin credentials can be specified." -msgstr "" -"Gibt an, ob das Benutzertoken durchlaufen werden soll, wenn Anforderungen an " -"die Registry gesendet werden. Um Fehler mit dem Ablauf des Tokens beim " -"Hochladen von großen Dateien zu verhindern, wird empfohlen, diesen Parameter " -"auf False festzulegen. Wenn \"use_user_token\" nicht wirksam ist, können " -"Berechtigungsnachweise für den Administrator angegeben werden." - -#, python-format -msgid "Wrong command structure: %s" -msgstr "Falsche Kommandostruktur: %s" - -msgid "You are not authenticated." -msgstr "Sie sind nicht authentifiziert." - -msgid "You are not authorized to complete this action." -msgstr "Sie sind nicht dazu authorisiert, diese Aktion abzuschließen" - -#, python-format -msgid "You are not authorized to lookup image %s." -msgstr "Sie sind nicht berechtigt, Abbild %s zu suchen." - -#, python-format -msgid "You are not authorized to lookup the members of the image %s." -msgstr "Sie sind nicht berechtigt, die Mitglieder des Abbild %s zu suchen." 
- -#, python-format -msgid "You are not permitted to create a tag in the namespace owned by '%s'" -msgstr "" -"Sie können keine Schlagwörter in Namensbereichen erstellen, die '%s' gehören." - -msgid "You are not permitted to create image members for the image." -msgstr "Sie können keine Abbildelemente für das Abbild erstellen." - -#, python-format -msgid "You are not permitted to create images owned by '%s'." -msgstr "Sie können keine Abbilder erstellen, die '%s' gehören." - -#, python-format -msgid "You are not permitted to create namespace owned by '%s'" -msgstr "Sie können keine Namensbereiche erstellen, die '%s' gehören." - -#, python-format -msgid "You are not permitted to create object owned by '%s'" -msgstr "Sie können keine Objekte erstellen, die '%s' gehören." - -#, python-format -msgid "You are not permitted to create property owned by '%s'" -msgstr "Sie können keine Eigenschaften erstellen, die '%s' gehören." - -#, python-format -msgid "You are not permitted to create resource_type owned by '%s'" -msgstr "Sie können keinen resource_type erstellen, der '%s' gehört." - -#, python-format -msgid "You are not permitted to create this task with owner as: %s" -msgstr "Sie können diese Task nicht mit dem Eigentümer %s erstellen" - -msgid "You are not permitted to deactivate this image." -msgstr "Sie können dieses Abbild nicht deaktivieren. " - -msgid "You are not permitted to delete this image." -msgstr "Sie können dieses Abbild nicht löschen." - -msgid "You are not permitted to delete this meta_resource_type." -msgstr "Sie können diesen meta_resource_type nicht löschen." - -msgid "You are not permitted to delete this namespace." -msgstr "Sie können diesen Namensbereich nicht löschen." - -msgid "You are not permitted to delete this object." -msgstr "Sie können dieses Objekt nicht löschen." - -msgid "You are not permitted to delete this property." -msgstr "Sie können diese Eigenschaft nicht löschen." - -msgid "You are not permitted to delete this tag." 
-msgstr "Sie können dieses Schlagwort nicht löschen." - -#, python-format -msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." -msgstr "" -"Sie haben keine Berechtigung, um '%(attr)s' für %(resource)s zu ändern." - -#, python-format -msgid "You are not permitted to modify '%s' on this image." -msgstr "Sie können '%s' bei diesem Abbild nicht ändern." - -msgid "You are not permitted to modify locations for this image." -msgstr "Sie können Positionen für dieses Abbild nicht ändern." - -msgid "You are not permitted to modify tags on this image." -msgstr "Sie können Schlagwörter bei diesem Abbild nicht ändern." - -msgid "You are not permitted to modify this image." -msgstr "Sie können dieses Abbild nicht ändern." - -msgid "You are not permitted to reactivate this image." -msgstr "Sie können dieses Abbild nicht erneut aktivieren. " - -msgid "You are not permitted to set status on this task." -msgstr "Sie können den Status für diese Task nicht festlegen. " - -msgid "You are not permitted to update this namespace." -msgstr "Sie können diesen Namensbereich nicht aktualisieren. " - -msgid "You are not permitted to update this object." -msgstr "Sie können dieses Objekt nicht aktualisieren. " - -msgid "You are not permitted to update this property." -msgstr "Sie können diese Eigenschaft nicht aktualisieren. " - -msgid "You are not permitted to update this tag." -msgstr "Sie können dieses Schlagwort nicht aktualisieren." - -msgid "You are not permitted to upload data for this image." -msgstr "Sie können keine Daten für dieses Abbild hochladen." 
- -#, python-format -msgid "You cannot add image member for %s" -msgstr "Hinzufügen von Abbildelement für %s nicht möglich" - -#, python-format -msgid "You cannot delete image member for %s" -msgstr "Löschen von Abbildelement für %s nicht möglich" - -#, python-format -msgid "You cannot get image member for %s" -msgstr "Abrufen von Abbildelement für %s nicht möglich" - -#, python-format -msgid "You cannot update image member %s" -msgstr "Aktualisieren von Abbildelement %s nicht möglich" - -msgid "You do not own this image" -msgstr "Sie sind nicht Eigner dieses Images" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a cert, " -"however you have failed to supply either a key_file parameter or set the " -"GLANCE_CLIENT_KEY_FILE environ variable" -msgstr "" -"Sie haben sich dafür entschieden, SSL für die Verbindung zu verwenden, und " -"Sie haben ein Zertifikat angegeben. Allerdings haben Sie weder einen " -"key_file-Parameter angegeben noch die GLANCE_CLIENT_KEY_FILE-" -"Umgebungsvariable festgelegt" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a key, " -"however you have failed to supply either a cert_file parameter or set the " -"GLANCE_CLIENT_CERT_FILE environ variable" -msgstr "" -"Sie haben sich dafür entschieden, SSL für die Verbindung zu verwenden, und " -"Sie haben einen Schlüssel angegeben. 
Allerdings haben Sie weder einen " -"cert_file-Parameter angegeben noch die GLANCE_CLIENT_CERT_FILE-" -"Umgebungsvariable festgelegt" - -msgid "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" -msgstr "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" - -#, python-format -msgid "__init__() got unexpected keyword argument '%s'" -msgstr "__init__() hat unerwartetes Schlüsselwortargument '%s' erhalten" - -#, python-format -msgid "" -"cannot transition from %(current)s to %(next)s in update (wanted from_state=" -"%(from)s)" -msgstr "" -"Übergang von %(current)s zu %(next)s in Aktualisierung nicht möglich " -"(gewünscht ist from_state=%(from)s)" - -#, python-format -msgid "custom properties (%(props)s) conflict with base properties" -msgstr "" -"Benutzerdefinierte Eigenschaften (%(props)s) stehen im Konflikt mit " -"Basiseigenschaften" - -msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" -msgstr "" -"Hub weder für Eventlet 'poll' noch für 'selects' ist auf dieser Plattform " -"verfügbar" - -msgid "is_public must be None, True, or False" -msgstr "'is_public' muss 'None', 'True' oder 'False' sein" - -msgid "limit param must be an integer" -msgstr "'limit'-Parameter muss eine Ganzzahl sein" - -msgid "limit param must be positive" -msgstr "'limit'-Parameter muss positiv sein" - -msgid "md5 hash of image contents." -msgstr "md5-Hashwert von Abbildinhalten. " - -#, python-format -msgid "new_image() got unexpected keywords %s" -msgstr "new_image() hat unerwartete Schlüsselwörter %s erhalten" - -msgid "protected must be True, or False" -msgstr "'protected' muss 'True' oder 'False' sein" - -#, python-format -msgid "unable to launch %(serv)s. Got error: %(e)s" -msgstr "%(serv)s kann nicht gestartet werden. Fehler: %(e)s" - -#, python-format -msgid "x-openstack-request-id is too long, max size %s" -msgstr "x-openstack-request-id ist zu lang. Max. 
Größe %s" diff --git a/glance/locale/es/LC_MESSAGES/glance.po b/glance/locale/es/LC_MESSAGES/glance.po deleted file mode 100644 index 73b7ce31..00000000 --- a/glance/locale/es/LC_MESSAGES/glance.po +++ /dev/null @@ -1,2135 +0,0 @@ -# Translations template for glance. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the glance project. -# -# Translators: -# Adriana Chisco Landazábal , 2015 -# Alfredo Matas , 2015 -# Marian Tort , 2015 -# Pablo Sanchez , 2015 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: glance 15.0.0.0b3.dev29\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-06-23 20:54+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-12 05:20+0000\n" -"Last-Translator: Copied by Zanata \n" -"Language: es\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" -"Language-Team: Spanish\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "%(cls)s exception was raised in the last rpc call: %(val)s" -msgstr "Ocurrió excepción %(cls)s en la última llamada a rpc: %(val)s" - -#, python-format -msgid "%(m_id)s not found in the member list of the image %(i_id)s." -msgstr "" -"No se ha encontrado %(m_id)s en la lista de miembros de la imagen %(i_id)s." - -#, python-format -msgid "%(serv)s (pid %(pid)s) is running..." -msgstr "Se esta ejecutando %(serv)s (pid %(pid)s) ..." - -#, python-format -msgid "%(serv)s appears to already be running: %(pid)s" -msgstr "Parece que %(serv)s ya se está ejecutando: %(pid)s" - -#, python-format -msgid "" -"%(strategy)s is registered as a module twice. %(module)s is not being used." -msgstr "" -"%(strategy)s está registrado como módulo dos veces. %(module)s no se " -"encuentra en uso." 
- -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Could not load the " -"filesystem store" -msgstr "" -"%(task_id)s de %(task_type)s no se ha configurado correctamente. No se pudo " -"cargar el almacén de sistema de ficheo" - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Missing work dir: " -"%(work_dir)s" -msgstr "" -"%(task_id)s de %(task_type)s no se ha configurado adecuadamente. Hace falta " -"work dir: %(work_dir)s" - -#, python-format -msgid "%(verb)sing %(serv)s" -msgstr "%(verb)s ing %(serv)s" - -#, python-format -msgid "%(verb)sing %(serv)s with %(conf)s" -msgstr "%(verb)s ing %(serv)s con %(conf)s" - -#, python-format -msgid "" -"%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " -"address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " -"separately from the port (i.e., \"[fe80::a:b:c]:9876\")." -msgstr "" -"%s Por favor especifique el par host:puerto, en donde el host es una " -"dirección IPv4, IPv6, nombre de host o FQDN. Si utiliza una dirección IPv6 " -"enciérrela entre paréntesis separados del puerto (por ejemplo \"[fe80::a:b:" -"c]:9876\")." - -#, python-format -msgid "%s can't contain 4 byte unicode characters." -msgstr "%s no puede contener caracteres 4 byte unicode." - -#, python-format -msgid "%s is already stopped" -msgstr "%s ya se detuvo" - -#, python-format -msgid "%s is stopped" -msgstr "%s se ha detenido" - -msgid "" -"--os_auth_url option or OS_AUTH_URL environment variable required when " -"keystone authentication strategy is enabled\n" -msgstr "" -"Se necesita la opción --os_auth_url ovariable de ambiente OS_AUTH_URL cuando " -"la estrategia de autenticación keystone está habilitada\n" - -msgid "A body is not expected with this request." -msgstr "No se espera un cuerpo en esta solicitud." 
- -#, python-format -msgid "" -"A metadata definition object with name=%(object_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"Ya existe el objeto para definición de metadatos de nombre=%(object_name)s " -"en espacio de nombre=%(namespace_name)s." - -#, python-format -msgid "" -"A metadata definition property with name=%(property_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"Ya existe la propiedad para definición de metadatos de nombre=" -"%(property_name)s en espacio de nombre=%(namespace_name)s." - -#, python-format -msgid "" -"A metadata definition resource-type with name=%(resource_type_name)s already " -"exists." -msgstr "" -"Ya existe el tipo de recurso para definición de metadatos=" -"%(resource_type_name)s" - -msgid "A set of URLs to access the image file kept in external store" -msgstr "" -"Conjunto de URLs para acceder al archivo de imagen se mantiene en un almacén " -"externo" - -msgid "Amount of disk space (in GB) required to boot image." -msgstr "" -"Cantidad de espacio de disco (en GB) necesario para la imagen de arranque." - -msgid "Amount of ram (in MB) required to boot image." -msgstr "Cantidad de RAM (en MB) necesario para la imagen de arranque." 
- -msgid "An identifier for the image" -msgstr "Un identificador para la imagen" - -msgid "An identifier for the image member (tenantId)" -msgstr "Un identificador para el miembro de la imagen (tenantId)" - -msgid "An identifier for the owner of this task" -msgstr "Un identificador para el propietario de esta tarea" - -msgid "An identifier for the task" -msgstr "Un identificador para la tarea" - -msgid "An image file url" -msgstr "La URL de un archivo de imagen" - -msgid "An image schema url" -msgstr "La URL de un esquema imagen" - -msgid "An image self url" -msgstr "La URL propia de una imagen" - -#, python-format -msgid "An image with identifier %s already exists" -msgstr "Ya existe una imagen con el identificador %s" - -msgid "An import task exception occurred" -msgstr "Se ha producido una excepción en una tarea de importación" - -msgid "An object with the same identifier already exists." -msgstr "Ya existe un objeto con el mismo identificador." - -msgid "An object with the same identifier is currently being operated on." -msgstr "Ya se está operando un objeto con el mismo identificador." - -msgid "An object with the specified identifier was not found." -msgstr "No se ha encontrado un objeto con el identificador especificado." - -msgid "An unknown exception occurred" -msgstr "Se ha producido una excepción desconocida " - -msgid "An unknown task exception occurred" -msgstr "Se ha producido una excepción desconocida " - -#, python-format -msgid "Attempt to upload duplicate image: %s" -msgstr "Se ha intentado subir imagen duplicada: %s" - -msgid "Attempted to update Location field for an image not in queued status." -msgstr "" -"Se ha intentado actualizar el campo de ubicación para una imagen que no está " -"en estado de cola." - -#, python-format -msgid "Attribute '%(property)s' is read-only." -msgstr "El atributo '%(property)s' es de sólo lectura." - -#, python-format -msgid "Attribute '%(property)s' is reserved." 
-msgstr "El atributo '%(property)s' está reservado." - -#, python-format -msgid "Attribute '%s' is read-only." -msgstr "El atributo '%s' es de solo lectura." - -#, python-format -msgid "Attribute '%s' is reserved." -msgstr "El atributo '%s' está reservado." - -msgid "Attribute container_format can be only replaced for a queued image." -msgstr "" -"El atributo container_format solo se puede reemplazar por una imagen en cola." - -msgid "Attribute disk_format can be only replaced for a queued image." -msgstr "El atributo isk_format solo se puede remplazar con una imagen en cola." - -#, python-format -msgid "Auth service at URL %(url)s not found." -msgstr "No se ha encontrado el servicio de autorización en el URL %(url)s." - -#, python-format -msgid "" -"Authentication error - the token may have expired during file upload. " -"Deleting image data for %s." -msgstr "" -"Error de autenticación: es posible que el token haya caducado durante la " -"carga de archivos. Borrando los datos de imagen de %s." - -msgid "Authorization failed." -msgstr "Ha fallado la autorización." - -msgid "Available categories:" -msgstr "Categorías disponibles:" - -#, python-format -msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." -msgstr "" -"Formato de filtro de consulta \"%s\" incorrecto. Utilice la notación de " -"DateTime de la ISO 8601." 
- -#, python-format -msgid "Bad Command: %s" -msgstr "Comando incorrecto: %s" - -#, python-format -msgid "Bad header: %(header_name)s" -msgstr "Cabecera incorrecta: %(header_name)s" - -#, python-format -msgid "Bad value passed to filter %(filter)s got %(val)s" -msgstr "Valores incorrectos pasaron al filtro %(filter)s se obtuvo %(val)s" - -#, python-format -msgid "Badly formed S3 URI: %(uri)s" -msgstr "La URI S3 se realizó de manera incorrecta: %(uri)s" - -#, python-format -msgid "Badly formed credentials '%(creds)s' in Swift URI" -msgstr "Credenciales formadas incorrectamente '%(creds)s' en URI de Swift" - -msgid "Badly formed credentials in Swift URI." -msgstr "Credenciales con formato incorrecto en URI de Swift." - -msgid "Body expected in request." -msgstr "Se esperaba un cuerpo en la solicitud." - -msgid "Cannot be a negative value" -msgstr "No puede ser un valor negativo" - -msgid "Cannot be a negative value." -msgstr "No puede ser un valor negativo." - -#, python-format -msgid "Cannot convert image %(key)s '%(value)s' to an integer." -msgstr "No se puede convertir imagen %(key)s '%(value)s' en un entero." - -msgid "Cannot remove last location in the image." -msgstr "No se puede eliminar la última ubicación de la imagen." - -#, python-format -msgid "Cannot save data for image %(image_id)s: %(error)s" -msgstr "No se pueden guardar los datos para la imagen %(image_id)s: %(error)s" - -msgid "Cannot set locations to empty list." -msgstr "No se puede definir ubicaciones como una lista vacía." - -msgid "Cannot upload to an unqueued image" -msgstr "No se puede subir a una imagen en cola" - -#, python-format -msgid "Checksum verification failed. Aborted caching of image '%s'." -msgstr "" -"Se ha encontrado un error en la verificación de la suma de comprobación. Se " -"ha abortado el almacenamiento en memoria caché de la imagen '%s'." 
- -msgid "Client disconnected before sending all data to backend" -msgstr "El cliente se desconecto antes de enviar todos los datos a backend" - -msgid "Command not found" -msgstr "Comando no encontrado" - -msgid "Configuration option was not valid" -msgstr "La opción de configuración no era válida " - -#, python-format -msgid "Connect error/bad request to Auth service at URL %(url)s." -msgstr "" -"Solicitud incorrecta/error de conexión a servicio de autorización en el URL " -"%(url)s." - -#, python-format -msgid "Constructed URL: %s" -msgstr "URL construido : %s" - -msgid "Container format is not specified." -msgstr "No se especificó el formato de contenedor." - -msgid "Content-Type must be application/octet-stream" -msgstr "El tipo de contenido debe ser aplicación/serie de octetos" - -#, python-format -msgid "Corrupt image download for image %(image_id)s" -msgstr "Descarga de imagen corrupta para imagen %(image_id)s " - -#, python-format -msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" -msgstr "" -"No se ha podido enlazar con %(host)s:%(port)s después de intentarlo durante " -"30 segundos" - -msgid "Could not find OVF file in OVA archive file." 
-msgstr "No se ha podido encontrar el archivo OVF en el archivo archivador OVA" - -#, python-format -msgid "Could not find metadata object %s" -msgstr "No se pudo encontrar el objeto de metadatos %s" - -#, python-format -msgid "Could not find metadata tag %s" -msgstr "No se pudo encontrar la etiqueta de metadatos %s" - -#, python-format -msgid "Could not find namespace %s" -msgstr "No se ha podido encontrar el espacio de nombre %s" - -#, python-format -msgid "Could not find property %s" -msgstr "No se pudo encontrar propiedad %s" - -msgid "Could not find required configuration option" -msgstr "No se ha podido encontrar la opción de configuración necesaria " - -#, python-format -msgid "Could not find task %s" -msgstr "No se encontró tarea %s" - -#, python-format -msgid "Could not update image: %s" -msgstr "No se ha podido actualizar la imagen: %s" - -msgid "Currently, OVA packages containing multiple disk are not supported." -msgstr "" -"Actualmente no se da soporte a los paquetes OVA que contengan múltiples " -"discos." - -#, python-format -msgid "Data for image_id not found: %s" -msgstr "No se encuentran los datos de image_id: %s" - -msgid "Data supplied was not valid." -msgstr "Los datos proporcionados no son válidos. 
" - -msgid "Date and time of image member creation" -msgstr "Fecha y hora de creación del miembro de la imagen" - -msgid "Date and time of image registration" -msgstr "Fecha y hora del registro de la imagen" - -msgid "Date and time of last modification of image member" -msgstr "Fecha y hora de la última modificación del miembro de la imagen" - -msgid "Date and time of namespace creation" -msgstr "Fecha y hora de creación del espacio de nombre" - -msgid "Date and time of object creation" -msgstr "Fecha y hora de creación del objeto" - -msgid "Date and time of resource type association" -msgstr "Fecha y hora de asociación del tipo de recurso" - -msgid "Date and time of tag creation" -msgstr "Fecha y hora de creación de la etiqueta" - -msgid "Date and time of the last image modification" -msgstr "Fecha y hora de la última modificación de la imagen" - -msgid "Date and time of the last namespace modification" -msgstr "Fecha y hora de la última modificación de espacio de nombre" - -msgid "Date and time of the last object modification" -msgstr "Fecha y hora de la última modificación del objeto" - -msgid "Date and time of the last resource type association modification" -msgstr "" -"Fecha y hora de la última modificación de la asociación del tipo de recurso" - -msgid "Date and time of the last tag modification" -msgstr "Fecha y hora de la última modificación de la etiqueta" - -msgid "Datetime when this resource was created" -msgstr "Fecha en la cual se creó este recurso" - -msgid "Datetime when this resource was updated" -msgstr "Fecha en la cual se actualizó este recurso" - -msgid "Datetime when this resource would be subject to removal" -msgstr "Fecha en la cual este recurso estará sujeto a eliminación" - -#, python-format -msgid "Denying attempt to upload image because it exceeds the quota: %s" -msgstr "Denegando intento de carga de imagen porque excede la capacidad: %s" - -#, python-format -msgid "Denying attempt to upload image larger than %d bytes." 
-msgstr "Denegando intento de cargar una imagen mayor que %d bytes." - -msgid "Descriptive name for the image" -msgstr "Nombre descriptivo para la imagen" - -msgid "Disk format is not specified." -msgstr "No se especificó el formato del disco." - -#, python-format -msgid "" -"Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" -msgstr "" -"El controlador %(driver_name)s no se ha podido configurar correctamente. " -"Razón: %(reason)s" - -msgid "" -"Error decoding your request. Either the URL or the request body contained " -"characters that could not be decoded by Glance" -msgstr "" -"Error al descodificar la solicitud. La URL o el cuerpo solicitado contenían " -"caracteres que se han podido descodificar en Glance" - -#, python-format -msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" -msgstr "Error al captar los miembros de la imagen %(image_id)s: %(inner_msg)s" - -msgid "Error in store configuration. Adding images to store is disabled." -msgstr "" -"Error en la configuración del almacén. Se ha inhabilitado la adición de " -"imágenes a almacen." - -msgid "Expected a member in the form: {\"member\": \"image_id\"}" -msgstr "Se eperaba un miembro con el formato: {\"member\": \"image_id\"}" - -msgid "Expected a status in the form: {\"status\": \"status\"}" -msgstr "Se eperaba un estado con el formato: {\"status\": \"status\"}" - -msgid "External source should not be empty" -msgstr "El origen externo no puede estar vacío" - -#, python-format -msgid "External sources are not supported: '%s'" -msgstr "No se soportan fuentes externas: '%s'" - -#, python-format -msgid "Failed to activate image. Got error: %s" -msgstr "Error al activar imagen. Se ha obtenido error: %s" - -#, python-format -msgid "Failed to add image metadata. Got error: %s" -msgstr "Error al agregar metadatos de imagen. 
Se obtuvo error: %s" - -#, python-format -msgid "Failed to find image %(image_id)s to delete" -msgstr "No se pudo encontrar imagen %(image_id)s para eliminar" - -#, python-format -msgid "Failed to find image to delete: %s" -msgstr "No se ha encontrado la imagen para eliminar: %s" - -#, python-format -msgid "Failed to find image to update: %s" -msgstr "No se encontró imagen para actualizar: %s" - -#, python-format -msgid "Failed to find resource type %(resourcetype)s to delete" -msgstr "No se encontró tipo de recurso %(resourcetype)s para eliminar" - -#, python-format -msgid "Failed to initialize the image cache database. Got error: %s" -msgstr "" -"No se ha podido inicializar la base de datos de memoria caché de imagen. Se " -"ha obtenido error: %s" - -#, python-format -msgid "Failed to read %s from config" -msgstr "No se ha podido leer %s de la configuración" - -#, python-format -msgid "Failed to reserve image. Got error: %s" -msgstr "Error al reservar imagen. Se ha obtenido error: %s" - -#, python-format -msgid "Failed to update image metadata. Got error: %s" -msgstr "" -"No se han podido actualizar metadatos de imagen. Se ha obtenido error: %s" - -#, python-format -msgid "Failed to upload image %s" -msgstr "No se cargó imagen %s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to HTTP error: " -"%(error)s" -msgstr "" -"No se permite cargar datos de imagen para imagen %(image_id)s a causa de un " -"error HTTP: %(error)s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to internal error: " -"%(error)s" -msgstr "" -"Error al cargar datos de imagen para imagen %(image_id)s a causa de un error " -"interno: %(error)s" - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"El archivo %(path)s tiene un archivo de respaldo %(bfile)s no válido, " -"terminando de forma anormal." - -msgid "" -"File based imports are not allowed. 
Please use a non-local source of image " -"data." -msgstr "" -"No se permiten las importaciones basadas en ficheros. Por favor use una " -"fuente no-local de datos de imagen." - -msgid "Forbidden image access" -msgstr "Acceso prohibido a la imagen" - -#, python-format -msgid "Forbidden to delete a %s image." -msgstr "Se prohíbe eliminar una imagen %s." - -#, python-format -msgid "Forbidden to delete image: %s" -msgstr "Está prohibido eliminar imagen: %s" - -#, python-format -msgid "Forbidden to modify '%(key)s' of %(status)s image." -msgstr "Prohibido modificar '%(key)s' de la imagen en estado %(status)s." - -#, python-format -msgid "Forbidden to modify '%s' of image." -msgstr "Prohibido modificar '%s' de la imagen." - -msgid "Forbidden to reserve image." -msgstr "La reserva de imagen está prohibida." - -msgid "Forbidden to update deleted image." -msgstr "La actualización de una imagen suprimida está prohibida." - -#, python-format -msgid "Forbidden to update image: %s" -msgstr "Se prohíbe actualizar imagen: %s" - -#, python-format -msgid "Forbidden upload attempt: %s" -msgstr "Intento de carga prohibido: %s" - -#, python-format -msgid "Forbidding request, metadata definition namespace=%s is not visible." -msgstr "" -"Solicitud no permitida, el espacio de nombre para la definición de metadatos=" -"%s no es visible" - -#, python-format -msgid "Forbidding request, task %s is not visible" -msgstr "Solicitud no permitida, la tarea %s no es visible" - -msgid "Format of the container" -msgstr "Formato del contenedor" - -msgid "Format of the disk" -msgstr "Formato del disco" - -#, python-format -msgid "Host \"%s\" is not valid." -msgstr "Host \"%s\" no es válido." - -#, python-format -msgid "Host and port \"%s\" is not valid." -msgstr "Host y puerto \"%s\" no es válido." 
- -msgid "" -"Human-readable informative message only included when appropriate (usually " -"on failure)" -msgstr "" -"Solo se incluye mensaje informativo legible para humanos cuando sea " -"apropiado (usualmente en error)" - -msgid "If true, image will not be deletable." -msgstr "Si es true, la imagen no se podrá suprimir." - -msgid "If true, namespace will not be deletable." -msgstr "Si es true, no se podrá eliminar el espacio de nombre." - -#, python-format -msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" -msgstr "No se pudo eliminar imagen %(id)s porque está en uso: %(exc)s" - -#, python-format -msgid "Image %(id)s not found" -msgstr "No se ha encontrado la imagen %(id)s" - -#, python-format -msgid "" -"Image %(image_id)s could not be found after upload. The image may have been " -"deleted during the upload: %(error)s" -msgstr "" -"No se pudo encontrar imagen %(image_id)s después de subirla. Es posible que " -"la imagen haya sido eliminada durante la carga: %(error)s" - -#, python-format -msgid "Image %(image_id)s is protected and cannot be deleted." -msgstr "La imagen %(image_id)s está protegida y no se puede suprimir." - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload, cleaning up the chunks uploaded." -msgstr "" -"No se pudo encontrar la imagen %s después de subirla. Es posible que la " -"imagen haya sido eliminada durante la carga, limpiando los fragmentos " -"cargados." - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload." -msgstr "" -"No se puede encontrar la imagen %s después de la carga. Es posible que la " -"imagen se haya eliminado durante la carga." 
- -#, python-format -msgid "Image %s is deactivated" -msgstr "Se ha desactivado la imagen %s" - -#, python-format -msgid "Image %s is not active" -msgstr "La imagen %s no está activa" - -#, python-format -msgid "Image %s not found." -msgstr "No se encontró imagen %s." - -#, python-format -msgid "Image exceeds the storage quota: %s" -msgstr "La imagen excede la capacidad de almacenamiento: %s" - -msgid "Image id is required." -msgstr "Se necesita id de imagen" - -msgid "Image is protected" -msgstr "La imagen está protegida " - -#, python-format -msgid "Image member limit exceeded for image %(id)s: %(e)s:" -msgstr "" -"Se ha excedido el límite de miembro de imagen para imagen %(id)s: %(e)s:" - -#, python-format -msgid "Image name too long: %d" -msgstr "Nombre de imagen demasiado largo: %d" - -msgid "Image operation conflicts" -msgstr "Conflictos de operación de imagen" - -#, python-format -msgid "" -"Image status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "No se permite la transición de estado %(cur_status)s a %(new_status)s" - -#, python-format -msgid "Image storage media is full: %s" -msgstr "El soporte de almacenamiento de imagen está lleno: %s" - -#, python-format -msgid "Image tag limit exceeded for image %(id)s: %(e)s:" -msgstr "" -"Se ha excedido el límite de etiqueta de imagen para imagen %(id)s: %(e)s:" - -#, python-format -msgid "Image upload problem: %s" -msgstr "Problema al cargar la imagen: %s" - -#, python-format -msgid "Image with identifier %s already exists!" -msgstr "¡Ya existe una imagen con el identificador %s!" - -#, python-format -msgid "Image with identifier %s has been deleted." -msgstr "Se ha eliminado imagen identificada como %s." 
- -#, python-format -msgid "Image with identifier %s not found" -msgstr "No se ha encontrado la imagen con el identificador %s" - -#, python-format -msgid "Image with the given id %(image_id)s was not found" -msgstr "No se ha podido encontrar la imagen con ID %(image_id)s" - -#, python-format -msgid "" -"Incorrect auth strategy, expected \"%(expected)s\" but received " -"\"%(received)s\"" -msgstr "" -"Estrategia de autorización incorrecta, se esperaba \"%(expected)s\" pero se " -"ha recibido \"%(received)s\"" - -#, python-format -msgid "Incorrect request: %s" -msgstr "Solicitud incorrecta: %s" - -#, python-format -msgid "Input does not contain '%(key)s' field" -msgstr "La entrada no contiene el campo '%(key)s'" - -#, python-format -msgid "Insufficient permissions on image storage media: %s" -msgstr "Permisos insuficientes en el soporte de almacenamiento de imagen: %s " - -#, python-format -msgid "Invalid JSON pointer for this resource: '/%s'" -msgstr "Puntero JSON no válido para este recurso: '/%s'" - -#, python-format -msgid "Invalid checksum '%s': can't exceed 32 characters" -msgstr "Suma de verificación '%s': no puede exceder los 32 caracteres" - -msgid "Invalid configuration in glance-swift conf file." -msgstr "Configuración en fichero en glance-swift no válida." - -msgid "Invalid configuration in property protection file." -msgstr "Configuración en fichero de protección de propiedad no válida." - -#, python-format -msgid "Invalid container format '%s' for image." -msgstr "Formato de contenedor '%s' no válido para imagen." - -#, python-format -msgid "Invalid content type %(content_type)s" -msgstr "Tipo de contenido no válido %(content_type)s" - -#, python-format -msgid "Invalid disk format '%s' for image." -msgstr "Formato de disco '%s' no válido para imagen." - -#, python-format -msgid "Invalid filter value %s. The quote is not closed." -msgstr "Valor de filtro no válido %s. No se han cerrado comillas." - -#, python-format -msgid "" -"Invalid filter value %s. 
There is no comma after closing quotation mark." -msgstr "" -"Valor de filtro no válido %s. No hay una coma antes de cerrar comillas." - -#, python-format -msgid "" -"Invalid filter value %s. There is no comma before opening quotation mark." -msgstr "Valor de filtro no válido %s. No hay una coma antes de abrir comillas." - -msgid "Invalid image id format" -msgstr "Formato de id de imagen no válido" - -msgid "Invalid location" -msgstr "Ubicación no válida" - -#, python-format -msgid "Invalid location %s" -msgstr "Ubicación %s no válida" - -#, python-format -msgid "Invalid location: %s" -msgstr "Ubicaciones no válidas: %s" - -#, python-format -msgid "" -"Invalid location_strategy option: %(name)s. The valid strategy option(s) " -"is(are): %(strategies)s" -msgstr "" -"Opción location_strategy no válida: %(name)s. La opción(es) válida(s) es/" -"son: %(strategies)s" - -msgid "Invalid locations" -msgstr "Ubicaciones no válidas" - -#, python-format -msgid "Invalid locations: %s" -msgstr "Ubicaciones no válidas: %s" - -msgid "Invalid marker format" -msgstr "Formato de marcador no válido" - -msgid "Invalid marker. Image could not be found." -msgstr "Marcador no válido. No se ha podido encontrar la imagen. " - -#, python-format -msgid "Invalid membership association: %s" -msgstr "Asociación de pertenencia no válida: %s " - -msgid "" -"Invalid mix of disk and container formats. When setting a disk or container " -"format to one of 'aki', 'ari', or 'ami', the container and disk formats must " -"match." -msgstr "" -"Mezcla no válida de formatos de disco y contenedor. Al definir un formato de " -"disco o de contenedor como 'aki', 'ari' o 'ami', los formatos de contenedor " -"y de disco deben coincidir." - -#, python-format -msgid "" -"Invalid operation: `%(op)s`. It must be one of the following: %(available)s." -msgstr "" -"Operación: `%(op)s` no válida. Debe ser una de las siguientes: %(available)s." - -msgid "Invalid position for adding a location." 
-msgstr "Posición no válida para agregar ubicación." - -msgid "Invalid position for removing a location." -msgstr "Posición no válida para eliminar ubicación." - -msgid "Invalid service catalog json." -msgstr "JSON de catálogo de servicios no válido." - -#, python-format -msgid "Invalid sort direction: %s" -msgstr "Dirección de ordenación no válida : %s" - -#, python-format -msgid "" -"Invalid sort key: %(sort_key)s. It must be one of the following: " -"%(available)s." -msgstr "" -"Clave de ordenación no válida: %(sort_key)s. Debe ser una de las siguientes: " -"%(available)s." - -#, python-format -msgid "Invalid status value: %s" -msgstr "Valor de estado no válido: %s" - -#, python-format -msgid "Invalid status: %s" -msgstr "Estado no válido: %s" - -#, python-format -msgid "Invalid time format for %s." -msgstr "Formato de hora no válido para %s." - -#, python-format -msgid "Invalid type value: %s" -msgstr "Valor de tipo no válido: %s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition namespace " -"with the same name of %s" -msgstr "" -"Actualización no válida. Como resultado será un espacio de nombre para la " -"definición de metadatos duplicado con el mismo nombre de %s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Actualización no válida. El resultado será un objeto para la definición de " -"metadatos duplicado con el mismo nombre de=%(name)s en el espacio de nombre=" -"%(namespace_name)s." - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Actualización no válida. El resultado será un objeto para la definición de " -"metadatos duplicado con el mismo nombre de=%(name)s en el espacio de nombre=" -"%(namespace_name)s." 
- -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition property " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Actualización no válida. El resultado será una propiedad para la definición " -"de metadatos duplicada con el mismo nombre de=%(name)s en espacio de nombre=" -"%(namespace_name)s." - -#, python-format -msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" -msgstr "Valor no válido'%(value)s' para parametro '%(param)s': %(extra_msg)s" - -#, python-format -msgid "Invalid value for option %(option)s: %(value)s" -msgstr "Valor no válido para opción %(option)s: %(value)s" - -#, python-format -msgid "Invalid visibility value: %s" -msgstr "Valor de visibilidad no válido : %s" - -msgid "It's invalid to provide multiple image sources." -msgstr "Proporcionar múltiples fuentes para la imagen no es válido." - -msgid "It's not allowed to add locations if locations are invisible." -msgstr "No se permite añadir ubicaciones si son invisibles." - -msgid "It's not allowed to remove locations if locations are invisible." -msgstr "No se permite eliminar ubicaciones si son invisibles." - -msgid "It's not allowed to update locations if locations are invisible." -msgstr "No se permite actualizar las ubicaciones si son invisibles." - -msgid "List of strings related to the image" -msgstr "Lista de series relacionadas con la imagen" - -msgid "Malformed JSON in request body." -msgstr "JSON con formato incorrecto en el cuerpo de la solicitud." - -msgid "Maximal age is count of days since epoch." -msgstr "La edad máxima es el recuento de días desde epoch." - -#, python-format -msgid "Maximum redirects (%(redirects)s) was exceeded." -msgstr "Se ha superado el máximo de redirecciones (%(redirects)s)." 
- -#, python-format -msgid "Member %(member_id)s is duplicated for image %(image_id)s" -msgstr "Se ha duplicado miembro %(member_id)s para imagen %(image_id)s" - -msgid "Member can't be empty" -msgstr "Miembro no puede estar vacío" - -msgid "Member to be added not specified" -msgstr "No se ha especificado el miembro que añadir" - -msgid "Membership could not be found." -msgstr "La pertenencia no se ha podido encontrar." - -#, python-format -msgid "" -"Metadata definition namespace %(namespace)s is protected and cannot be " -"deleted." -msgstr "" -"El espacio de nombre %(namespace)s de definición de metadatos está " -"protegido y no puede eliminarse." - -#, python-format -msgid "Metadata definition namespace not found for id=%s" -msgstr "" -"No se encontró espacio de nombre para la definición de metadatos para id=%s" - -#, python-format -msgid "" -"Metadata definition object %(object_name)s is protected and cannot be " -"deleted." -msgstr "" -"El objeto %(object_name)s de definición de metadatos está protegido y no " -"puede eliminarse." - -#, python-format -msgid "Metadata definition object not found for id=%s" -msgstr "No se encontró el objeto para la definición de metadatos para id=%s" - -#, python-format -msgid "" -"Metadata definition property %(property_name)s is protected and cannot be " -"deleted." -msgstr "" -"La propiedad %(property_name)s de definición de metadatos está protegida y " -"no puede eliminarse." - -#, python-format -msgid "Metadata definition property not found for id=%s" -msgstr "No se encontró propiedad para la definición de metadatos para id=%s" - -#, python-format -msgid "" -"Metadata definition resource-type %(resource_type_name)s is a seeded-system " -"type and cannot be deleted." -msgstr "" -"El tipo de recurso para la definición de metadatos %(resource_type_name)s es " -"un tipo de sistema seeded y no puede eliminarse." 
- -#, python-format -msgid "" -"Metadata definition resource-type-association %(resource_type)s is protected " -"and cannot be deleted." -msgstr "" -"La asociación de tipo de recurso %(resource_type)s de definición de " -"metadatos está protegida y no puede eliminarse." - -#, python-format -msgid "" -"Metadata definition tag %(tag_name)s is protected and cannot be deleted." -msgstr "" -"Etiqueta de definición de metadatos %(tag_name)s está protegida y no puede " -"eliminarse." - -#, python-format -msgid "Metadata definition tag not found for id=%s" -msgstr "No se encontró etiqueta para la definición de metadatos para id=%s" - -msgid "Minimal rows limit is 1." -msgstr "El número mínimo de filas es." - -#, python-format -msgid "Missing required credential: %(required)s" -msgstr "Falta la credencial necesaria :%(required)s " - -#, python-format -msgid "" -"Multiple 'image' service matches for region %(region)s. This generally means " -"that a region is required and you have not supplied one." -msgstr "" -"Varias coincidencias de servicio 'image' para la región %(region)s. Esto " -"generalmente significa que es necesaria una región y que no se ha " -"proporcionado ninguna." - -msgid "No authenticated user" -msgstr "Ningún usuario autenticado " - -#, python-format -msgid "No image found with ID %s" -msgstr "No se encontró imagen con ID %s" - -#, python-format -msgid "No location found with ID %(loc)s from image %(img)s" -msgstr "No se encontró ubicación con ID %(loc)s de imagen %(img)s" - -msgid "No permission to share that image" -msgstr "No existe permiso para compartir esa imagen" - -#, python-format -msgid "Not allowed to create members for image %s." -msgstr "No se permite crear miembros para imagen %s." - -#, python-format -msgid "Not allowed to deactivate image in status '%s'" -msgstr "No está permitido eliminar imagen en estado '%s'" - -#, python-format -msgid "Not allowed to delete members for image %s." -msgstr "No se permite eliminar miembros para imagen %s." 
- -#, python-format -msgid "Not allowed to delete tags for image %s." -msgstr "No se permite eliminar etiquetas para imagen %s." - -#, python-format -msgid "Not allowed to list members for image %s." -msgstr "No se permite listar miembros para imagen %s." - -#, python-format -msgid "Not allowed to reactivate image in status '%s'" -msgstr "No está permitido reactivar imagen en estado'%s'" - -#, python-format -msgid "Not allowed to update members for image %s." -msgstr "No se permite actualizar miembros para imagen %s." - -#, python-format -msgid "Not allowed to update tags for image %s." -msgstr "No se permite actualizar etiquetas para imagen %s." - -#, python-format -msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" -msgstr "" -"No se permite cargar datos de imagen para imagen %(image_id)s: %(error)s" - -msgid "Number of sort dirs does not match the number of sort keys" -msgstr "" -"El número de dirs de ordenación no coincide con el número de claves de " -"ordenación" - -msgid "OVA extract is limited to admin" -msgstr "La extracción de OVA está limitada al administrador" - -msgid "Old and new sorting syntax cannot be combined" -msgstr "No se puede combinar la antigua y nueva sintaxis de ordenación" - -#, python-format -msgid "Operation \"%s\" requires a member named \"value\"." -msgstr "La operación \"%s\" requiere un miembro llamado \"value\"." - -msgid "" -"Operation objects must contain exactly one member named \"add\", \"remove\", " -"or \"replace\"." -msgstr "" -"Los objetos de operación pueden contener exactamente un miembro llamado \"add" -"\", \"remove\" o \"replace\"." - -msgid "" -"Operation objects must contain only one member named \"add\", \"remove\", or " -"\"replace\"." -msgstr "" -"Los objetos de operación solo pueden contener un miembro llamado \"add\", " -"\"remove\" o \"replace\"." - -msgid "Operations must be JSON objects." -msgstr "Las operaciones deben ser objetos JSON." 
- -#, python-format -msgid "Original locations is not empty: %s" -msgstr "Las ubicaciones originales no están vacías: %s" - -msgid "Owner can't be updated by non admin." -msgstr "Un usuario no admin no puede actualizar al propietario." - -msgid "Owner must be specified to create a tag." -msgstr "Se debe especificar el propietario para crear etiqueta." - -msgid "Owner of the image" -msgstr "Propietario de la imagen" - -msgid "Owner of the namespace." -msgstr "Propietario del espacio de nombre." - -msgid "Param values can't contain 4 byte unicode." -msgstr "Los valores de parámetro no pueden contener 4 byte unicode." - -#, python-format -msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." -msgstr "" -"El puntero `%s` contiene un \"~\" que no forma parte de una secuencia de " -"escape reconocida." - -#, python-format -msgid "Pointer `%s` contains adjacent \"/\"." -msgstr "El puntero `%s` contiene adyacente \"/\"." - -#, python-format -msgid "Pointer `%s` does not contains valid token." -msgstr "El puntero `%s` contiene un token no válido." - -#, python-format -msgid "Pointer `%s` does not start with \"/\"." -msgstr "El puntero `%s` no empieza por \"/\"." - -#, python-format -msgid "Pointer `%s` end with \"/\"." -msgstr "El puntero `%s` termina en \"/\"." - -#, python-format -msgid "Port \"%s\" is not valid." -msgstr "Puerto \"%s\" no es válido." - -#, python-format -msgid "Process %d not running" -msgstr "No se está ejecutando proceso %d" - -#, python-format -msgid "Properties %s must be set prior to saving data." -msgstr "Las propiedades %s deben definirse antes de guardar datos." - -#, python-format -msgid "" -"Property %(property_name)s does not start with the expected resource type " -"association prefix of '%(prefix)s'." -msgstr "" -"La propiedad %(property_name)s no inicia con el prefijo de asociación del " -"tipo de recurso esperado de '%(prefix)s'." - -#, python-format -msgid "Property %s already present." 
-msgstr "La propiedad %s ya está presente." - -#, python-format -msgid "Property %s does not exist." -msgstr "La propiedad %s no existe." - -#, python-format -msgid "Property %s may not be removed." -msgstr "La propiedad %s no se puede eliminar." - -#, python-format -msgid "Property %s must be set prior to saving data." -msgstr "La propiedad %s debe definirse antes de guardar datos." - -#, python-format -msgid "Property '%s' is protected" -msgstr "Propiedad '%s' está protegida" - -msgid "Property names can't contain 4 byte unicode." -msgstr "Los nombre de propiedad no pueden contener 4 byte unicode." - -#, python-format -msgid "" -"Provided image size must match the stored image size. (provided size: " -"%(ps)d, stored size: %(ss)d)" -msgstr "" -"El tamaño de imagen proporcionado debe coincidir con el tamaño de la imagen " -"almacenada. (tamaño proporcionado: %(ps)d, tamaño almacenado: %(ss)d)" - -#, python-format -msgid "Provided object does not match schema '%(schema)s': %(reason)s" -msgstr "" -"El objeto proporcionado no coincide con el esquema '%(schema)s': %(reason)s" - -#, python-format -msgid "Provided status of task is unsupported: %(status)s" -msgstr "No se soporta el estado de tarea proporcionado: %(status)s" - -#, python-format -msgid "Provided type of task is unsupported: %(type)s" -msgstr "No se soporta el tipo de tarea proporcionado: %(type)s" - -msgid "Provides a user friendly description of the namespace." -msgstr "Proporciona una descripción sencilla del espacio de nombre." - -msgid "Received invalid HTTP redirect." -msgstr "Se ha recibido redirección HTTP no válida. " - -#, python-format -msgid "Redirecting to %(uri)s for authorization." -msgstr "Redirigiendo a %(uri)s para la autorización. " - -#, python-format -msgid "Registry service can't use %s" -msgstr "El servicio de registro no puede usar %s" - -#, python-format -msgid "Registry was not configured correctly on API server. 
Reason: %(reason)s" -msgstr "" -"El registro no se ha configurado correctamente en el servidor de API. Razón: " -"%(reason)s" - -#, python-format -msgid "Reload of %(serv)s not supported" -msgstr "No se soporta la recarga de %(serv)s" - -#, python-format -msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "Recargando %(serv)s (pid %(pid)s) con señal (%(sig)s)" - -#, python-format -msgid "Removing stale pid file %s" -msgstr "Eliminando fichero de identificación positiva obsoleto %s" - -msgid "Request body must be a JSON array of operation objects." -msgstr "" -"El cuerpo de la solicitud debe ser una matriz JSON de objetos de operación." - -msgid "Request must be a list of commands" -msgstr "La solicitud debe ser una lista de comandos." - -#, python-format -msgid "Required store %s is invalid" -msgstr "El almacén %s solicitado no es válido" - -msgid "" -"Resource type names should be aligned with Heat resource types whenever " -"possible: http://docs.openstack.org/developer/heat/template_guide/openstack." -"html" -msgstr "" -"Los nombres de tipo de recurso beben alinearse con los tipos de recurso Heat " -"en cualquier momento: http://docs.openstack.org/developer/heat/" -"template_guide/openstack.html" - -msgid "Response from Keystone does not contain a Glance endpoint." -msgstr "La respuesta de Keystone no contiene un punto final Glance." - -msgid "Scope of image accessibility" -msgstr "Ámbito de accesibilidad de la imagen" - -msgid "Scope of namespace accessibility." -msgstr "Alcance de accesibilidad del espacio de nombre." - -#, python-format -msgid "Server %(serv)s is stopped" -msgstr "El servidor %(serv)s se ha detenido" - -#, python-format -msgid "Server worker creation failed: %(reason)s." -msgstr "" -"Se ha encontrado un error en la creación del trabajador de servidor: " -"%(reason)s." 
- -msgid "Signature verification failed" -msgstr "Ha fallado la verificación de firma" - -msgid "Size of image file in bytes" -msgstr "Tamaño del archivo de imagen en bytes" - -msgid "" -"Some resource types allow more than one key / value pair per instance. For " -"example, Cinder allows user and image metadata on volumes. Only the image " -"properties metadata is evaluated by Nova (scheduling or drivers). This " -"property allows a namespace target to remove the ambiguity." -msgstr "" -"Algunos tipos de recurso aceptan más de una clave / par de valor por " -"instancia. Por ejemplo, Cinder permite metadatos de usuario e imagen en " -"volúmenes. Nova solo evalúa los metadatos de propiedades de imagen " -"(planeadores y controladores). Esta propiedad permite un espacio de nombre " -"para eliminar la ambigüedad." - -msgid "Sort direction supplied was not valid." -msgstr "La dirección de ordenación proporcionada no es válida." - -msgid "Sort key supplied was not valid." -msgstr "La clave de clasificación proporcionada no es válida. " - -msgid "" -"Specifies the prefix to use for the given resource type. Any properties in " -"the namespace should be prefixed with this prefix when being applied to the " -"specified resource type. Must include prefix separator (e.g. a colon :)." -msgstr "" -"Especifica el prefijo que se usará para el tipo de recurso dado. Cualquier " -"propiedad en el espacio de nombre deben tener este prefijo cuando se aplica " -"al tipo de recurso especificado. Debe incluir separador de prefijo(por " -"ejemplo un punto :)." - -msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." -msgstr "el estado debe ser \"pending\", \"accepted\" o \"rejected\"." 
- -msgid "Status not specified" -msgstr "Estado no especificado" - -msgid "Status of the image" -msgstr "Estado de la imaen" - -#, python-format -msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "No se permite la transición de %(cur_status)s a %(new_status)s" - -#, python-format -msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "Deteniendo %(serv)s (pid %(pid)s) con señal (%(sig)s)" - -#, python-format -msgid "Store for image_id not found: %s" -msgstr "No se ha encontrado el almacenamiento para image_id: %s" - -#, python-format -msgid "Store for scheme %s not found" -msgstr "El almacén para el esquema %s no se ha encontrado" - -#, python-format -msgid "" -"Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " -"(%(actual)s) did not match. Setting image status to 'killed'." -msgstr "" -"%(attr)s (%(supplied)s) y %(attr)s proporcionados que se han generado desde " -"la imagen cargada (%(actual)s) no coinciden. Definiendo estado de imagen " -"como 'killed'." - -msgid "Supported values for the 'container_format' image attribute" -msgstr "Valores para el atributo de imagen 'container_format' soportados" - -msgid "Supported values for the 'disk_format' image attribute" -msgstr "Valores para el atributo de imagen 'disk_format' soportados" - -#, python-format -msgid "Suppressed respawn as %(serv)s was %(rsn)s." -msgstr "Se suprimió respawn como %(serv)s era %(rsn)s." - -msgid "System SIGHUP signal received." -msgstr "Se ha recibido señal de sistema SIGHUP." 
- -#, python-format -msgid "Task '%s' is required" -msgstr "Se necesita tarea '%s'" - -msgid "Task does not exist" -msgstr "La tarea no existe" - -msgid "Task failed due to Internal Error" -msgstr "La tarea ha fallado a causa de un Error Interno" - -msgid "Task was not configured properly" -msgstr "La tarea no se configuró correctamente" - -#, python-format -msgid "Task with the given id %(task_id)s was not found" -msgstr "No se encontró tarea con id %(task_id)s proporcionado" - -msgid "The \"changes-since\" filter is no longer available on v2." -msgstr "El filtro \"changes-since\" ya no está disponible en v2." - -#, python-format -msgid "The CA file you specified %s does not exist" -msgstr "El archivo CA %s que ha especificado no existe" - -#, python-format -msgid "" -"The Image %(image_id)s object being created by this task %(task_id)s, is no " -"longer in valid status for further processing." -msgstr "" -"El objeto de imagen %(image_id)s que la tarea %(task_id)s está creando, ya " -"no tiene un estado válido para un próximo procesamiento. " - -msgid "The Store URI was malformed." -msgstr "El URI del almacén tenía un formato incorrecto." - -msgid "" -"The URL to the keystone service. If \"use_user_token\" is not in effect and " -"using keystone auth, then URL of keystone can be specified." -msgstr "" -"La URL al servicio de keystone. Si \"use_user_token\" no tiene efecto y usa " -"keystone auth, entonces se puede especificar la URL de keystone." - -msgid "" -"The administrators password. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"La contraseña de los administradores. Si \"use_user_token\" no tiene " -"efecto, entonces se puede especificar las credenciales del administrador." - -msgid "" -"The administrators user name. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"El nombre de usuario del administrador. 
Si \"use_user_token\" no tiene " -"efecto, entonces se pueden especificar las credenciales del administrador." - -#, python-format -msgid "The cert file you specified %s does not exist" -msgstr "El archivo de certificado que ha especificado %s no existe" - -msgid "The current status of this task" -msgstr "El estado actual de esta tarea" - -#, python-format -msgid "" -"The device housing the image cache directory %(image_cache_dir)s does not " -"support xattr. It is likely you need to edit your fstab and add the " -"user_xattr option to the appropriate line for the device housing the cache " -"directory." -msgstr "" -"El dispositivo que aloja el directorio de caché de imágenes " -"%(image_cache_dir)s no soporta xattr. Es probable que tenga que editar fstab " -"y añadir la poción user_xattr en la línea adecuada para que el dispositivo " -"aloje el directorio de caché." - -#, python-format -msgid "" -"The given uri is not valid. Please specify a valid uri from the following " -"list of supported uri %(supported)s" -msgstr "" -"El uri proporcionado no es válido. Por favor especifique un uri válido de la " -"siguiente lista de uri soportados %(supported)s" - -#, python-format -msgid "The incoming image is too large: %s" -msgstr "La imagen de entrada es demasiado grande: %s" - -#, python-format -msgid "The key file you specified %s does not exist" -msgstr "El archivo de claves que ha especificado %s no existe" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image locations. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Se ha excedido el límite en el número permitido para ubicaciones de imagen. " -"Intento: %(attempted)s, Máximo: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image members for this " -"image. Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Se ha excedido el límite en el número de miembros de imagen para esta " -"imagen. 
Intentos: %(attempted)s, Máximo: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Se ha excedido el límite en el número permitido para propiedades de imagen. " -"Intento: %(attempted)s, Máximo: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(num)s, Maximum: %(quota)s" -msgstr "" -"Se ha excedido el límite en el número de propiedades de imagen permitidas. " -"Intentos: %(num)s, Máximo: %(quota)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image tags. Attempted: " -"%(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Se ha excedido el límite en el número permitido para etiquetas de imagen. " -"Intento: %(attempted)s, Máximo: %(maximum)s" - -#, python-format -msgid "The location %(location)s already exists" -msgstr "Ya existe la ubicación %(location)s" - -#, python-format -msgid "The location data has an invalid ID: %d" -msgstr "Los datos de ubicación contienen un ID no válido: %d" - -#, python-format -msgid "" -"The metadata definition %(record_type)s with name=%(record_name)s not " -"deleted. Other records still refer to it." -msgstr "" -"No se borró la definición de metadatos%(record_type)s de nombre=" -"%(record_name)s- Otros archivos aún se refieren a ésta." - -#, python-format -msgid "The metadata definition namespace=%(namespace_name)s already exists." -msgstr "" -"Ya existe el espacio de nombre para definición de metadatos=" -"%(namespace_name)s" - -#, python-format -msgid "" -"The metadata definition object with name=%(object_name)s was not found in " -"namespace=%(namespace_name)s." -msgstr "" -"No se encontró el objeto para definición de metadatos de nombre=" -"%(object_name)s en espacio de nombre=%(namespace_name)s." 
- -#, python-format -msgid "" -"The metadata definition property with name=%(property_name)s was not found " -"in namespace=%(namespace_name)s." -msgstr "" -"No se encontró la propiedad para definición de metadatos de nombre=" -"%(property_name)s en espacio de nombre=%(namespace_name)s." - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s already exists." -msgstr "" -"Ya existe la asociación de tipo de recurso del tipo de recurso=" -"%(resource_type_name)s para el espacio de nombre=%(namespace_name)s." - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s, was not found." -msgstr "" -"No se encontró la asociación de tipo de recurso del tipo de recurso para " -"definición de metadatos=%(resource_type_name)s para el espacio de nombre=" -"%(namespace_name)s." - -#, python-format -msgid "" -"The metadata definition resource-type with name=%(resource_type_name)s, was " -"not found." -msgstr "" -"No se encontró el tipo de recurso para definición de metadatos de nombre=" -"%(resource_type_name)s" - -#, python-format -msgid "" -"The metadata definition tag with name=%(name)s was not found in namespace=" -"%(namespace_name)s." -msgstr "" -"No se encontró la etiqueta para definición de metadatos de nombre=%(name)s " -"en el espacio de nombre=%(namespace_name)s." - -msgid "The parameters required by task, JSON blob" -msgstr "Los parámetros requeridos por tarea, objeto JSON" - -msgid "The provided image is too large." -msgstr "La imagen proporcionada es demasiado grande." - -msgid "" -"The region for the authentication service. If \"use_user_token\" is not in " -"effect and using keystone auth, then region name can be specified." -msgstr "" -"La región para el servicio de autenticación. 
Si \"use_user_token\" no tiene " -"efecto y utiliza keystone auth, entonces se puede especificar el nombre de " -"la región." - -msgid "The request returned 500 Internal Server Error." -msgstr "La solicitud ha devuelto el mensaje 500 Error interno del servidor." - -msgid "" -"The request returned 503 Service Unavailable. This generally occurs on " -"service overload or other transient outage." -msgstr "" -"La solicitud ha devuelto un error 503 Servicio no disponible. Esto sucede " -"generalmente por una sobrecarga del servicio o una interrupción transitoria." - -#, python-format -msgid "" -"The request returned a 302 Multiple Choices. This generally means that you " -"have not included a version indicator in a request URI.\n" -"\n" -"The body of response returned:\n" -"%(body)s" -msgstr "" -"La solicitud ha devuelto un 302 Múltiples opciones. Generalmente esto " -"significa que no se ha incluido un indicador de versión en un URI de " -"solicitud.\n" -"\n" -"El cuerpo de la respuesta devuelta:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned a 413 Request Entity Too Large. This generally means " -"that rate limiting or a quota threshold was breached.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"La solicitud ha devuelto un error 413 Entidad de solicitud demasiado grande. " -"Esto generalmente significa que se ha infringido el límite de índice o un " -"umbral de cuota.\n" -"\n" -"El cuerpo de la respuesta:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned an unexpected status: %(status)s.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"La solicitud ha devuelto un estado inesperado: %(status)s.\n" -"\n" -"El cuerpo de la respuesta:\n" -"%(body)s" - -msgid "" -"The requested image has been deactivated. Image data download is forbidden." -msgstr "" -"Se ha desactivado la imagen solicitada. Se prohíbe la descarga de datos de " -"imagen." 
- -msgid "The result of current task, JSON blob" -msgstr "El resultado de la tarea, objeto JSON actual" - -#, python-format -msgid "" -"The size of the data %(image_size)s will exceed the limit. %(remaining)s " -"bytes remaining." -msgstr "" -"El tamaño de los datos %(image_size)s excederá el límite. Quedan " -"%(remaining)s bytes" - -#, python-format -msgid "The specified member %s could not be found" -msgstr "No se pudo encontrar el miembro %s especificado" - -#, python-format -msgid "The specified metadata object %s could not be found" -msgstr "No se pudo encontrar el objeto de metadatos %s especificado" - -#, python-format -msgid "The specified metadata tag %s could not be found" -msgstr "No se pudo encontrar la etiqueta de metadatos %s especificada" - -#, python-format -msgid "The specified namespace %s could not be found" -msgstr "No se ha podido encontrar el espacio de nombre %s especificado" - -#, python-format -msgid "The specified property %s could not be found" -msgstr "No se pudo encontrar la propiedad %s especificada" - -#, python-format -msgid "The specified resource type %s could not be found " -msgstr "No se pudo encontrar el tipo de recurso %s especificado" - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'" -msgstr "" -"El estado de la ubicación de la imagen eliminada solo se puede establecer " -"como 'pending_delete' o 'deleted'." - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'." -msgstr "" -"El estado de la ubicación de imagen eliminada solo se puede establecer como " -"'pending_delete' o 'deleted'." - -msgid "The status of this image member" -msgstr "El estado de este miembro de la imagen" - -msgid "" -"The strategy to use for authentication. If \"use_user_token\" is not in " -"effect, then auth strategy can be specified." -msgstr "" -"La estrategia a usar para la autenticación. 
SI \"use_user_token\" no tiene " -"efecto, entonces, se puede especificar la estrategia auth." - -#, python-format -msgid "" -"The target member %(member_id)s is already associated with image " -"%(image_id)s." -msgstr "" -"El miembro meta %(member_id)s ya está asociado con la imagen %(image_id)s." - -msgid "" -"The tenant name of the administrative user. If \"use_user_token\" is not in " -"effect, then admin tenant name can be specified." -msgstr "" -"El nombre global del usuario de administrador. Si \"use_user_token\" no " -"tiene efecto, entonces se puede especificar el nombre global del " -"administrador." - -msgid "The type of task represented by this content" -msgstr "El tipo de tarea representada por este contenido" - -msgid "The unique namespace text." -msgstr "EL único texto de espacio de nombre." - -msgid "The user friendly name for the namespace. Used by UI if available." -msgstr "" -"El nombre fácil de usar para el espacio de nombre. Utilizado por UI si está " -"disponible." - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. Error: %(ioe)s" -msgstr "" -"Hay un problema con %(error_key_name)s %(error_filename)s. Por favor " -"verifique. Error: %(ioe)s" - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. OpenSSL error: %(ce)s" -msgstr "" -"Hay un problema con %(error_key_name)s %(error_filename)s. Por favor " -"verifique. Error OpenSSL: %(ce)s" - -#, python-format -msgid "" -"There is a problem with your key pair. Please verify that cert " -"%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" -msgstr "" -"Hay un problema con el par de claves. Por favor verifique que el certificado " -"%(cert_file)s y clave %(key_file)s deben estar juntas. Error OpenSSL %(ce)s" - -msgid "There was an error configuring the client." -msgstr "Se ha producido un error al configurar el cliente. 
" - -msgid "There was an error connecting to a server" -msgstr "Se ha producido un error al conectar a un servidor " - -msgid "" -"This operation is currently not permitted on Glance Tasks. They are auto " -"deleted after reaching the time based on their expires_at property." -msgstr "" -"Actualmente no se permite esta operación en las tareas Glance. Se eliminarán " -"automáticamente después de alcanzar el tiempo con base en expires_at " -"property." - -msgid "This operation is currently not permitted on Glance images details." -msgstr "" -"Actualmente no se permite la operación en los detalles de imagen de Glance." - -msgid "" -"Time in hours for which a task lives after, either succeeding or failing" -msgstr "Tiempo de vida en horas para la tarea, así tenga éxito o fracase" - -msgid "Too few arguments." -msgstr "Muy pocos argumentos." - -msgid "" -"URI cannot contain more than one occurrence of a scheme.If you have " -"specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " -"you need to change it to use the swift+http:// scheme, like so: swift+http://" -"user:pass@authurl.com/v1/container/obj" -msgstr "" -"El URI no puede contener más de una aparición de un esquema. Si ha " -"especificado un URI como swift://user:pass@http://authurl.com/v1/container/" -"obj, tiene que cambiarlo para que utilice el esquema swift+http://, como: " -"swift+http://user:pass@authurl.com/v1/container/obj" - -msgid "URL to access the image file kept in external store" -msgstr "" -"La URL para acceder al archivo de imagen se encuentra en un almacén externo" - -#, python-format -msgid "" -"Unable to create pid file %(pid)s. Running as non-root?\n" -"Falling back to a temp file, you can stop %(service)s service using:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" -msgstr "" -"No se puede crear fichero pid %(pid)s. 
¿Ejecutar como non-root?\n" -"Retrocediendo a fichero temporal, puede detener el uso de servicio " -"%(service)s:\n" -" %(file)s %(server)s detener--fichero-pid %(fb)s" - -#, python-format -msgid "Unable to filter by unknown operator '%s'." -msgstr "No se puede filtrar con el operador desconocido '%s'." - -msgid "Unable to filter on a range with a non-numeric value." -msgstr "No se ha podido filtrar en un rango con un valor no numérico." - -msgid "Unable to filter on a unknown operator." -msgstr "No se puede filtrar con un operador desconocido." - -msgid "Unable to filter using the specified operator." -msgstr "No se ha podido filtrar utilizando el operador especificado." - -msgid "Unable to filter using the specified range." -msgstr "No se ha podido filtrar mediante el rango especificado." - -#, python-format -msgid "Unable to find '%s' in JSON Schema change" -msgstr "No se ha podido encontrar '%s' en el cambio del esquema JSON" - -#, python-format -msgid "" -"Unable to find `op` in JSON Schema change. It must be one of the following: " -"%(available)s." -msgstr "" -"No es posible encontrar `op` en cambio de JSON Schema. Debe ser uno de los " -"siguientes: %(available)s. " - -msgid "Unable to increase file descriptor limit. Running as non-root?" -msgstr "" -"No se puede aumentar el límite de descripción de fichero ¿Desea ejecutar " -"como non-root?" - -#, python-format -msgid "" -"Unable to load %(app_name)s from configuration file %(conf_file)s.\n" -"Got: %(e)r" -msgstr "" -"No se ha podido cargar %(app_name)s desde el archivo de configuración " -"%(conf_file)s.\n" -"Se ha obtenido: %(e)r" - -#, python-format -msgid "Unable to load schema: %(reason)s" -msgstr "No se ha podido cargar el esquema: %(reason)s" - -#, python-format -msgid "Unable to locate paste config file for %s." -msgstr "No se puede ubicar el fichero de configuración de pegado para %s." 
- -#, python-format -msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" -msgstr "No se puede cargar datos de imagen duplicada %(image_id)s: %(error)s" - -msgid "Unauthorized image access" -msgstr "Acceso a imagen no autorizado" - -msgid "Unexpected body type. Expected list/dict." -msgstr "Tipo de cuerpo inesperado. Se esperaba list/dict." - -#, python-format -msgid "Unexpected response: %s" -msgstr "Respuesta inesperada : %s " - -#, python-format -msgid "Unknown auth strategy '%s'" -msgstr "Estrategia de autenticación desconocida '%s' " - -#, python-format -msgid "Unknown command: %s" -msgstr "Comando desconocido %s" - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "Dirección de clasificación desconocida, debe ser 'desc' o ' asc'" - -msgid "Unrecognized JSON Schema draft version" -msgstr "Versión de borrador de esquema JSON no reconocida" - -msgid "Unrecognized changes-since value" -msgstr "Valor de changes-since no reconocido" - -#, python-format -msgid "Unsupported sort_dir. Acceptable values: %s" -msgstr "sort_dir no soportado. Valores aceptables: %s" - -#, python-format -msgid "Unsupported sort_key. Acceptable values: %s" -msgstr "sort_key no soportado. Valores aceptables: %s" - -msgid "Virtual size of image in bytes" -msgstr "Tamaño virtual de la imagen en bytes" - -#, python-format -msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" -msgstr "" -"Se esperó 15 segundos para que pid %(pid)s (%(file)s) muriera; desistiendo" - -msgid "" -"When running server in SSL mode, you must specify both a cert_file and " -"key_file option value in your configuration file" -msgstr "" -"Al ejecutar el servidor en modalidad SSL, debe especificar un valor para las " -"opciones cert_file y key_file en el archivo de configuración" - -msgid "" -"Whether to pass through the user token when making requests to the registry. 
" -"To prevent failures with token expiration during big files upload, it is " -"recommended to set this parameter to False.If \"use_user_token\" is not in " -"effect, then admin credentials can be specified." -msgstr "" -"Si se debe o no pasar a través del token del usuario cuando se hacen " -"solicitudes al registro. Para prevenir fallas con la expiración del token " -"durante la carga de ficheros grandes, se recomienda configurar este " -"parámetro en False. Si \"use_user_token\" no tiene efecto, entonces se " -"pueden especificar credenciales de administración." - -#, python-format -msgid "Wrong command structure: %s" -msgstr "Estructura de comando incorrecta: %s" - -msgid "You are not authenticated." -msgstr "No está autenticado." - -msgid "You are not authorized to complete this action." -msgstr "No está autorizado a completar esta acción." - -#, python-format -msgid "You are not authorized to lookup image %s." -msgstr "No tiene autorización para buscar la imagen %s." - -#, python-format -msgid "You are not authorized to lookup the members of the image %s." -msgstr "No tiene autorización para buscar los miembros de la imagen %s." - -#, python-format -msgid "You are not permitted to create a tag in the namespace owned by '%s'" -msgstr "" -"No tiene permiso para crear etiqueta en el espacio de nombre propiedad de " -"'%s'" - -msgid "You are not permitted to create image members for the image." -msgstr "No tiene permiso para crear miembros de imagen para la imagen." - -#, python-format -msgid "You are not permitted to create images owned by '%s'." -msgstr "No tiene permiso para crear imágenes propiedad de '%s'." 
- -#, python-format -msgid "You are not permitted to create namespace owned by '%s'" -msgstr "No tiene permiso para crear espacio de nombre propiedad de '%s'" - -#, python-format -msgid "You are not permitted to create object owned by '%s'" -msgstr "No tiene permiso para crear objeto propiedad de '%s'" - -#, python-format -msgid "You are not permitted to create property owned by '%s'" -msgstr "No tiene permiso para crear propiedad perteneciente a'%s'" - -#, python-format -msgid "You are not permitted to create resource_type owned by '%s'" -msgstr "No tiene permiso para crear resource_type propiedad de '%s'" - -#, python-format -msgid "You are not permitted to create this task with owner as: %s" -msgstr "No tiene permiso para crear esta tarea como propiedad de: '%s" - -msgid "You are not permitted to deactivate this image." -msgstr "No tiene permiso para deactivar esta imagen." - -msgid "You are not permitted to delete this image." -msgstr "No tiene permiso para suprimir esta imagen." - -msgid "You are not permitted to delete this meta_resource_type." -msgstr "No tiene permiso para eliminar este meta_resource_type." - -msgid "You are not permitted to delete this namespace." -msgstr "No tiene permiso para eliminar este espacio de nombre." - -msgid "You are not permitted to delete this object." -msgstr "No tiene permiso para eliminar este objeto." - -msgid "You are not permitted to delete this property." -msgstr "No tiene permiso para eliminar esta propiedad." - -msgid "You are not permitted to delete this tag." -msgstr "No tiene permiso para eliminar esta etiqueta." - -#, python-format -msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." -msgstr "No tiene permiso para modificar '%(attr)s' en este %(resource)s." - -#, python-format -msgid "You are not permitted to modify '%s' on this image." -msgstr "No tiene permiso para modificar '%s' en esta imagen." - -msgid "You are not permitted to modify locations for this image." 
-msgstr "No tiene permiso para modificar ubicaciones para esta imagen." - -msgid "You are not permitted to modify tags on this image." -msgstr "No tiene permiso para modificar etiquetas en esta imagen." - -msgid "You are not permitted to modify this image." -msgstr "No tiene permiso para modificar esta imagen." - -msgid "You are not permitted to reactivate this image." -msgstr "No tiene permiso para reactivar esta imagen." - -msgid "You are not permitted to set status on this task." -msgstr "No tiene permiso para configurar estado en esta tarea." - -msgid "You are not permitted to update this namespace." -msgstr "No tiene permiso para actualizar este espacio de nombre." - -msgid "You are not permitted to update this object." -msgstr "No tiene permiso para actualizar este objeto." - -msgid "You are not permitted to update this property." -msgstr "No tiene permiso para actualizar esta propiedad." - -msgid "You are not permitted to update this tag." -msgstr "No tiene permiso para actualizar esta etiqueta." - -msgid "You are not permitted to upload data for this image." -msgstr "No tiene permiso para cargar datos para esta imagen." 
- -#, python-format -msgid "You cannot add image member for %s" -msgstr "No se puede añadir el miembro de la imagen para %s" - -#, python-format -msgid "You cannot delete image member for %s" -msgstr "No se puede suprimir el miembro de la imagen para %s" - -#, python-format -msgid "You cannot get image member for %s" -msgstr "No se puede obtener el miembro de la imagen para %s" - -#, python-format -msgid "You cannot update image member %s" -msgstr "No se puede actualizar el miembro de la imagen %s" - -msgid "You do not own this image" -msgstr "No es propietario de esta imagen " - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a cert, " -"however you have failed to supply either a key_file parameter or set the " -"GLANCE_CLIENT_KEY_FILE environ variable" -msgstr "" -"Ha seleccionado utilizar SSL en la conexión y ha proporcionado un " -"certificado, pero no ha proporcionado un parámetro key_file ni ha definido " -"la variable de entorno GLANCE_CLIENT_KEY_FILE" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a key, " -"however you have failed to supply either a cert_file parameter or set the " -"GLANCE_CLIENT_CERT_FILE environ variable" -msgstr "" -"Ha seleccionado utilizar SSL en la conexión y ha proporcionado una clave, " -"pero no ha proporcionado un parámetro cert_file ni ha definido la variable " -"de entorno GLANCE_CLIENT_CERT_FILE" - -msgid "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" -msgstr "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" - -#, python-format -msgid "__init__() got unexpected keyword argument '%s'" -msgstr "__init__() obtuvo un argumento de búsqueda inesperado '%s'" - -#, python-format -msgid "" -"cannot transition from %(current)s to %(next)s in update (wanted from_state=" -"%(from)s)" -msgstr "" -"No se puede pasar de %(current)s a %(next)s en la actualización (se desea " 
-"from_state=%(from)s)" - -#, python-format -msgid "custom properties (%(props)s) conflict with base properties" -msgstr "" -"las propiedades personalizadas (%(props)s) están en conflicto con las " -"propiedades base" - -msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" -msgstr "" -"Los concentradores de 'sondeo' y los de 'selección' no están disponibles en " -"esta plataforma" - -msgid "is_public must be None, True, or False" -msgstr "is_public debe ser None, True o False" - -msgid "limit param must be an integer" -msgstr "el parámetro de límite debe ser un entero" - -msgid "limit param must be positive" -msgstr "el parámetro de límite debe ser positivo" - -msgid "md5 hash of image contents." -msgstr "md5 hash de contenidos de imagen." - -#, python-format -msgid "new_image() got unexpected keywords %s" -msgstr "new_image() obtuvo argumentos de búsqueda inesperados %s" - -msgid "protected must be True, or False" -msgstr "protected debe ser True o False" - -#, python-format -msgid "unable to launch %(serv)s. Got error: %(e)s" -msgstr "No se puede iniciar %(serv)s. Se ha obtenido error: %(e)s" - -#, python-format -msgid "x-openstack-request-id is too long, max size %s" -msgstr "x-openstack-request-id es demasiado largo, el tamaño máximo es %s" diff --git a/glance/locale/fr/LC_MESSAGES/glance.po b/glance/locale/fr/LC_MESSAGES/glance.po deleted file mode 100644 index 85c091fc..00000000 --- a/glance/locale/fr/LC_MESSAGES/glance.po +++ /dev/null @@ -1,2170 +0,0 @@ -# Translations template for glance. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the glance project. -# -# Translators: -# Arnaud Legendre , 2013 -# Christophe kryskool , 2013 -# EVEILLARD , 2013-2014 -# Maxime COQUEREL , 2014 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: glance 15.0.0.0b3.dev29\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-06-23 20:54+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-12 05:20+0000\n" -"Last-Translator: Copied by Zanata \n" -"Language: fr\n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" -"Language-Team: French\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "%(cls)s exception was raised in the last rpc call: %(val)s" -msgstr "" -"Une exception %(cls)s s'est produite dans le dernier appel d'une procédure " -"distante : %(val)s" - -#, python-format -msgid "%(m_id)s not found in the member list of the image %(i_id)s." -msgstr "%(m_id)s introuvable dans la liste des membres de l'image %(i_id)s." - -#, python-format -msgid "%(serv)s (pid %(pid)s) is running..." -msgstr "%(serv)s (pid %(pid)s) est en cours d'exécution..." - -#, python-format -msgid "%(serv)s appears to already be running: %(pid)s" -msgstr "%(serv)s semble déjà en cours d'exécution : %(pid)s" - -#, python-format -msgid "" -"%(strategy)s is registered as a module twice. %(module)s is not being used." -msgstr "" -"%(strategy)s est enregistré deux fois comme module. %(module)s n'est pas " -"utilisé." - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Could not load the " -"filesystem store" -msgstr "" -"%(task_id)s de %(task_type)s ne sont pas configurés correctement. Impossible " -"de charger le magasin de système de fichiers" - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Missing work dir: " -"%(work_dir)s" -msgstr "" -"%(task_id)s de %(task_type)s ne sont pas configurés correctement. 
Rép de " -"travail manquant : %(work_dir)s" - -#, python-format -msgid "%(verb)sing %(serv)s" -msgstr "%(verb)s %(serv)s" - -#, python-format -msgid "%(verb)sing %(serv)s with %(conf)s" -msgstr "Opération %(verb)s en cours sur %(serv)s avec %(conf)s" - -#, python-format -msgid "" -"%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " -"address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " -"separately from the port (i.e., \"[fe80::a:b:c]:9876\")." -msgstr "" -"%s Veuillez indiquer une paire hôte:port, dans laquelle hôte est une adresse " -"IPv4, une adresse IPv6, un nom d'hôte ou un nom de domaine complet. Si vous " -"utilisez une adresse IPv6, faites-la figurer entre crochets de façon à la " -"séparer du port (par ex., \"[fe80::a:b:c]:9876\")." - -#, python-format -msgid "%s can't contain 4 byte unicode characters." -msgstr "%s ne peut pas contenir de caractère Unicode de 4 octets." - -#, python-format -msgid "%s is already stopped" -msgstr "%s est déjà stoppé" - -#, python-format -msgid "%s is stopped" -msgstr "%s est arrêté" - -msgid "" -"--os_auth_url option or OS_AUTH_URL environment variable required when " -"keystone authentication strategy is enabled\n" -msgstr "" -"Option --os_auth_url ou variable d'environnement OS_AUTH_URL requise lorsque " -"la stratégie d'authentification keystone est activée\n" - -msgid "A body is not expected with this request." -msgstr "Un corps n'est pas attendu avec cette demande." - -#, python-format -msgid "" -"A metadata definition object with name=%(object_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"Un objet de la définition de métadonnées avec le nom %(object_name)s existe " -"déjà dans l'espace de nom %(namespace_name)s." - -#, python-format -msgid "" -"A metadata definition property with name=%(property_name)s already exists in " -"namespace=%(namespace_name)s." 
-msgstr "" -"Une propriété de la définition de métadonnées avec le nom %(property_name)s " -"existe déjà dans l'espace de nom %(namespace_name)s." - -#, python-format -msgid "" -"A metadata definition resource-type with name=%(resource_type_name)s already " -"exists." -msgstr "" -"Un type de ressource de la définition de métadonnées avec le nom " -"%(resource_type_name)s existe déjà." - -msgid "A set of URLs to access the image file kept in external store" -msgstr "" -"Ensemble d'URL pour accéder au fichier image conservé dans le magasin externe" - -msgid "Amount of disk space (in GB) required to boot image." -msgstr "" -"Quantité d'espace disque (en Go) requise pour l'image d'initialisation." - -msgid "Amount of ram (in MB) required to boot image." -msgstr "Quantité de mémoire RAM (en Mo) requise pour l'image d'initialisation." - -msgid "An identifier for the image" -msgstr "Identificateur de l'image" - -msgid "An identifier for the image member (tenantId)" -msgstr "Identificateur pour le membre de l'image (tenantId)" - -msgid "An identifier for the owner of this task" -msgstr "Un identificateur pour le propriétaire de cette tâche" - -msgid "An identifier for the task" -msgstr "Un identificateur pour la tâche" - -msgid "An image file url" -msgstr "URL d'un fichier image" - -msgid "An image schema url" -msgstr "URL d'un schéma d'image" - -msgid "An image self url" -msgstr "URL d'une image self" - -#, python-format -msgid "An image with identifier %s already exists" -msgstr "Une image avec l'identificateur %s existe déjà" - -msgid "An import task exception occurred" -msgstr "Une exception liée à la tâche d'importation s'est produite" - -msgid "An object with the same identifier already exists." -msgstr "Un objet avec le même identificateur existe déjà." - -msgid "An object with the same identifier is currently being operated on." -msgstr "Un objet avec le même identificateur est déjà en cours d'utilisation." 
- -msgid "An object with the specified identifier was not found." -msgstr "Un objet avec l'identificateur spécifié est introuvable." - -msgid "An unknown exception occurred" -msgstr "Une exception inconnue s'est produite" - -msgid "An unknown task exception occurred" -msgstr "Une exception de tâche inconnue s'est produite" - -#, python-format -msgid "Attempt to upload duplicate image: %s" -msgstr "Tentative de téléchargement d'image en double : %s" - -msgid "Attempted to update Location field for an image not in queued status." -msgstr "" -"Vous avez tenté de mettre à jour la zone Emplacement pour une image qui n'a " -"pas le statut en file d'attente." - -#, python-format -msgid "Attribute '%(property)s' is read-only." -msgstr "L'attribut '%(property)s' est en lecture seule." - -#, python-format -msgid "Attribute '%(property)s' is reserved." -msgstr "L'attribut '%(property)s' est réservé." - -#, python-format -msgid "Attribute '%s' is read-only." -msgstr "L'attribut '%s' est en lecture seule." - -#, python-format -msgid "Attribute '%s' is reserved." -msgstr "Attribut '%s' est réservé." - -msgid "Attribute container_format can be only replaced for a queued image." -msgstr "" -"L'attribut container_format ne peut être remplacé que pour une image mise en " -"file d'attente." - -msgid "Attribute disk_format can be only replaced for a queued image." -msgstr "" -"L'attribut disk_format ne peut être remplacé que pour une image mise en file " -"d'attente." - -#, python-format -msgid "Auth service at URL %(url)s not found." -msgstr "Service d'auth à l'URL %(url)s non trouvé." - -#, python-format -msgid "" -"Authentication error - the token may have expired during file upload. " -"Deleting image data for %s." -msgstr "" -"Erreur d'authentification - le jeton a peut-être expiré lors du " -"téléchargement de fichier. Suppression des données d'image pour %s." - -msgid "Authorization failed." -msgstr "Echec de l'autorisation." 
- -msgid "Available categories:" -msgstr "Catégories disponibles :" - -#, python-format -msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." -msgstr "" -"Format de filtre de requête \"%s\" incorrect. Utilisez la notation de date " -"et heure ISO 8601." - -#, python-format -msgid "Bad Command: %s" -msgstr "Commande %s erronée " - -#, python-format -msgid "Bad header: %(header_name)s" -msgstr "Erreur d’entête: %(header_name)s" - -#, python-format -msgid "Bad value passed to filter %(filter)s got %(val)s" -msgstr "Valeur incorrecte transmise pour filtrer %(filter)s, %(val)s obtenu" - -#, python-format -msgid "Badly formed S3 URI: %(uri)s" -msgstr "URI S3 incorrecte : %(uri)s" - -#, python-format -msgid "Badly formed credentials '%(creds)s' in Swift URI" -msgstr "Données d'identification incorrectes '%(creds)s' dans l'URI Swift" - -msgid "Badly formed credentials in Swift URI." -msgstr "Données d'identification incorrectes dans l'URI Swift." - -msgid "Body expected in request." -msgstr "Corps attendu dans la demande" - -msgid "Cannot be a negative value" -msgstr "Ne peut pas être une valeur négative" - -msgid "Cannot be a negative value." -msgstr "Ne peut pas être une valeur négative." - -#, python-format -msgid "Cannot convert image %(key)s '%(value)s' to an integer." -msgstr "Impossible de convertir l'image %(key)s '%(value)s' en entier." - -msgid "Cannot remove last location in the image." -msgstr "Impossible de supprimer le dernier emplacement dans l'image." - -#, python-format -msgid "Cannot save data for image %(image_id)s: %(error)s" -msgstr "" -"Les données pour l'image %(image_id)s ne peuvent pas être sauvegardées : " -"erreur %(error)s" - -msgid "Cannot set locations to empty list." -msgstr "Impossible de définir des emplacements avec une liste vide." - -msgid "Cannot upload to an unqueued image" -msgstr "Téléchargement impossible dans une image non placée en file d'attente" - -#, python-format -msgid "Checksum verification failed. 
Aborted caching of image '%s'." -msgstr "" -"Echec de vérification du total de contrôle. Mise en cache de l'image '%s' " -"annulée." - -msgid "Client disconnected before sending all data to backend" -msgstr "Client déconnecté avant l'envoi de toutes les données au backend" - -msgid "Command not found" -msgstr "La commande n'a pas été trouvée" - -msgid "Configuration option was not valid" -msgstr "L'option de configuration n'était pas valide" - -#, python-format -msgid "Connect error/bad request to Auth service at URL %(url)s." -msgstr "" -"Erreur de connexion/demande erronée pour le service d'auth à l'URL %(url)s." - -#, python-format -msgid "Constructed URL: %s" -msgstr "URL construite : %s" - -msgid "Container format is not specified." -msgstr "Le format de conteneur n'a pas été spécifié." - -msgid "Content-Type must be application/octet-stream" -msgstr "Le type de contenu doit être application/octet-stream" - -#, python-format -msgid "Corrupt image download for image %(image_id)s" -msgstr "téléchargement d'image endommagée pour l'image %(image_id)s" - -#, python-format -msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" -msgstr "" -"Liaison impossible à %(host)s:%(port)s après une tentative de 30 secondes" - -msgid "Could not find OVF file in OVA archive file." -msgstr "Fichier OVF introuvable dans le fichier archive OVA." 
- -#, python-format -msgid "Could not find metadata object %s" -msgstr "L'objet métadonnées %s est introuvable" - -#, python-format -msgid "Could not find metadata tag %s" -msgstr "Balise de métadonnées %s introuvable" - -#, python-format -msgid "Could not find namespace %s" -msgstr "Espace de nom %s introuvable" - -#, python-format -msgid "Could not find property %s" -msgstr "Propriété %s introuvable" - -msgid "Could not find required configuration option" -msgstr "Option de configuration obligatoire introuvable" - -#, python-format -msgid "Could not find task %s" -msgstr "La tâche %s est introuvable" - -#, python-format -msgid "Could not update image: %s" -msgstr "Impossible de mettre à jour l'image : %s" - -msgid "Currently, OVA packages containing multiple disk are not supported." -msgstr "" -"Actuellement, les packages OVA contenant plusieurs disques ne sont pas pris " -"en charge." - -#, python-format -msgid "Data for image_id not found: %s" -msgstr "Données d'image_id introuvables : %s" - -msgid "Data supplied was not valid." -msgstr "Les données fournies n'étaient pas valides." 
- -msgid "Date and time of image member creation" -msgstr "Date et heure de création du membre de l'image" - -msgid "Date and time of image registration" -msgstr "Date et heure d'enregistrement de l'image" - -msgid "Date and time of last modification of image member" -msgstr "Date et heure de dernière modification du membre de l'image" - -msgid "Date and time of namespace creation" -msgstr "Date et heure de création de l'espace de nom" - -msgid "Date and time of object creation" -msgstr "Date et heure de création de l'objet" - -msgid "Date and time of resource type association" -msgstr "Date et heure d'association de type de ressource" - -msgid "Date and time of tag creation" -msgstr "Date et heure de création de la balise" - -msgid "Date and time of the last image modification" -msgstr "Date et heure de dernière modification de l'image" - -msgid "Date and time of the last namespace modification" -msgstr "Date et heure de dernière modification de l'espace de nom" - -msgid "Date and time of the last object modification" -msgstr "Date et heure de dernière modification de l'objet" - -msgid "Date and time of the last resource type association modification" -msgstr "" -"Date et heure de dernière modification d'association de type de ressource " - -msgid "Date and time of the last tag modification" -msgstr "Date et heure de dernière modification de la balise " - -msgid "Datetime when this resource was created" -msgstr "Date-heure à laquelle cette ressource a été créée" - -msgid "Datetime when this resource was updated" -msgstr "Date-heure à laquelle cette ressource a été mise à jour" - -msgid "Datetime when this resource would be subject to removal" -msgstr "Date-heure à laquelle cette ressource serait soumise à une suppression" - -#, python-format -msgid "Denying attempt to upload image because it exceeds the quota: %s" -msgstr "" -"Refus de la tentative de téléchargement d'une image qui dépasse le quota : %s" - -#, python-format -msgid "Denying attempt to upload image 
larger than %d bytes." -msgstr "" -"Refus de la tentative de téléchargement d'une image dont la taille est " -"supérieure à %d octets." - -msgid "Descriptive name for the image" -msgstr "Nom descriptif de l'image" - -msgid "Disk format is not specified." -msgstr "Le format de disque n'a pas été spécifié." - -#, python-format -msgid "" -"Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" -msgstr "" -"Impossible de configurer le pilote %(driver_name)s correctement. Cause : " -"%(reason)s" - -msgid "" -"Error decoding your request. Either the URL or the request body contained " -"characters that could not be decoded by Glance" -msgstr "" -"Erreur lors du décodage de votre demande. L'URL ou le corps de la demande " -"contiennent des caractères que Glance ne peut pas décoder" - -#, python-format -msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" -msgstr "" -"Erreur lors de l'extraction des membres de l'image %(image_id)s : " -"%(inner_msg)s" - -msgid "Error in store configuration. Adding images to store is disabled." -msgstr "" -"Erreur de configuration du magasin. L'ajout d'images au magasin est " -"désactivé." - -msgid "Expected a member in the form: {\"member\": \"image_id\"}" -msgstr "Membre attendu sous la forme : {\"member\": \"image_id\"}" - -msgid "Expected a status in the form: {\"status\": \"status\"}" -msgstr "Statut attendu sous la forme : {\"status\": \"status\"}" - -msgid "External source should not be empty" -msgstr "La source externe ne doit pas être vide" - -#, python-format -msgid "External sources are not supported: '%s'" -msgstr "Sources externes non prises en charge : '%s'" - -#, python-format -msgid "Failed to activate image. Got error: %s" -msgstr "Echec de l'activation de l'image. Erreur obtenue : %s" - -#, python-format -msgid "Failed to add image metadata. Got error: %s" -msgstr "Impossible d'ajouter les métadonnées d'image. 
Erreur obtenue : %s" - -#, python-format -msgid "Failed to find image %(image_id)s to delete" -msgstr "Échec pour trouver image %(image_id)s à supprimer." - -#, python-format -msgid "Failed to find image to delete: %s" -msgstr "Échec pour trouver l'image à supprimer: %s" - -#, python-format -msgid "Failed to find image to update: %s" -msgstr "Echec pour trouver l'image à mettre à jour: %s" - -#, python-format -msgid "Failed to find resource type %(resourcetype)s to delete" -msgstr "Echec pour trouver le type de ressource %(resourcetype)s a supprimer" - -#, python-format -msgid "Failed to initialize the image cache database. Got error: %s" -msgstr "" -"Impossible d'initialiser la base de données de caches d'image. Erreur " -"obtenue : %s" - -#, python-format -msgid "Failed to read %s from config" -msgstr "Echec de la lecture de %s à partir de la config" - -#, python-format -msgid "Failed to reserve image. Got error: %s" -msgstr "Impossible de réserver l'image. Erreur obtenue : %s" - -#, python-format -msgid "Failed to update image metadata. Got error: %s" -msgstr "" -"Impossible de mettre à jour les métadonnées d'image. Erreur obtenue : %s" - -#, python-format -msgid "Failed to upload image %s" -msgstr "Impossible de charger l'image %s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to HTTP error: " -"%(error)s" -msgstr "" -"Echec de téléchargement des données image pour l'image %(image_id)s en " -"raison d'une erreur HTTP : %(error)s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to internal error: " -"%(error)s" -msgstr "" -"Echec de téléchargement des données image pour l'image %(image_id)s en " -"raison d'une erreur interne : %(error)s" - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"Le fichier %(path)s dispose d'un fichier de sauvegarde non valide : " -"%(bfile)s. L'opération est abandonnée." 
- -msgid "" -"File based imports are not allowed. Please use a non-local source of image " -"data." -msgstr "" -"Les importations à partir de fichiers sont interdites. Utilisez une source " -"externe de données image." - -msgid "Forbidden image access" -msgstr "Accès interdit à l'image" - -#, python-format -msgid "Forbidden to delete a %s image." -msgstr "Interdiction de supprimer une image %s" - -#, python-format -msgid "Forbidden to delete image: %s" -msgstr "Interdiction de supprimer l'image: %s" - -#, python-format -msgid "Forbidden to modify '%(key)s' of %(status)s image." -msgstr "Interdiction de modifier '%(key)s' de l'image %(status)s." - -#, python-format -msgid "Forbidden to modify '%s' of image." -msgstr "Interdiction de modifier l'élément '%s' de l'image." - -msgid "Forbidden to reserve image." -msgstr "Interdiction de réserver une image." - -msgid "Forbidden to update deleted image." -msgstr "Interdiction de mettre à jour l'image supprimée." - -#, python-format -msgid "Forbidden to update image: %s" -msgstr "Interdiction de mise à jour de l'image: %s" - -#, python-format -msgid "Forbidden upload attempt: %s" -msgstr "Tentative de téléchargement interdite : %s" - -#, python-format -msgid "Forbidding request, metadata definition namespace=%s is not visible." -msgstr "" -"Interdiction de la demande, l'espace de nom %s de la définition de " -"métadonnées n'est pas visible." - -#, python-format -msgid "Forbidding request, task %s is not visible" -msgstr "Interdiction de la demande, la tâche %s n'est pas visible" - -msgid "Format of the container" -msgstr "Format du conteneur" - -msgid "Format of the disk" -msgstr "Format du disque" - -#, python-format -msgid "Host \"%s\" is not valid." -msgstr "Host \"%s\" n'est pas valide." - -#, python-format -msgid "Host and port \"%s\" is not valid." -msgstr "Host et port \"%s\" ne sont pas valides." 
- -msgid "" -"Human-readable informative message only included when appropriate (usually " -"on failure)" -msgstr "" -"Message d'information lisible par l'homme inclus uniquement si approprié " -"(habituellement en cas d'incident)" - -msgid "If true, image will not be deletable." -msgstr "Si true, l'image ne pourra pas être supprimée." - -msgid "If true, namespace will not be deletable." -msgstr "Si true, l'espace de nom ne pourra pas être supprimé." - -#, python-format -msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" -msgstr "" -"L'image %(id)s n'a pas pu être supprimée car elle est utilisée : %(exc)s" - -#, python-format -msgid "Image %(id)s not found" -msgstr "Image %(id)s non trouvé" - -#, python-format -msgid "" -"Image %(image_id)s could not be found after upload. The image may have been " -"deleted during the upload: %(error)s" -msgstr "" -"Image %(image_id)s introuvable après le téléchargement. Elle a sans doute " -"été supprimée au cours du téléchargement : %(error)s" - -#, python-format -msgid "Image %(image_id)s is protected and cannot be deleted." -msgstr "L'image %(image_id)s est protégée et ne peut pas être supprimée." - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload, cleaning up the chunks uploaded." -msgstr "" -"L'image %s n'a pas été trouvée après le téléchargement. Elle a sans doute " -"été supprimée pendant le téléchargement. Nettoyage des blocs téléchargés." - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload." -msgstr "" -"L'image %s est introuvable après le chargement. L'image a peut-être été " -"supprimée lors du chargement." - -#, python-format -msgid "Image %s is deactivated" -msgstr "L'image %s est désactivée" - -#, python-format -msgid "Image %s is not active" -msgstr "L'image %s n'est pas active" - -#, python-format -msgid "Image %s not found." 
-msgstr "Image %s introuvable." - -#, python-format -msgid "Image exceeds the storage quota: %s" -msgstr "l'image %s dépasse le quota de stockage" - -msgid "Image id is required." -msgstr "Id image est requis." - -msgid "Image is protected" -msgstr "L'image est protégée" - -#, python-format -msgid "Image member limit exceeded for image %(id)s: %(e)s:" -msgstr "Le nombre maximal de membres est dépassé pour l'image %(id)s : %(e)s :" - -#, python-format -msgid "Image name too long: %d" -msgstr "Nom de l'image trop long : %d" - -msgid "Image operation conflicts" -msgstr "Conflits d'opération d'image" - -#, python-format -msgid "" -"Image status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "" -"La transition du statut de l'image de %(cur_status)s vers %(new_status)s " -"n'est pas autorisée" - -#, python-format -msgid "Image storage media is full: %s" -msgstr "Le support de stockage d'image est saturé : %s" - -#, python-format -msgid "Image tag limit exceeded for image %(id)s: %(e)s:" -msgstr "Le nombre maximal de balises est dépassé pour l'image %(id)s : %(e)s :" - -#, python-format -msgid "Image upload problem: %s" -msgstr "Problème d'envoi de l'image: %s" - -#, python-format -msgid "Image with identifier %s already exists!" -msgstr "L'image avec l'identificateur %s existe déjà !" - -#, python-format -msgid "Image with identifier %s has been deleted." -msgstr "L'image avec l'identificateur %s a été supprimée." - -#, python-format -msgid "Image with identifier %s not found" -msgstr "L'image portant l'ID %s est introuvable" - -#, python-format -msgid "Image with the given id %(image_id)s was not found" -msgstr "L'image avec l'ID %(image_id)s indiqué est introuvable. 
" - -#, python-format -msgid "" -"Incorrect auth strategy, expected \"%(expected)s\" but received " -"\"%(received)s\"" -msgstr "" -"Stratégie d'autorisation incorrecte, valeur attendue \"%(expected)s\" mais " -"valeur obtenue \"%(received)s\"" - -#, python-format -msgid "Incorrect request: %s" -msgstr "Requête incorrecte: %s" - -#, python-format -msgid "Input does not contain '%(key)s' field" -msgstr "L'entrée ne contient pas la zone '%(key)s'" - -#, python-format -msgid "Insufficient permissions on image storage media: %s" -msgstr "Droits insuffisants sur le support de stockage d'image : %s" - -#, python-format -msgid "Invalid JSON pointer for this resource: '/%s'" -msgstr "Pointeur JSON invalide pour cette ressource : '%s'" - -#, python-format -msgid "Invalid checksum '%s': can't exceed 32 characters" -msgstr "" -"Total de contrôle '%s' non valide : il ne doit pas comporter plus de 32 " -"caractères. " - -msgid "Invalid configuration in glance-swift conf file." -msgstr "" -"Configuration non valide dans le fichier de configuration glance-swift." - -msgid "Invalid configuration in property protection file." -msgstr "" -"Configuration non valide dans le fichier de verrouillage de propriétés." - -#, python-format -msgid "Invalid container format '%s' for image." -msgstr "Format de conteneur '%s' non valide pour l'image." - -#, python-format -msgid "Invalid content type %(content_type)s" -msgstr "Type de contenu non valide %(content_type)s" - -#, python-format -msgid "Invalid disk format '%s' for image." -msgstr "Format de disque '%s' non valide pour l'image." - -#, python-format -msgid "Invalid filter value %s. The quote is not closed." -msgstr "Valeur de filtre %s non valide. Les guillemets ne sont pas fermés." - -#, python-format -msgid "" -"Invalid filter value %s. There is no comma after closing quotation mark." -msgstr "" -"Valeur de filtre %s non valide. Il n'y a pas de virgule après la fermeture " -"des guillemets." 
- -#, python-format -msgid "" -"Invalid filter value %s. There is no comma before opening quotation mark." -msgstr "" -"Valeur de filtre %s non valide. Il n'y a pas de virgule avant l'ouverture " -"des guillemets." - -msgid "Invalid image id format" -msgstr "Format d'ID image non valide" - -msgid "Invalid location" -msgstr "Emplacement non valide" - -#, python-format -msgid "Invalid location %s" -msgstr "Emplacement non valide : %s" - -#, python-format -msgid "Invalid location: %s" -msgstr "Emplacement non valide : %s" - -#, python-format -msgid "" -"Invalid location_strategy option: %(name)s. The valid strategy option(s) " -"is(are): %(strategies)s" -msgstr "" -"Option location_strategy non valide : %(name)s. La ou les options de " -"stratégie valides sont : %(strategies)s" - -msgid "Invalid locations" -msgstr "Emplacements non valides" - -#, python-format -msgid "Invalid locations: %s" -msgstr "Emplacements non valides : %s" - -msgid "Invalid marker format" -msgstr "Format de marqueur non valide" - -msgid "Invalid marker. Image could not be found." -msgstr "Marqueur non valide. Image introuvable." - -#, python-format -msgid "Invalid membership association: %s" -msgstr "Association d'appartenance non valide : %s" - -msgid "" -"Invalid mix of disk and container formats. When setting a disk or container " -"format to one of 'aki', 'ari', or 'ami', the container and disk formats must " -"match." -msgstr "" -"Combinaison non valide de formats de disque et de conteneur. Si vous " -"définissez un disque ou un conteneur au format 'aki', 'ari' ou 'ami', les " -"formats du disque et du conteneur doivent correspondre." - -#, python-format -msgid "" -"Invalid operation: `%(op)s`. It must be one of the following: %(available)s." -msgstr "" -"Opération non valide : `%(op)s`. Doit être l'une des suivantes : " -"%(available)s." - -msgid "Invalid position for adding a location." -msgstr "Position non valide pour l'ajout d'un emplacement." 
- -msgid "Invalid position for removing a location." -msgstr "Position non valide pour la suppression d'un emplacement." - -msgid "Invalid service catalog json." -msgstr "json de catalogue de service non valide." - -#, python-format -msgid "Invalid sort direction: %s" -msgstr "Sens de tri non valide : %s" - -#, python-format -msgid "" -"Invalid sort key: %(sort_key)s. It must be one of the following: " -"%(available)s." -msgstr "" -"Clé de tri non valide : %(sort_key)s. Doit être l'une des valeurs " -"suivantes : %(available)s." - -#, python-format -msgid "Invalid status value: %s" -msgstr "Valeur de statut non valide : %s" - -#, python-format -msgid "Invalid status: %s" -msgstr "Statut non valide : %s" - -#, python-format -msgid "Invalid time format for %s." -msgstr "Format d'heure non valide pour %s." - -#, python-format -msgid "Invalid type value: %s" -msgstr "Type de valeur non valide: %s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition namespace " -"with the same name of %s" -msgstr "" -"Mise à jour non valide. Elle créerait une de définition de métadonnées en " -"double avec le nom %s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Mise à jour non valide. Elle créerait un objet de définition de métadonnées " -"en double avec le nom %(name)s dans l'espace de nom %(namespace_name)s." - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Mise à jour non valide. Elle créerait un objet de définition de métadonnées " -"en double avec le nom %(name)s dans l'espace de nom %(namespace_name)s." - -#, python-format -msgid "" -"Invalid update. 
It would result in a duplicate metadata definition property " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Mise à jour non valide. Elle créerait une propriété de définition de " -"métadonnées avec le nom %(name)s dans l'espace de nom %(namespace_name)s." - -#, python-format -msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" -msgstr "" -"Valeur non valide '%(value)s' pour le paramètre '%(param)s' : %(extra_msg)s" - -#, python-format -msgid "Invalid value for option %(option)s: %(value)s" -msgstr "Valeur non valide pour l'option %(option)s : %(value)s" - -#, python-format -msgid "Invalid visibility value: %s" -msgstr "Valeur de visibilité non valide : %s" - -msgid "It's invalid to provide multiple image sources." -msgstr "Il est invalide de fournir plusieurs sources d'image" - -msgid "It's not allowed to add locations if locations are invisible." -msgstr "" -"L'ajout des emplacements n'est pas autorisé si les emplacements sont " -"invisibles." - -msgid "It's not allowed to remove locations if locations are invisible." -msgstr "" -"La suppression des emplacements n'est pas autorisée si les emplacements sont " -"invisibles." - -msgid "It's not allowed to update locations if locations are invisible." -msgstr "" -"La mise à jour des emplacements n'est pas autorisée si les emplacements sont " -"invisibles." - -msgid "List of strings related to the image" -msgstr "Liste des chaînes associées à l'image" - -msgid "Malformed JSON in request body." -msgstr "JSON incorrect dans le corps de demande." - -msgid "Maximal age is count of days since epoch." -msgstr "L'ancienneté maximale est le nombre de jours depuis l'epoch." - -#, python-format -msgid "Maximum redirects (%(redirects)s) was exceeded." -msgstr "Le nombre maximum de redirections (%(redirects)s) a été dépassé." 
- -#, python-format -msgid "Member %(member_id)s is duplicated for image %(image_id)s" -msgstr "Le membre %(member_id)s est en double pour l'image %(image_id)s" - -msgid "Member can't be empty" -msgstr "Membre ne peut pas être vide" - -msgid "Member to be added not specified" -msgstr "Membre à ajouter non spécifié" - -msgid "Membership could not be found." -msgstr "Appartenance non trouvée." - -#, python-format -msgid "" -"Metadata definition namespace %(namespace)s is protected and cannot be " -"deleted." -msgstr "" -"L'espace de nom %(namespace)s de la définition de métadonnées est protégé et " -"ne peut pas être supprimé." - -#, python-format -msgid "Metadata definition namespace not found for id=%s" -msgstr "" -"L'espace de nom de définition de métadonnées est introuvable pour l'ID %s" - -#, python-format -msgid "" -"Metadata definition object %(object_name)s is protected and cannot be " -"deleted." -msgstr "" -"L'objet %(object_name)s de la définition de métadonnées est protégé et ne " -"peut pas être supprimé." - -#, python-format -msgid "Metadata definition object not found for id=%s" -msgstr "L'objet de définition de métadonnées est introuvable pour l'ID %s" - -#, python-format -msgid "" -"Metadata definition property %(property_name)s is protected and cannot be " -"deleted." -msgstr "" -"La propriété %(property_name)s de la définition de métadonnées est protégée " -"et ne peut pas être supprimé." - -#, python-format -msgid "Metadata definition property not found for id=%s" -msgstr "La propriété de définition de métadonnées est introuvable pour l'ID %s" - -#, python-format -msgid "" -"Metadata definition resource-type %(resource_type_name)s is a seeded-system " -"type and cannot be deleted." -msgstr "" -"Le type de ressource %(resource_type_name)s de la définition de métadonnées " -"est un type prédéfiniet ne peut pas être supprimé." 
- -#, python-format -msgid "" -"Metadata definition resource-type-association %(resource_type)s is protected " -"and cannot be deleted." -msgstr "" -"L'association de type de ressource %(resource_type)s de la définition de " -"métadonnées est protégée et ne peut pas être supprimée." - -#, python-format -msgid "" -"Metadata definition tag %(tag_name)s is protected and cannot be deleted." -msgstr "" -"La balise de définition de métadonnées %(tag_name)s est protégée et ne peut " -"pas être supprimée." - -#, python-format -msgid "Metadata definition tag not found for id=%s" -msgstr "La balise de définition de métadonnées est introuvable pour l'ID %s" - -msgid "Minimal rows limit is 1." -msgstr "Le nombre minimal de lignes est 1." - -#, python-format -msgid "Missing required credential: %(required)s" -msgstr "Données d'identification obligatoires manquantes : %(required)s" - -#, python-format -msgid "" -"Multiple 'image' service matches for region %(region)s. This generally means " -"that a region is required and you have not supplied one." -msgstr "" -"Plusieurs correspondances de service 'image' pour la région %(region)s. En " -"général, cela signifie qu'une région est requise et que vous n'en avez pas " -"indiquée." - -msgid "No authenticated user" -msgstr "Aucun utilisateur authentifié" - -#, python-format -msgid "No image found with ID %s" -msgstr "aucune image trouvée avec l'identifiant %s" - -#, python-format -msgid "No location found with ID %(loc)s from image %(img)s" -msgstr "Aucun emplacement trouvé avec l'ID %(loc)s dans l'image %(img)s" - -msgid "No permission to share that image" -msgstr "Aucun droit de partage de cette image" - -#, python-format -msgid "Not allowed to create members for image %s." -msgstr "Non autorisé à créer des membres pour l'image %s." 
- -#, python-format -msgid "Not allowed to deactivate image in status '%s'" -msgstr "Non autorisé à désactiver l'image dans l'état '%s'" - -#, python-format -msgid "Not allowed to delete members for image %s." -msgstr "Non autorisé à supprimer des membres de l'image %s." - -#, python-format -msgid "Not allowed to delete tags for image %s." -msgstr "Non autorisé à supprimer des balises de l'image %s." - -#, python-format -msgid "Not allowed to list members for image %s." -msgstr "Non autorisé à répertorier les membres de l'image %s." - -#, python-format -msgid "Not allowed to reactivate image in status '%s'" -msgstr "Non autorisé à réactiver l'image dans l'état '%s'" - -#, python-format -msgid "Not allowed to update members for image %s." -msgstr "Non autorisé à mettre à jour les membres de l'image %s." - -#, python-format -msgid "Not allowed to update tags for image %s." -msgstr "Non autorisé à mettre à jour des balises de l'image %s." - -#, python-format -msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" -msgstr "" -"Non autorisé à télécharger des données image pour l'image %(image_id)s : " -"%(error)s" - -msgid "Number of sort dirs does not match the number of sort keys" -msgstr "Le nombre de rép de tri ne correspond pas au nombre de clés de tri" - -msgid "OVA extract is limited to admin" -msgstr "L'extraction de fichiers OVA est limitée à admin" - -msgid "Old and new sorting syntax cannot be combined" -msgstr "" -"Les syntaxes de tri anciennes et nouvelles ne peuvent pas être combinées" - -#, python-format -msgid "Operation \"%s\" requires a member named \"value\"." -msgstr "L'opération \"%s\" requiert un membre nommé \"value\"." - -msgid "" -"Operation objects must contain exactly one member named \"add\", \"remove\", " -"or \"replace\"." -msgstr "" -"Les objets d'opération doivent contenir exactement un seul membre nommé \"add" -"\", \"remove\" ou \"replace\"." 
- -msgid "" -"Operation objects must contain only one member named \"add\", \"remove\", or " -"\"replace\"." -msgstr "" -"Les objets d'opération doivent contenir un seul membre nommé \"add\", " -"\"remove\" ou \"replace\"." - -msgid "Operations must be JSON objects." -msgstr "Les opérations doivent être des objets JSON." - -#, python-format -msgid "Original locations is not empty: %s" -msgstr "L'emplacement original %s n'est pas vide" - -msgid "Owner can't be updated by non admin." -msgstr "Le propriétaire ne peut être mis à jour que par un administrateur." - -msgid "Owner must be specified to create a tag." -msgstr "Le propriétaire doit être indiqué pour créer une balise." - -msgid "Owner of the image" -msgstr "Propriétaire de l'image" - -msgid "Owner of the namespace." -msgstr "Propriétaire de l'espace de nom." - -msgid "Param values can't contain 4 byte unicode." -msgstr "" -"Les valeurs de paramètre ne peuvent pas contenir de caractère Unicode de 4 " -"octets." - -#, python-format -msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." -msgstr "" -"Le pointeur `%s` contient \"~\" qui ne fait pas partie d'une séquence " -"d'échappement reconnue." - -#, python-format -msgid "Pointer `%s` contains adjacent \"/\"." -msgstr "Le pointeur `%s` contient des éléments \"/\" adjacent." - -#, python-format -msgid "Pointer `%s` does not contains valid token." -msgstr "le Pointeur '%s' ne contient pas de jeton valide." - -#, python-format -msgid "Pointer `%s` does not start with \"/\"." -msgstr "Le pointeur `%s` ne commence pas par \"/\"." - -#, python-format -msgid "Pointer `%s` end with \"/\"." -msgstr "le pointeur '%s' se termine avec un \"/\"." - -#, python-format -msgid "Port \"%s\" is not valid." -msgstr "Port \"%s\" n'est pas valide." - -#, python-format -msgid "Process %d not running" -msgstr "Le processus %d n'est pas en fonctionnement" - -#, python-format -msgid "Properties %s must be set prior to saving data." 
-msgstr "" -"Les propriétés %s doivent être définies avant de sauvegarder les données." - -#, python-format -msgid "" -"Property %(property_name)s does not start with the expected resource type " -"association prefix of '%(prefix)s'." -msgstr "" -"La propriété %(property_name)s ne commence pas par le préfixe d'association " -"de type de ressource attendu : '%(prefix)s'." - -#, python-format -msgid "Property %s already present." -msgstr "Propriété %s déjà présente." - -#, python-format -msgid "Property %s does not exist." -msgstr "La propriété %s n'existe pas." - -#, python-format -msgid "Property %s may not be removed." -msgstr "La propriété %s n'est peut-être pas supprimée." - -#, python-format -msgid "Property %s must be set prior to saving data." -msgstr "La propriété %s doit être définie avant de sauvegarder les données." - -#, python-format -msgid "Property '%s' is protected" -msgstr "La propriété '%s' est protégée" - -msgid "Property names can't contain 4 byte unicode." -msgstr "" -"Les noms de propriété ne peuvent pas contenir de caractère Unicode de 4 " -"octets." - -#, python-format -msgid "" -"Provided image size must match the stored image size. (provided size: " -"%(ps)d, stored size: %(ss)d)" -msgstr "" -"La taille de l'image fournie doit correspondre à la taille de l'image " -"stockée. (taille fournie : %(ps)d, taille stockée : %(ss)d)" - -#, python-format -msgid "Provided object does not match schema '%(schema)s': %(reason)s" -msgstr "L'objet fourni ne correspond pas au schéma '%(schema)s' : %(reason)s" - -#, python-format -msgid "Provided status of task is unsupported: %(status)s" -msgstr "Le statut fourni de la tâche n'est pas pris en charge : %(status)s" - -#, python-format -msgid "Provided type of task is unsupported: %(type)s" -msgstr "Le type de tâche fourni n'est pas pris en charge : %(type)s" - -msgid "Provides a user friendly description of the namespace." -msgstr "Fournit une description conviviale de l'espace de nom." 
- -msgid "Received invalid HTTP redirect." -msgstr "Redirection HTTP non valide reçue." - -#, python-format -msgid "Redirecting to %(uri)s for authorization." -msgstr "Redirection vers %(uri)s pour autorisation." - -#, python-format -msgid "Registry service can't use %s" -msgstr "Le service de registre ne peut pas utiliser %s" - -#, python-format -msgid "Registry was not configured correctly on API server. Reason: %(reason)s" -msgstr "" -"Le registre n'a pas été configuré correctement sur le serveur d'API. Cause : " -"%(reason)s" - -#, python-format -msgid "Reload of %(serv)s not supported" -msgstr "Rechargement de %(serv)s non pris en charge" - -#, python-format -msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "Rechargement de %(serv)s (pid %(pid)s) avec le signal (%(sig)s)" - -#, python-format -msgid "Removing stale pid file %s" -msgstr "Suppression du fichier PID %s périmé" - -msgid "Request body must be a JSON array of operation objects." -msgstr "Le corps de la demande doit être une matrice JSON d'objets Opération." - -msgid "Request must be a list of commands" -msgstr "La demande doit être une liste de commandes" - -#, python-format -msgid "Required store %s is invalid" -msgstr "Le magasin requis %s n'est pas valide" - -msgid "" -"Resource type names should be aligned with Heat resource types whenever " -"possible: http://docs.openstack.org/developer/heat/template_guide/openstack." -"html" -msgstr "" -"Les noms de type de ressource doivent être alignés avec les types de " -"ressource Heat dans la mesure du possible : http://docs.openstack.org/" -"developer/heat/template_guide/openstack.html" - -msgid "Response from Keystone does not contain a Glance endpoint." -msgstr "La réponse de Keystone ne contient pas un noeud final Glance." - -msgid "Scope of image accessibility" -msgstr "Périmètre d'accessibilité de l'image" - -msgid "Scope of namespace accessibility." -msgstr "Périmètre de l'accessibilité de l'espace de nom." 
- -#, python-format -msgid "Server %(serv)s is stopped" -msgstr "Le serveur %(serv)s est arrêté" - -#, python-format -msgid "Server worker creation failed: %(reason)s." -msgstr "Echec de la création de travailleur de serveur : %(reason)s." - -msgid "Signature verification failed" -msgstr "La vérification de la signature a échoué" - -msgid "Size of image file in bytes" -msgstr "Taille du fichier image en octets" - -msgid "" -"Some resource types allow more than one key / value pair per instance. For " -"example, Cinder allows user and image metadata on volumes. Only the image " -"properties metadata is evaluated by Nova (scheduling or drivers). This " -"property allows a namespace target to remove the ambiguity." -msgstr "" -"Certains types de ressource autorisent plusieurs paires clé-valeur par " -"instance. Par exemple, Cinder autorise les métadonnées d'utilisateur et " -"d'image sur les volumes. Seules les métadonnées de propriétés d'image sont " -"évaluées par Nova (planification ou pilotes). Cette propriété autorise une " -"cible d'espace de nom pour lever l'ambiguïté." - -msgid "Sort direction supplied was not valid." -msgstr "Le sens de tri fourni n'était pas valide." - -msgid "Sort key supplied was not valid." -msgstr "La clé de tri fournie n'était pas valide." - -msgid "" -"Specifies the prefix to use for the given resource type. Any properties in " -"the namespace should be prefixed with this prefix when being applied to the " -"specified resource type. Must include prefix separator (e.g. a colon :)." -msgstr "" -"Spécifie le préfixe à utiliser pour le type de ressource donné. Toutes les " -"propriétés de l'espace de nom doivent être précédées de ce préfixe " -"lorsqu'elles s'appliquent au type de ressource spécifié. Vous devez inclure " -"un séparateur de préfixe (par exemple, le signe deux-points :)." - -msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." -msgstr "L'état doit être \"en attente\", \"accepté\" ou \"rejeté\"." 
- -msgid "Status not specified" -msgstr "Statut non spécifié" - -msgid "Status of the image" -msgstr "Statut de l'image" - -#, python-format -msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "" -"La transition de statut de %(cur_status)s vers %(new_status)s n'est pas " -"autorisée" - -#, python-format -msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "Arrêt de %(serv)s (pid %(pid)s) avec le signal (%(sig)s)" - -#, python-format -msgid "Store for image_id not found: %s" -msgstr "Magasin de l'image_id non trouvé : %s" - -#, python-format -msgid "Store for scheme %s not found" -msgstr "Magasin du schéma %s non trouvé" - -#, python-format -msgid "" -"Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " -"(%(actual)s) did not match. Setting image status to 'killed'." -msgstr "" -"%(attr)s (%(supplied)s) fournis et %(attr)s générés depuis l'image " -"téléchargée (%(actual)s) ne correspondent pas. Définition du statut de " -"l'image sur 'arrêté'." - -msgid "Supported values for the 'container_format' image attribute" -msgstr "Valeurs prises en charge pour l'attribut d'image 'container_format'" - -msgid "Supported values for the 'disk_format' image attribute" -msgstr "Valeurs prises en charge pour l'attribut d'image 'disk_format'" - -#, python-format -msgid "Suppressed respawn as %(serv)s was %(rsn)s." -msgstr "La relance supprimée en tant que %(serv)s était %(rsn)s." - -msgid "System SIGHUP signal received." -msgstr "Signal SIGHUP du système reçu." 
- -#, python-format -msgid "Task '%s' is required" -msgstr "La tâche '%s' est obligatoire" - -msgid "Task does not exist" -msgstr "La tâche n'existe pas" - -msgid "Task failed due to Internal Error" -msgstr "Echec de la tâche en raison d'une erreur interne" - -msgid "Task was not configured properly" -msgstr "La tâche n'a pas été configurée correctement" - -#, python-format -msgid "Task with the given id %(task_id)s was not found" -msgstr "La tâche avec l'identificateur donné %(task_id)s est introuvable" - -msgid "The \"changes-since\" filter is no longer available on v2." -msgstr "Le filtre \"changes-since\" n'est plus disponible sur la version 2." - -#, python-format -msgid "The CA file you specified %s does not exist" -msgstr "" -"Le fichier d'autorité de certification que vous avez spécifié %s n'existe pas" - -#, python-format -msgid "" -"The Image %(image_id)s object being created by this task %(task_id)s, is no " -"longer in valid status for further processing." -msgstr "" -"L'objet image %(image_id)s créé par la tâche %(task_id)s n'est plus dans un " -"statut valide pour un traitement ultérieur." - -msgid "The Store URI was malformed." -msgstr "L'URI de magasin était incorrect." - -msgid "" -"The URL to the keystone service. If \"use_user_token\" is not in effect and " -"using keystone auth, then URL of keystone can be specified." -msgstr "" -"URL du service keystone. Si \"use_user_token\" n'est pas en vigueur et si " -"vous utilisez l'authentification keystone, l'URL de keystone peut être " -"spécifiée." - -msgid "" -"The administrators password. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"Mot de passe de l'administrateur. Si \"use_user_token\" n'est pas en " -"vigueur, les données d'identification de l'administrateur peuvent être " -"spécifiées." - -msgid "" -"The administrators user name. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." 
-msgstr "" -"Nom d'utilisateur administrateur. Si \"use_user_token\" n'est pas en " -"vigueur, les données d'identification de l'administrateur peuvent être " -"spécifiées." - -#, python-format -msgid "The cert file you specified %s does not exist" -msgstr "Le fichier de certificats que vous avez spécifié %s n'existe pas" - -msgid "The current status of this task" -msgstr "Le statut actuel de cette tâche" - -#, python-format -msgid "" -"The device housing the image cache directory %(image_cache_dir)s does not " -"support xattr. It is likely you need to edit your fstab and add the " -"user_xattr option to the appropriate line for the device housing the cache " -"directory." -msgstr "" -"L'unité hébergeant le répertoire de cache d'image %(image_cache_dir)s ne " -"prend pas en charge xattr. Vous devez probablement éditer votre fstab et " -"ajouter l'option user_xattr sur la ligne appropriée de l'unité hébergeant le " -"répertoire de cache." - -#, python-format -msgid "" -"The given uri is not valid. Please specify a valid uri from the following " -"list of supported uri %(supported)s" -msgstr "" -"L'identificateur URI fourni n'est pas valide. Indiquez un identificateur URI " -"valide sélectionné dans la liste des identificateurs URI pris en charge : " -"%(supported)s" - -#, python-format -msgid "The incoming image is too large: %s" -msgstr "L'image entrante est trop grande : %s" - -#, python-format -msgid "The key file you specified %s does not exist" -msgstr "Le fichier de clés que vous avez spécifié %s n'existe pas" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image locations. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"La limite a été dépassée sur le nombre d'emplacements d'image autorisés. " -"Tentatives : %(attempted)s, Maximum : %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image members for this " -"image. 
Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"La limite a été dépassée sur le nombre de membres d'image autorisés pour " -"cette image. Tentatives : %(attempted)s, Maximum : %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"La limite a été dépassée sur le nombre de propriétés d'image autorisées. " -"Tentatives : %(attempted)s, Maximum : %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(num)s, Maximum: %(quota)s" -msgstr "" -"La limite a été dépassée sur le nombre de propriétés d'image autorisées. " -"Tentatives : %(num)s, Maximum : %(quota)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image tags. Attempted: " -"%(attempted)s, Maximum: %(maximum)s" -msgstr "" -"La limite a été dépassée sur le nombre de balises d'image autorisées. " -"Tentatives : %(attempted)s, Maximum : %(maximum)s" - -#, python-format -msgid "The location %(location)s already exists" -msgstr "L'emplacement %(location)s existe déjà" - -#, python-format -msgid "The location data has an invalid ID: %d" -msgstr "Les données d'emplacement possèdent un ID non valide : %d" - -#, python-format -msgid "" -"The metadata definition %(record_type)s with name=%(record_name)s not " -"deleted. Other records still refer to it." -msgstr "" -"La définition de métadonnées %(record_type)s avec le nom %(record_name)s n'a " -"pas été supprimée. Elle est encore associée à d'autres enregistrements." - -#, python-format -msgid "The metadata definition namespace=%(namespace_name)s already exists." -msgstr "" -"L'espace de nom %(namespace_name)s de la définition de métadonnées existe " -"déjà." - -#, python-format -msgid "" -"The metadata definition object with name=%(object_name)s was not found in " -"namespace=%(namespace_name)s." 
-msgstr "" -"L'objet %(object_name)s de la définition de métadonnées est introuvable dans " -"l'espace de nom %(namespace_name)s." - -#, python-format -msgid "" -"The metadata definition property with name=%(property_name)s was not found " -"in namespace=%(namespace_name)s." -msgstr "" -"La propriété %(property_name)s de la définition de métadonnées est " -"introuvable dans l'espace de nom %(namespace_name)s." - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s already exists." -msgstr "" -"L'association de type de ressource de la définition de métadonnées entre " -"letype de ressource %(resource_type_name)s et l'espace de nom " -"%(namespace_name)s existe déjà." - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s, was not found." -msgstr "" -"L'association de type de ressource de la définition de métadonnées entre " -"letype de ressource %(resource_type_name)s et l'espace de nom " -"%(namespace_name)s est introuvable." - -#, python-format -msgid "" -"The metadata definition resource-type with name=%(resource_type_name)s, was " -"not found." -msgstr "" -"Le type de ressource %(resource_type_name)s de la définition de métadonnées " -"est introuvable." - -#, python-format -msgid "" -"The metadata definition tag with name=%(name)s was not found in namespace=" -"%(namespace_name)s." -msgstr "" -"La balise de définition de métadonnées nommée %(name)s est introuvable dans " -"l'espace de nom %(namespace_name)s." - -msgid "The parameters required by task, JSON blob" -msgstr "Les paramètres requis par la tâche, blob JSON" - -msgid "The provided image is too large." -msgstr "L'image fournie est trop volumineuse." - -msgid "" -"The region for the authentication service. 
If \"use_user_token\" is not in " -"effect and using keystone auth, then region name can be specified." -msgstr "" -"Région du service d'authentification. Si \"use_user_token\" n'est pas en " -"vigueur et si vous utilisez l'authentification keystone, le nom de région " -"peut être spécifié." - -msgid "The request returned 500 Internal Server Error." -msgstr "La demande a renvoyé le message 500 Internal Server Error." - -msgid "" -"The request returned 503 Service Unavailable. This generally occurs on " -"service overload or other transient outage." -msgstr "" -"La demande a renvoyé le message 503 Service Unavailable. Cela se produit " -"généralement lors d'une surcharge de service ou de tout autre coupure " -"transitoire." - -#, python-format -msgid "" -"The request returned a 302 Multiple Choices. This generally means that you " -"have not included a version indicator in a request URI.\n" -"\n" -"The body of response returned:\n" -"%(body)s" -msgstr "" -"La demande a renvoyé un message 302 Multiple Choices. Cela signifie " -"généralement que vous n'avez pas inclus d'indicateur de version dans l'URI " -"de demande.\n" -"\n" -"Le corps de la réponse a renvoyé :\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned a 413 Request Entity Too Large. This generally means " -"that rate limiting or a quota threshold was breached.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"La demande a renvoyé un message 413 Request Entity Too Large. Cela signifie " -"généralement que le taux limite ou le seuil de quota a été dépassé.\n" -"\n" -"Corps de la réponse :\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned an unexpected status: %(status)s.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"La demande a renvoyé un statut inattendu : %(status)s.\n" -"\n" -"Corps de la réponse :\n" -"%(body)s" - -msgid "" -"The requested image has been deactivated. Image data download is forbidden." -msgstr "" -"L'image demandée a été désactivée. 
Le téléchargement des données image est " -"interdit." - -msgid "The result of current task, JSON blob" -msgstr "Le résultat de la tâche en cours, blob JSON" - -#, python-format -msgid "" -"The size of the data %(image_size)s will exceed the limit. %(remaining)s " -"bytes remaining." -msgstr "" -"La taille des données %(image_size)s dépassera la limite. %(remaining)s " -"octets restants." - -#, python-format -msgid "The specified member %s could not be found" -msgstr "Le membre spécifié %s est introuvable" - -#, python-format -msgid "The specified metadata object %s could not be found" -msgstr "L'objet métadonnées spécifié %s est introuvable" - -#, python-format -msgid "The specified metadata tag %s could not be found" -msgstr "La balise de métadonnées %s est introuvable" - -#, python-format -msgid "The specified namespace %s could not be found" -msgstr "L'espace de nom spécifié %s est introuvable" - -#, python-format -msgid "The specified property %s could not be found" -msgstr "La propriété spécifiée %s est introuvable" - -#, python-format -msgid "The specified resource type %s could not be found " -msgstr "Le type de ressource spécifié %s est introuvable " - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'" -msgstr "" -"L'état de l'emplacement de l'image supprimée ne peut être réglé que sur " -"'pending_delete' ou 'deleted'" - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'." -msgstr "" -"L'état de l'emplacement de l'image supprimée ne peut être réglé que sur " -"'pending_delete' ou 'deleted'." - -msgid "The status of this image member" -msgstr "Statut de ce membre d'image" - -msgid "" -"The strategy to use for authentication. If \"use_user_token\" is not in " -"effect, then auth strategy can be specified." -msgstr "" -"Stratégie à utiliser pour l'authentification. 
Si \"use_user_token\" n'est " -"pas en vigueur, la stratégie d'authentification peut être spécifiée." - -#, python-format -msgid "" -"The target member %(member_id)s is already associated with image " -"%(image_id)s." -msgstr "Le membre cible %(member_id)s est déjà associé à l'image %(image_id)s." - -msgid "" -"The tenant name of the administrative user. If \"use_user_token\" is not in " -"effect, then admin tenant name can be specified." -msgstr "" -"Nom de locataire de l'utilisateur administrateur. Si \"use_user_token\" " -"n'est pas en vigueur, le nom de locataire de l'administrateur peut être " -"spécifié." - -msgid "The type of task represented by this content" -msgstr "Le type de tâche représenté par ce contenu" - -msgid "The unique namespace text." -msgstr "Texte unique de l'espace de nom." - -msgid "The user friendly name for the namespace. Used by UI if available." -msgstr "" -"Nom convivial de l'espace de nom. Utilisé par l'interface utilisateur si " -"disponible." - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. Error: %(ioe)s" -msgstr "" -"Problème lié à votre %(error_key_name)s %(error_filename)s. Veuillez " -"vérifier. Erreur : %(ioe)s" - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. OpenSSL error: %(ce)s" -msgstr "" -"Problème lié à votre %(error_key_name)s %(error_filename)s. Veuillez " -"vérifier. Erreur OpenSSL : %(ce)s" - -#, python-format -msgid "" -"There is a problem with your key pair. Please verify that cert " -"%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" -msgstr "" -"Il y a un problème avec votre paire de clés. Vérifiez que le certificat " -"%(cert_file)s et la clé %(key_file)s correspondent. Erreur OpenSSL %(ce)s" - -msgid "There was an error configuring the client." -msgstr "Une erreur s'est produite lors de la configuration du client." 
- -msgid "There was an error connecting to a server" -msgstr "Une erreur s'est produite lors de la connexion à un serveur." - -msgid "" -"This operation is currently not permitted on Glance Tasks. They are auto " -"deleted after reaching the time based on their expires_at property." -msgstr "" -"Cette opération n'est actuellement pas autorisée sur les tâches Glance. " -"Elles sont supprimées automatiquement après avoir atteint l'heure définie " -"par la propriété expires_at." - -msgid "This operation is currently not permitted on Glance images details." -msgstr "" -"Cette opération n'est pas actuellement autorisée sur des détails d'images " -"Glance." - -msgid "" -"Time in hours for which a task lives after, either succeeding or failing" -msgstr "Durée de vie en heures d'une tâche suite à une réussite ou à un échec" - -msgid "Too few arguments." -msgstr "Trop peu d'arguments." - -msgid "" -"URI cannot contain more than one occurrence of a scheme.If you have " -"specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " -"you need to change it to use the swift+http:// scheme, like so: swift+http://" -"user:pass@authurl.com/v1/container/obj" -msgstr "" -"L'URI ne peut pas contenir plusieurs occurrences d'un schéma. Si vous avez " -"spécifié un URI tel que swift://user:pass@http://authurl.com/v1/container/" -"obj, vous devez le modifier pour utiliser le schéma swift+http://, par " -"exemple : swift+http://user:pass@authurl.com/v1/container/obj" - -msgid "URL to access the image file kept in external store" -msgstr "" -"URL permettant d'accéder au fichier image conservé dans le magasin externe" - -#, python-format -msgid "" -"Unable to create pid file %(pid)s. Running as non-root?\n" -"Falling back to a temp file, you can stop %(service)s service using:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" -msgstr "" -"Impossible de créer le fichier PID %(pid)s. Exécution en tant que non " -"root ?\n" -"Rétablissement vers un fichier temporaire. 
Vous pouvez arrêter le service " -"%(service)s avec :\n" -" %(file)s %(server)s stop --pid-file %(fb)s" - -#, python-format -msgid "Unable to filter by unknown operator '%s'." -msgstr "Filtrage impossible avec l'opérateur inconnu '%s'." - -msgid "Unable to filter on a range with a non-numeric value." -msgstr "Impossible de filtrer sur une plage avec une valeur non numérique." - -msgid "Unable to filter on a unknown operator." -msgstr "Filtrage impossible avec un opérateur inconnu." - -msgid "Unable to filter using the specified operator." -msgstr "Filtrage impossible à l'aide de l'opérateur spécifié." - -msgid "Unable to filter using the specified range." -msgstr "Impossible de filtrer à l'aide de la plage spécifiée." - -#, python-format -msgid "Unable to find '%s' in JSON Schema change" -msgstr "Impossible de trouver '%s' dans la modification du schéma JSON" - -#, python-format -msgid "" -"Unable to find `op` in JSON Schema change. It must be one of the following: " -"%(available)s." -msgstr "" -"Impossible de localiser `op` dans la modification du schéma JSON. Doit être " -"l'une des valeurs suivantes : %(available)s." - -msgid "Unable to increase file descriptor limit. Running as non-root?" -msgstr "" -"Impossible d'augmenter la limite de descripteur de fichier. Exécution en " -"tant que non root ?" - -#, python-format -msgid "" -"Unable to load %(app_name)s from configuration file %(conf_file)s.\n" -"Got: %(e)r" -msgstr "" -"Impossible de charger %(app_name)s depuis le fichier de configuration " -"%(conf_file)s.\n" -"Résultat : %(e)r" - -#, python-format -msgid "Unable to load schema: %(reason)s" -msgstr "Impossible de charger le schéma : %(reason)s" - -#, python-format -msgid "Unable to locate paste config file for %s." -msgstr "" -"Impossible de localiser le fichier de configuration du collage pour %s." 
- -#, python-format -msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" -msgstr "" -"Impossible de télécharger des données image en double pour l'image " -"%(image_id)s : %(error)s" - -msgid "Unauthorized image access" -msgstr "Accès à l'image non autorisé" - -msgid "Unexpected body type. Expected list/dict." -msgstr "Type de corps inattendu. Type attendu : list/dict." - -#, python-format -msgid "Unexpected response: %s" -msgstr "Réponse inattendue : %s" - -#, python-format -msgid "Unknown auth strategy '%s'" -msgstr "Stratégie d'autorisation inconnue '%s'" - -#, python-format -msgid "Unknown command: %s" -msgstr "commande %s inconnue" - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "Sens de tri inconnu, doit être 'desc' ou 'asc'" - -msgid "Unrecognized JSON Schema draft version" -msgstr "Version brouillon du schéma JSON non reconnue" - -msgid "Unrecognized changes-since value" -msgstr "Valeur changes-since non reconnue" - -#, python-format -msgid "Unsupported sort_dir. Acceptable values: %s" -msgstr "sort_dir non pris en charge. Valeurs acceptables : %s" - -#, python-format -msgid "Unsupported sort_key. Acceptable values: %s" -msgstr "sort_key non pris en charge. Valeurs acceptables : %s" - -msgid "Virtual size of image in bytes" -msgstr "Taille virtuelle de l'image en octets" - -#, python-format -msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" -msgstr "" -"Attente de la fin du pid %(pid)s (%(file)s) pendant 15 secondes ; abandon en " -"cours" - -msgid "" -"When running server in SSL mode, you must specify both a cert_file and " -"key_file option value in your configuration file" -msgstr "" -"Lors de l'exécution du serveur en mode SSL, vous devez spécifier une valeur " -"d'option cert_file et key_file dans votre fichier de configuration" - -msgid "" -"Whether to pass through the user token when making requests to the registry. 
" -"To prevent failures with token expiration during big files upload, it is " -"recommended to set this parameter to False.If \"use_user_token\" is not in " -"effect, then admin credentials can be specified." -msgstr "" -"Transmettre le jeton utilisateur lors de demandes au registre. Pour éviter " -"des échecs dus à l'expiration du jeton lors du téléchargement de fichiers " -"volumineux, il est recommandé de définir de paramètre à 'False'.If Si " -"\"use_user_token\" n'est pas activé, des données d'identification " -"administrateur doivent être spécifiées." - -#, python-format -msgid "Wrong command structure: %s" -msgstr "Structure de commande erronée : %s" - -msgid "You are not authenticated." -msgstr "Vous n'êtes pas authentifié." - -msgid "You are not authorized to complete this action." -msgstr "Vous n'êtes pas autorisé à effectuer cette action." - -#, python-format -msgid "You are not authorized to lookup image %s." -msgstr "Vous n'êtes pas autorisé à rechercher l'image %s." - -#, python-format -msgid "You are not authorized to lookup the members of the image %s." -msgstr "Vous n'êtes pas autorisé à rechercher les membres de l'image %s." - -#, python-format -msgid "You are not permitted to create a tag in the namespace owned by '%s'" -msgstr "" -"Vous n'êtes pas autorisé à créer une balise dans l'espace de nom détenu par " -"'%s'" - -msgid "You are not permitted to create image members for the image." -msgstr "Vous n'êtes pas autorisé à créer des membres image pour l'image." - -#, python-format -msgid "You are not permitted to create images owned by '%s'." -msgstr "Vous n'êtes pas autorisé à créer des images détenues par '%s'." 
- -#, python-format -msgid "You are not permitted to create namespace owned by '%s'" -msgstr "Vous n'êtes pas autorisé à créer un espace de nom détenu par '%s'" - -#, python-format -msgid "You are not permitted to create object owned by '%s'" -msgstr "Vous n'êtes pas autorisé à créer un objet détenu par '%s'" - -#, python-format -msgid "You are not permitted to create property owned by '%s'" -msgstr "Vous n'êtes pas autorisé à créer une propriété détenue par '%s'" - -#, python-format -msgid "You are not permitted to create resource_type owned by '%s'" -msgstr "" -"Vous n'êtes pas autorisé à créer des types de ressource détenus par '%s'" - -#, python-format -msgid "You are not permitted to create this task with owner as: %s" -msgstr "" -"Vous n'êtes pas autorisé à créer cette tâche avec comme propriétaire : %s" - -msgid "You are not permitted to deactivate this image." -msgstr "Vous n'êtes pas autorisé à désactiver cette image." - -msgid "You are not permitted to delete this image." -msgstr "Vous n'êtes pas autorisé à supprimer cette image." - -msgid "You are not permitted to delete this meta_resource_type." -msgstr "Vous n'êtes pas autorisé à supprimer le paramètre meta_resource_type." - -msgid "You are not permitted to delete this namespace." -msgstr "Vous n'êtes pas autorisé à supprimer cet espace de nom." - -msgid "You are not permitted to delete this object." -msgstr "Vous n'êtes pas autorisé à supprimer cet objet." - -msgid "You are not permitted to delete this property." -msgstr "Vous n'êtes pas autorisé à supprimer cette propriété." - -msgid "You are not permitted to delete this tag." -msgstr "Vous n'êtes pas autorisé à supprimer cette balise." - -#, python-format -msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." -msgstr "Vous n'êtes pas autorisé à modifier '%(attr)s' sur cette %(resource)s." - -#, python-format -msgid "You are not permitted to modify '%s' on this image." 
-msgstr "Vous n'êtes pas autorisé à modifier '%s' sur cette image." - -msgid "You are not permitted to modify locations for this image." -msgstr "Vous n'êtes pas autorisé à modifier les emplacements pour cette image." - -msgid "You are not permitted to modify tags on this image." -msgstr "Vous n'êtes pas autorisé à modifier les balises pour cette image." - -msgid "You are not permitted to modify this image." -msgstr "Vous n'êtes pas autorisé à modifier cette image." - -msgid "You are not permitted to reactivate this image." -msgstr "Vous n'êtes pas autorisé à réactiver cette image." - -msgid "You are not permitted to set status on this task." -msgstr "Vous n'êtes pas autorisé à définir le statut pour cette tâche." - -msgid "You are not permitted to update this namespace." -msgstr "Vous n'êtes pas autorisé à mettre à jour cet espace de nom." - -msgid "You are not permitted to update this object." -msgstr "Vous n'êtes pas autorisé à mettre à jour cet objet." - -msgid "You are not permitted to update this property." -msgstr "Vous n'êtes pas autorisé à mettre à jour cette propriété." - -msgid "You are not permitted to update this tag." -msgstr "Vous n'êtes pas autorisé à mettre à jour cette balise." - -msgid "You are not permitted to upload data for this image." -msgstr "Vous n'êtes pas autorisé à télécharger des données pour cette image." 
- -#, python-format -msgid "You cannot add image member for %s" -msgstr "Vous ne pouvez pas ajouter le membre image pour %s" - -#, python-format -msgid "You cannot delete image member for %s" -msgstr "Vous ne pouvez pas supprimer le membre image pour %s" - -#, python-format -msgid "You cannot get image member for %s" -msgstr "Vous ne pouvez pas obtenir le membre image pour %s" - -#, python-format -msgid "You cannot update image member %s" -msgstr "Vous ne pouvez pas mettre à jour le membre image pour %s" - -msgid "You do not own this image" -msgstr "Vous n'êtes pas propriétaire de cette image" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a cert, " -"however you have failed to supply either a key_file parameter or set the " -"GLANCE_CLIENT_KEY_FILE environ variable" -msgstr "" -"Vous avez choisi d'utiliser SSL pour la connexion et avez fourni un " -"certificat, cependant, vous n'avez pas fourni de paramètre key_file ou " -"n'avez pas défini la variable d'environnement GLANCE_CLIENT_KEY_FILE" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a key, " -"however you have failed to supply either a cert_file parameter or set the " -"GLANCE_CLIENT_CERT_FILE environ variable" -msgstr "" -"Vous avez choisi d'utiliser SSL pour la connexion et avez fourni une clé, " -"cependant, vous n'avez pas fourni de paramètre cert_file ou n'avez pas " -"défini la variable d'environnement GLANCE_CLIENT_CERT_FILE" - -msgid "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" -msgstr "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" - -#, python-format -msgid "__init__() got unexpected keyword argument '%s'" -msgstr "__init__() a récupéré un argument de mot clé '%s' inattendu" - -#, python-format -msgid "" -"cannot transition from %(current)s to %(next)s in update (wanted from_state=" -"%(from)s)" -msgstr "" -"impossible d'effectuer la 
transition depuis %(current)s vers %(next)s dans " -"la mise à jour (voulu : from_state=%(from)s)" - -#, python-format -msgid "custom properties (%(props)s) conflict with base properties" -msgstr "" -"propriétés personnalisées (%(props)s) en conflit avec les propriétés de base" - -msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" -msgstr "" -"Les concentrateurs Eventlet 'poll' et 'selects' sont indisponibles sur cette " -"plateforme" - -msgid "is_public must be None, True, or False" -msgstr "is_public doit être None, True ou False" - -msgid "limit param must be an integer" -msgstr "le paramètre limit doit être un entier" - -msgid "limit param must be positive" -msgstr "le paramètre limit doit être positif" - -msgid "md5 hash of image contents." -msgstr "Hachage md5 du contenu d'image." - -#, python-format -msgid "new_image() got unexpected keywords %s" -msgstr "new_image() a récupéré des mots-clés %s inattendus" - -msgid "protected must be True, or False" -msgstr "protected doit être True ou False" - -#, python-format -msgid "unable to launch %(serv)s. Got error: %(e)s" -msgstr "impossible de lancer %(serv)s. Erreur : %(e)s" - -#, python-format -msgid "x-openstack-request-id is too long, max size %s" -msgstr "x-openstack-request-id est trop long, sa taille maximale est de %s" diff --git a/glance/locale/it/LC_MESSAGES/glance.po b/glance/locale/it/LC_MESSAGES/glance.po deleted file mode 100644 index e8718775..00000000 --- a/glance/locale/it/LC_MESSAGES/glance.po +++ /dev/null @@ -1,2167 +0,0 @@ -# Translations template for glance. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the glance project. -# -# Translators: -# Andreas Jaeger , 2016. #zanata -# KATO Tomoyuki , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: glance 15.0.0.0b3.dev29\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-06-23 20:54+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-06-03 01:43+0000\n" -"Last-Translator: KATO Tomoyuki \n" -"Language: it\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" -"Language-Team: Italian\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "%(cls)s exception was raised in the last rpc call: %(val)s" -msgstr "Eccezione %(cls)s generata nell'ultima chiamata rpc: %(val)s" - -#, python-format -msgid "%(m_id)s not found in the member list of the image %(i_id)s." -msgstr "%(m_id)s non trovato nell'elenco di membri dell'immagine %(i_id)s." - -#, python-format -msgid "%(serv)s (pid %(pid)s) is running..." -msgstr "%(serv)s (pid %(pid)s) in esecuzione..." - -#, python-format -msgid "%(serv)s appears to already be running: %(pid)s" -msgstr "%(serv)s sembra essere già in esecuzione: %(pid)s" - -#, python-format -msgid "" -"%(strategy)s is registered as a module twice. %(module)s is not being used." -msgstr "" -"%(strategy)s è registrato come modulo due volte. %(module)s non viene del " -"provider." - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Could not load the " -"filesystem store" -msgstr "" -"%(task_id)s di %(task_type)s non configurato correttamente. Impossibile " -"caricare l'archivio filesystem" - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Missing work dir: " -"%(work_dir)s" -msgstr "" -"%(task_id)s di %(task_type)s non configurato correttamente. 
Directory di " -"lavoro mancante: %(work_dir)s" - -#, python-format -msgid "%(verb)sing %(serv)s" -msgstr "%(verb)sing %(serv)s" - -#, python-format -msgid "%(verb)sing %(serv)s with %(conf)s" -msgstr "%(verb)s %(serv)s con %(conf)s" - -#, python-format -msgid "" -"%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " -"address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " -"separately from the port (i.e., \"[fe80::a:b:c]:9876\")." -msgstr "" -"%s Specificare una coppia host:port in cui host è un indirizzo IPv4, un " -"indirizzo IPv6 nome host o FQDN. Se si utilizza un indirizzo IPv6 " -"racchiuderlo in parentesi separatamente dalla porta (ad esempio, \"[fe80::a:" -"b:c]:9876\")." - -#, python-format -msgid "%s can't contain 4 byte unicode characters." -msgstr "%s non può contenere 4 byte di caratteri unicode." - -#, python-format -msgid "%s is already stopped" -msgstr "%s è già stato arrestato" - -#, python-format -msgid "%s is stopped" -msgstr "%s è stato arrestato" - -msgid "" -"--os_auth_url option or OS_AUTH_URL environment variable required when " -"keystone authentication strategy is enabled\n" -msgstr "" -"l'opzione --os_auth_url o la variabile d'ambiente OS_AUTH_URL sono " -"obbligatori quando è abilitato il modo di autenticazione keystone\n" - -msgid "A body is not expected with this request." -msgstr "Un corpo non è previsto con questa richiesta." - -#, python-format -msgid "" -"A metadata definition object with name=%(object_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"Un oggetto della definizione di metadati con nome=%(object_name)s già " -"esiste nello nello spazio dei nomi=%(namespace_name)s." - -#, python-format -msgid "" -"A metadata definition property with name=%(property_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"Una proprietà della definizione di metadati con nome=%(property_name)s già " -"esiste nello spazio dei nomi=%(namespace_name)s." 
- -#, python-format -msgid "" -"A metadata definition resource-type with name=%(resource_type_name)s already " -"exists." -msgstr "" -"Un tipo-risorsa della definizione di metadati con nome=" -"%(resource_type_name)s già esiste." - -msgid "A set of URLs to access the image file kept in external store" -msgstr "" -"Un insieme di URL per accedere al file di immagini conservato nell'archivio " -"esterno" - -msgid "Amount of disk space (in GB) required to boot image." -msgstr "Quantità di spazio su disco (in GB) richiesto per l'immagine di avvio." - -msgid "Amount of ram (in MB) required to boot image." -msgstr "Quantità di ram (in MB) richiesta per l'immagine di avvio." - -msgid "An identifier for the image" -msgstr "Un identificativo per l'immagine" - -msgid "An identifier for the image member (tenantId)" -msgstr "Un identificativo per il membro dell'immagine (tenantId)" - -msgid "An identifier for the owner of this task" -msgstr "Un identificativo del proprietario di questa attività" - -msgid "An identifier for the task" -msgstr "Un identificativo per l'attività" - -msgid "An image file url" -msgstr "Un URL al file di immagini" - -msgid "An image schema url" -msgstr "Un URL allo schema di immagini" - -msgid "An image self url" -msgstr "Un URL personale all'immagine" - -#, python-format -msgid "An image with identifier %s already exists" -msgstr "Un'immagine con identificativo %s già esiste" - -msgid "An import task exception occurred" -msgstr "Si è verificata un'eccezione attività di importazione" - -msgid "An object with the same identifier already exists." -msgstr "Già esiste un oggetto con lo stesso identificativo." - -msgid "An object with the same identifier is currently being operated on." -msgstr "Un oggetto con lo stesso identificativo è attualmente in uso." - -msgid "An object with the specified identifier was not found." -msgstr "Impossibile trovare un oggetto con l'identificativo specificato." 
- -msgid "An unknown exception occurred" -msgstr "Si è verificata un'eccezione sconosciuta" - -msgid "An unknown task exception occurred" -msgstr "Si è verificata un'eccezione attività sconosciuta" - -#, python-format -msgid "Attempt to upload duplicate image: %s" -msgstr "Tentativo di caricare un duplicato di immagine: %s" - -msgid "Attempted to update Location field for an image not in queued status." -msgstr "" -"Si è tentato di aggiornare il campo Ubicazione per un'immagine che non si " -"trova nello stato accodato." - -#, python-format -msgid "Attribute '%(property)s' is read-only." -msgstr "Attributo '%(property)s' è di sola lettura." - -#, python-format -msgid "Attribute '%(property)s' is reserved." -msgstr "L'attributo '%(property)s' è riservato." - -#, python-format -msgid "Attribute '%s' is read-only." -msgstr "Attributo '%s' è di sola lettura." - -#, python-format -msgid "Attribute '%s' is reserved." -msgstr "L'attributo '%s' è riservato." - -msgid "Attribute container_format can be only replaced for a queued image." -msgstr "" -"L'attributo container_format può essere sostituito solo per un'immagine " -"nella coda." - -msgid "Attribute disk_format can be only replaced for a queued image." -msgstr "" -"L'attributo disk_format può essere sostituito solo per un'immagine nella " -"coda." - -#, python-format -msgid "Auth service at URL %(url)s not found." -msgstr "Servizio di autenticazione all'URL %(url)s non trovato." - -#, python-format -msgid "" -"Authentication error - the token may have expired during file upload. " -"Deleting image data for %s." -msgstr "" -"Errore di autenticazione - il token potrebbe essere scaduto durante il " -"caricamento del file. Eliminazione dei dati dell'immagine per %s." - -msgid "Authorization failed." -msgstr "Autorizzazione non riuscita." - -msgid "Available categories:" -msgstr "Categorie disponibili:" - -#, python-format -msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." 
-msgstr "" -"Formato filtro di query \"%s\" errato. Utilizzare la notazione ISO 8601 " -"DateTime." - -#, python-format -msgid "Bad Command: %s" -msgstr "Comando non corretto: %s" - -#, python-format -msgid "Bad header: %(header_name)s" -msgstr "Intestazione non valida: %(header_name)s" - -#, python-format -msgid "Bad value passed to filter %(filter)s got %(val)s" -msgstr "Il valore non valido fornito al filtro %(filter)s ha riportato %(val)s" - -#, python-format -msgid "Badly formed S3 URI: %(uri)s" -msgstr "URI S3 formato in modo non corretto: %(uri)s" - -#, python-format -msgid "Badly formed credentials '%(creds)s' in Swift URI" -msgstr "Credenziali con formato non corretto %(creds)s' nell'URI Swift" - -msgid "Badly formed credentials in Swift URI." -msgstr "Credenziali formate in modo non corretto nell'URI Swift." - -msgid "Body expected in request." -msgstr "Corpo previsto nella richiesta." - -msgid "Cannot be a negative value" -msgstr "Non può essere un valore negativo" - -msgid "Cannot be a negative value." -msgstr "Non può essere un valore negativo." - -#, python-format -msgid "Cannot convert image %(key)s '%(value)s' to an integer." -msgstr "" -"Impossibile convertire %(key)s dell'immagine '%(value)s' in un numero intero." - -msgid "Cannot remove last location in the image." -msgstr "Impossibile rimuovere l'ultima ubicazione nell'immagine." - -#, python-format -msgid "Cannot save data for image %(image_id)s: %(error)s" -msgstr "Impossibile salvare i dati per l'immagine %(image_id)s: %(error)s" - -msgid "Cannot set locations to empty list." -msgstr "Impossibile impostare le ubicazione nell'elenco vuoto." - -msgid "Cannot upload to an unqueued image" -msgstr "Impossibile caricare un'immagine non accodata" - -#, python-format -msgid "Checksum verification failed. Aborted caching of image '%s'." -msgstr "" -"Verifica checksum non riuscita. È stata interrotta la memorizzazione nella " -"cache dell'immagine '%s'." 
- -msgid "Client disconnected before sending all data to backend" -msgstr "Client disconnesso prima di inviare tutti i dati a backend" - -msgid "Command not found" -msgstr "Comando non trovato" - -msgid "Configuration option was not valid" -msgstr "Opzione di configurazione non valida" - -#, python-format -msgid "Connect error/bad request to Auth service at URL %(url)s." -msgstr "" -"Connetti richiesta/non corretta o in errore per il servizio di " -"autenticazione all'URL %(url)s." - -#, python-format -msgid "Constructed URL: %s" -msgstr "URL costruita: %s" - -msgid "Container format is not specified." -msgstr "Formato contenitore non specificato. " - -msgid "Content-Type must be application/octet-stream" -msgstr "Tipo-contenuto deve essere application/octet-stream" - -#, python-format -msgid "Corrupt image download for image %(image_id)s" -msgstr "" -"Esecuzione del download immagine danneggiato per l'immagine %(image_id)s" - -#, python-format -msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" -msgstr "" -"Impossibile collegarsi a %(host)s:%(port)s dopo aver tentato per 30 secondi" - -msgid "Could not find OVF file in OVA archive file." -msgstr "Impossibile trovare il file OVD nel file di archivio OVA." 
- -#, python-format -msgid "Could not find metadata object %s" -msgstr "Impossibile trovare l'oggetto di metadati %s" - -#, python-format -msgid "Could not find metadata tag %s" -msgstr "Impossibile trovare il tag di metadati %s" - -#, python-format -msgid "Could not find namespace %s" -msgstr "Impossibile trovare lo spazio dei nomi %s" - -#, python-format -msgid "Could not find property %s" -msgstr "Impossibile trovare la proprietà %s" - -msgid "Could not find required configuration option" -msgstr "Impossibile trovare l'opzione di configurazione richiesta" - -#, python-format -msgid "Could not find task %s" -msgstr "Impossibile trovare l'attività %s" - -#, python-format -msgid "Could not update image: %s" -msgstr "Impossibile aggiornare l'immagine: %s" - -msgid "Currently, OVA packages containing multiple disk are not supported." -msgstr "" -"Attualmente, i pacchetti OVA che contengono più dischi non sono supportati." - -#, python-format -msgid "Data for image_id not found: %s" -msgstr "Dati per image_id non trovati: %s" - -msgid "Data supplied was not valid." -msgstr "I dati forniti non erano validi." 
- -msgid "Date and time of image member creation" -msgstr "Data e ora di creazione del membro dell'immagine" - -msgid "Date and time of image registration" -msgstr "Data e ora della registrazione dell'immagine" - -msgid "Date and time of last modification of image member" -msgstr "Data e ora dell'ultima modifica del membro dell'immagine" - -msgid "Date and time of namespace creation" -msgstr "Data ed ora della creazione dello spazio dei nomi" - -msgid "Date and time of object creation" -msgstr "Data ed ora della creazione dell'oggetto" - -msgid "Date and time of resource type association" -msgstr "Data ed ora dell'associazione del tipo di risorsa" - -msgid "Date and time of tag creation" -msgstr "Data ed ora della creazione del tag" - -msgid "Date and time of the last image modification" -msgstr "Data e ora dell'ultima modifica dell'immagine" - -msgid "Date and time of the last namespace modification" -msgstr "Data ed ora dell'ultima modifica allo spazio dei nomi" - -msgid "Date and time of the last object modification" -msgstr "Data ed ora dell'ultima modifica all'oggetto" - -msgid "Date and time of the last resource type association modification" -msgstr "Data ed ora dell'ultima modifica all'associazione del tipo di risorsa" - -msgid "Date and time of the last tag modification" -msgstr "Data ed ora dell'ultima modifica al tag" - -msgid "Datetime when this resource was created" -msgstr "Data e ora in cui questa risorsa è stata creata" - -msgid "Datetime when this resource was updated" -msgstr "Data e ora in cui questa risorsa è stata aggiornata" - -msgid "Datetime when this resource would be subject to removal" -msgstr "Data e ora in cui questa risorsa verrà rimossa" - -#, python-format -msgid "Denying attempt to upload image because it exceeds the quota: %s" -msgstr "" -"Rifiutato il tentativo di caricare l'immagine perché supera la quota: %s" - -#, python-format -msgid "Denying attempt to upload image larger than %d bytes." 
-msgstr "Divieto del tentativo di caricare un'immagine più grande di %d byte." - -msgid "Descriptive name for the image" -msgstr "Nome descrittivo per l'immagine" - -msgid "Disk format is not specified." -msgstr "Formato disco non specificato. " - -#, python-format -msgid "" -"Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" -msgstr "" -"Impossibile configurare il driver %(driver_name)s correttamente. Motivo: " -"%(reason)s" - -msgid "" -"Error decoding your request. Either the URL or the request body contained " -"characters that could not be decoded by Glance" -msgstr "" -"Errore di decodifica della richiesta. L'URL o il corpo della richiesta " -"contengono caratteri che non possono essere decodificati da Glance" - -#, python-format -msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" -msgstr "" -"Errore durante il recupero dei membri immagine %(image_id)s: %(inner_msg)s" - -msgid "Error in store configuration. Adding images to store is disabled." -msgstr "" -"Errore nella configurazione dell'archivio. L'aggiunta di immagini a questo " -"archivio non è consentita." - -msgid "Expected a member in the form: {\"member\": \"image_id\"}" -msgstr "Previsto un membro nel formato: {\"member\": \"image_id\"}" - -msgid "Expected a status in the form: {\"status\": \"status\"}" -msgstr "Previsto uno stato nel formato: {\"status\": \"status\"}" - -msgid "External source should not be empty" -msgstr "L'origine esterna non deve essere vuota" - -#, python-format -msgid "External sources are not supported: '%s'" -msgstr "Le origini esterne non sono supportate: '%s'" - -#, python-format -msgid "Failed to activate image. Got error: %s" -msgstr "Attivazione immagine non riuscita. Ricevuto errore: %s" - -#, python-format -msgid "Failed to add image metadata. Got error: %s" -msgstr "Impossibile aggiungere metadati all'immagine. 
Ricevuto errore: %s" - -#, python-format -msgid "Failed to find image %(image_id)s to delete" -msgstr "Impossibile trovare l'immagine %(image_id)s da eliminare" - -#, python-format -msgid "Failed to find image to delete: %s" -msgstr "Impossibile trovare l'immagine da eliminare: %s" - -#, python-format -msgid "Failed to find image to update: %s" -msgstr "Impossibile trovare l'immagine da aggiornare: %s" - -#, python-format -msgid "Failed to find resource type %(resourcetype)s to delete" -msgstr "Impossibile trovare il tipo di risorsa %(resourcetype)s da eliminare" - -#, python-format -msgid "Failed to initialize the image cache database. Got error: %s" -msgstr "" -"Impossibile inizializzare il database cache immagini. Errore ricevuto: %s" - -#, python-format -msgid "Failed to read %s from config" -msgstr "Impossibile leggere %s dalla configurazione" - -#, python-format -msgid "Failed to reserve image. Got error: %s" -msgstr "Impossibile prenotare l'immagine. Errore: %s" - -#, python-format -msgid "Failed to update image metadata. Got error: %s" -msgstr "Impossibile aggiornare i metadati immagine. Errore: %s" - -#, python-format -msgid "Failed to upload image %s" -msgstr "Caricamento immagine %s non riuscito" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to HTTP error: " -"%(error)s" -msgstr "" -"Impossibile caricare i dati dell'immagine %(image_id)s a causa di un errore " -"HTTP: %(error)s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to internal error: " -"%(error)s" -msgstr "" -"Impossibile caricare i dati dell'immagine %(image_id)s a causa di un errore " -"interno: %(error)s" - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"Il file %(path)s ha un file di backup %(bfile)s non valido, operazione " -"interrotta." - -msgid "" -"File based imports are not allowed. Please use a non-local source of image " -"data." 
-msgstr "" -"Le importazioni basata su file non sono consentite. Utilizzare un'origine " -"dati dell'immagine non locale." - -msgid "Forbidden image access" -msgstr "Accesso all'immagine vietato" - -#, python-format -msgid "Forbidden to delete a %s image." -msgstr "Divieto di eliminare un'immagine %s." - -#, python-format -msgid "Forbidden to delete image: %s" -msgstr "Divieto di eliminare l'immagine: %s" - -#, python-format -msgid "Forbidden to modify '%(key)s' of %(status)s image." -msgstr "Divieto di modificare '%(key)s' dell'immagine %(status)s." - -#, python-format -msgid "Forbidden to modify '%s' of image." -msgstr "Divieto di modificare '%s' dell'immagine." - -msgid "Forbidden to reserve image." -msgstr "Vietato prenotare l'immagine." - -msgid "Forbidden to update deleted image." -msgstr "Divieto di aggiornare l'immagine eliminata." - -#, python-format -msgid "Forbidden to update image: %s" -msgstr "Divieto di aggiornare l'immagine: %s" - -#, python-format -msgid "Forbidden upload attempt: %s" -msgstr "Vietato tentativo di caricamento: %s" - -#, python-format -msgid "Forbidding request, metadata definition namespace=%s is not visible." -msgstr "" -"Richiesta vietata, lo spazio dei nomi della definizione di metadati =%s non " -"è visibile." - -#, python-format -msgid "Forbidding request, task %s is not visible" -msgstr "Richiesta vietata, l'attività %s non è visibile" - -msgid "Format of the container" -msgstr "Formato del contenitore" - -msgid "Format of the disk" -msgstr "Formato del disco" - -#, python-format -msgid "Host \"%s\" is not valid." -msgstr "L'host \"%s\" non è valido." - -#, python-format -msgid "Host and port \"%s\" is not valid." -msgstr "Host o porta \"%s\" non è valido." 
- -msgid "" -"Human-readable informative message only included when appropriate (usually " -"on failure)" -msgstr "" -"I messaggi informativi leggibili dall'utente sono inclusi solo se necessario " -"(di solito in caso di errore)" - -msgid "If true, image will not be deletable." -msgstr "Se true, l'immagine non sarà eliminabile." - -msgid "If true, namespace will not be deletable." -msgstr "Se impostato su true, lo spazio dei nomi non sarà eliminabile." - -#, python-format -msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" -msgstr "L'immagine %(id)s non può essere eliminata perché è in uso: %(exc)s" - -#, python-format -msgid "Image %(id)s not found" -msgstr "Immagine %(id)s non trovata" - -#, python-format -msgid "" -"Image %(image_id)s could not be found after upload. The image may have been " -"deleted during the upload: %(error)s" -msgstr "" -"Impossibile trovare l'immagine %(image_id)s dopo il caricamento. L'immagine " -"potrebbe essere stata eliminata durante il caricamento: %(error)s" - -#, python-format -msgid "Image %(image_id)s is protected and cannot be deleted." -msgstr "L'immagine %(image_id)s è protetta e non può essere eliminata." - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload, cleaning up the chunks uploaded." -msgstr "" -"Impossibile trovare l'immagine %s dopo il caricamento. L'immagine potrebbe " -"essere stata eliminata durante il caricamento. Eliminazione delle porzioni " -"caricate." - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload." -msgstr "" -"Impossibile trovare l'immagine %s dopo il caricamento. L'immagine potrebbe " -"essere stata eliminata durante il caricamento." 
- -#, python-format -msgid "Image %s is deactivated" -msgstr "L'immagine %s è disattivata" - -#, python-format -msgid "Image %s is not active" -msgstr "L'immagine %s non è attiva" - -#, python-format -msgid "Image %s not found." -msgstr "Immagine %s non trovata." - -#, python-format -msgid "Image exceeds the storage quota: %s" -msgstr "L'immagine supera la quota di memoria: %s" - -msgid "Image id is required." -msgstr "ID immagine obbligatorio." - -msgid "Image is protected" -msgstr "L'immagine è protetta" - -#, python-format -msgid "Image member limit exceeded for image %(id)s: %(e)s:" -msgstr "" -"Superato il limite del membro dell'immagine per l'immagine %(id)s: %(e)s:" - -#, python-format -msgid "Image name too long: %d" -msgstr "Il nome dell'immagine è troppo lungo: %d" - -msgid "Image operation conflicts" -msgstr "L'operazione dell'immagine è in conflitto" - -#, python-format -msgid "" -"Image status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "" -"Il passaggio di stato dell'immagine da %(cur_status)s a %(new_status)s non è " -"consentito" - -#, python-format -msgid "Image storage media is full: %s" -msgstr "Il supporto di memorizzazione dell'immagine è pieno: %s" - -#, python-format -msgid "Image tag limit exceeded for image %(id)s: %(e)s:" -msgstr "Superato il limite di tag dell'immagine per l'immagine %(id)s: %(e)s:" - -#, python-format -msgid "Image upload problem: %s" -msgstr "Problemi nel caricamento dell'immagine: %s" - -#, python-format -msgid "Image with identifier %s already exists!" -msgstr "Immagine con identificativo %s già esiste!" - -#, python-format -msgid "Image with identifier %s has been deleted." -msgstr "L'immagine con identificativo %s è stata eliminata." 
- -#, python-format -msgid "Image with identifier %s not found" -msgstr "Impossibile trovare l'immagine con identificativo %s" - -#, python-format -msgid "Image with the given id %(image_id)s was not found" -msgstr "L'immagine con l'id fornito %(image_id)s non è stata trovata" - -#, python-format -msgid "" -"Incorrect auth strategy, expected \"%(expected)s\" but received " -"\"%(received)s\"" -msgstr "" -"Strategia di autenticazione errata, previsto \"%(expected)s\" ma ricevuto " -"\"%(received)s\"" - -#, python-format -msgid "Incorrect request: %s" -msgstr "Richiesta non corretta: %s" - -#, python-format -msgid "Input does not contain '%(key)s' field" -msgstr "L'input non contiene il campo '%(key)s'" - -#, python-format -msgid "Insufficient permissions on image storage media: %s" -msgstr "" -"Autorizzazioni insufficienti sul supporto di memorizzazione immagini: %s" - -#, python-format -msgid "Invalid JSON pointer for this resource: '/%s'" -msgstr "Puntatore JSON non valido per questa risorsa: '/%s'" - -#, python-format -msgid "Invalid checksum '%s': can't exceed 32 characters" -msgstr "Checksum non valido '%s': non può superare 32 caratteri " - -msgid "Invalid configuration in glance-swift conf file." -msgstr "Configurazione nel file di configurazione glance-swift non valida." - -msgid "Invalid configuration in property protection file." -msgstr "Configurazione non valida nel file di protezione della proprietà." - -#, python-format -msgid "Invalid container format '%s' for image." -msgstr "Formato del contenitore '%s' non valido per l'immagine." - -#, python-format -msgid "Invalid content type %(content_type)s" -msgstr "Tipo contenuto non valido %(content_type)s" - -#, python-format -msgid "Invalid disk format '%s' for image." -msgstr "Formato del disco '%s' non valido per l'immagine." - -#, python-format -msgid "Invalid filter value %s. The quote is not closed." -msgstr "Valore filtro non valido %s. Le virgolette non sono chiuse." 
- -#, python-format -msgid "" -"Invalid filter value %s. There is no comma after closing quotation mark." -msgstr "" -"Valore filtro non valido %s. Non è presente una virgola prima delle " -"virgolette di chiusura." - -#, python-format -msgid "" -"Invalid filter value %s. There is no comma before opening quotation mark." -msgstr "" -"Valore filtro non valido %s. Non è presente una virgola prima delle " -"virgolette di apertura." - -msgid "Invalid image id format" -msgstr "Formato ID immagine non valido" - -msgid "Invalid location" -msgstr "Ubicazione non valida" - -#, python-format -msgid "Invalid location %s" -msgstr "Ubicazione non valida %s" - -#, python-format -msgid "Invalid location: %s" -msgstr "Ubicazione non valida: %s" - -#, python-format -msgid "" -"Invalid location_strategy option: %(name)s. The valid strategy option(s) " -"is(are): %(strategies)s" -msgstr "" -"Opzione location_strategy non valida: %(name)s. Le opzioni strategia valide " -"sono: %(strategies)s" - -msgid "Invalid locations" -msgstr "Ubicazioni non valide" - -#, python-format -msgid "Invalid locations: %s" -msgstr "Ubicazioni non valide: %s" - -msgid "Invalid marker format" -msgstr "Formato indicatore non valido" - -msgid "Invalid marker. Image could not be found." -msgstr "Indicatore non valido. Impossibile trovare l'immagine." - -#, python-format -msgid "Invalid membership association: %s" -msgstr "Associazione di appartenenza non valida: %s" - -msgid "" -"Invalid mix of disk and container formats. When setting a disk or container " -"format to one of 'aki', 'ari', or 'ami', the container and disk formats must " -"match." -msgstr "" -"Combinazione di formati di disco e contenitore non valida. Quando si imposta " -"un formato disco o contenitore in uno dei seguenti 'aki', 'ari' o 'ami', i " -"formati contenitore e disco devono corrispondere." - -#, python-format -msgid "" -"Invalid operation: `%(op)s`. It must be one of the following: %(available)s." 
-msgstr "" -"Operazione non valida: `%(op)s`. Deve essere uno dei seguenti: %(available)s." - -msgid "Invalid position for adding a location." -msgstr "Posizione non valida per l'aggiunta di una ubicazione." - -msgid "Invalid position for removing a location." -msgstr "Posizione non valida per la rimozione di una ubicazione." - -msgid "Invalid service catalog json." -msgstr "json del catalogo del servizio non è valido." - -#, python-format -msgid "Invalid sort direction: %s" -msgstr "Direzione ordinamento non valida: %s" - -#, python-format -msgid "" -"Invalid sort key: %(sort_key)s. It must be one of the following: " -"%(available)s." -msgstr "" -"Chiave di ordinamento non valida: %(sort_key)s. Deve essere una delle " -"seguenti: %(available)s." - -#, python-format -msgid "Invalid status value: %s" -msgstr "Valore di stato non valido: %s" - -#, python-format -msgid "Invalid status: %s" -msgstr "Stato non valido: %s" - -#, python-format -msgid "Invalid time format for %s." -msgstr "Formato ora non valido per %s." - -#, python-format -msgid "Invalid type value: %s" -msgstr "Valore di tipo non valido: %s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition namespace " -"with the same name of %s" -msgstr "" -"Aggiornamento non valido. Potrebbe generare uno spazio dei nomi della " -"definizione di metadati duplicato con lo stesso nome di %s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Aggiornamento non valido. Potrebbe generare un oggetto della definizione di " -"metadati duplicato con lo stesso nome=%(name)s nello spazio dei nomi" -"%(namespace_name)s." - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Aggiornamento non valido. 
Potrebbe generare un oggetto della definizione di " -"metadati duplicato con lo stesso nome=%(name)s nello spazio dei nomi" -"%(namespace_name)s." - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition property " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Aggiornamento non valido. Potrebbe generare uno spazio dei nomi della " -"definizione di metadati duplicato con lo stesso nome=%(name)s nello spazio " -"dei nomi=%(namespace_name)s." - -#, python-format -msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" -msgstr "" -"Valore '%(value)s' non valido per il parametro '%(param)s': %(extra_msg)s" - -#, python-format -msgid "Invalid value for option %(option)s: %(value)s" -msgstr "Valore non valido per l'opzione %(option)s: %(value)s" - -#, python-format -msgid "Invalid visibility value: %s" -msgstr "Valore visibilità non valido: %s" - -msgid "It's invalid to provide multiple image sources." -msgstr "Non è valido per fornire più origini delle immagini." - -msgid "It's not allowed to add locations if locations are invisible." -msgstr "" -"Non è consentito aggiungere ubicazione se le ubicazioni sono invisibili." - -msgid "It's not allowed to remove locations if locations are invisible." -msgstr "" -"Non è consentito rimuovere ubicazioni se le ubicazioni sono invisibili." - -msgid "It's not allowed to update locations if locations are invisible." -msgstr "Non è consentito caricare ubicazioni se le ubicazioni sono invisibili." - -msgid "List of strings related to the image" -msgstr "Elenco di stringhe relative all'immagine" - -msgid "Malformed JSON in request body." -msgstr "JSON non corretto nel corpo della richiesta." - -msgid "Maximal age is count of days since epoch." -msgstr "L'età massima è il numero di giorni dal periodo." - -#, python-format -msgid "Maximum redirects (%(redirects)s) was exceeded." 
-msgstr "Il numero massimo di rendirizzamenti (%(redirects)s) è stato superato." - -#, python-format -msgid "Member %(member_id)s is duplicated for image %(image_id)s" -msgstr "Il membro %(member_id)s è il duplicato dell'immagine %(image_id)s" - -msgid "Member can't be empty" -msgstr "Il membro non può essere vuoto" - -msgid "Member to be added not specified" -msgstr "Membro da aggiungere non specificato" - -msgid "Membership could not be found." -msgstr "Impossibile trovare l'appartenenza." - -#, python-format -msgid "" -"Metadata definition namespace %(namespace)s is protected and cannot be " -"deleted." -msgstr "" -"Lo spazio dei nomi della definizione di metadati %(namespace)s è protetto e " -"non è possibile eliminarlo." - -#, python-format -msgid "Metadata definition namespace not found for id=%s" -msgstr "" -"Lo spazio dei nomi della definizione dei metadati per l'id=%s non è stato " -"trovato" - -#, python-format -msgid "" -"Metadata definition object %(object_name)s is protected and cannot be " -"deleted." -msgstr "" -"L'oggetto di definizione di metadati %(object_name)s è protetto e non è " -"possibile eliminarlo." - -#, python-format -msgid "Metadata definition object not found for id=%s" -msgstr "" -"L'oggetto della definizione dei metadati per l'id=%s non è stato trovato" - -#, python-format -msgid "" -"Metadata definition property %(property_name)s is protected and cannot be " -"deleted." -msgstr "" -"La proprietà della definizione di metadati %(property_name)s è protetta e " -"non è possibile eliminarlo." - -#, python-format -msgid "Metadata definition property not found for id=%s" -msgstr "" -"La proprietà della definizione dei metadati per l'id=%s non è stata trovata" - -#, python-format -msgid "" -"Metadata definition resource-type %(resource_type_name)s is a seeded-system " -"type and cannot be deleted." 
-msgstr "" -"Il tipo-risorsa della definizione di metadati %(resource_type_name)s è un " -"tipo inserito dalsistema e non è possibile eliminarlo." - -#, python-format -msgid "" -"Metadata definition resource-type-association %(resource_type)s is protected " -"and cannot be deleted." -msgstr "" -"L'associazione-tipo-risorsa della definizione di metadati %(resource_type)s " -"è protetta e non può essere eliminata." - -#, python-format -msgid "" -"Metadata definition tag %(tag_name)s is protected and cannot be deleted." -msgstr "" -"Il tag di definizione dei metadati %(tag_name)s è protetto e non può essere " -"eliminato." - -#, python-format -msgid "Metadata definition tag not found for id=%s" -msgstr "Il tag di definizione dei metadati per l'id=%s non è stato trovato" - -msgid "Minimal rows limit is 1." -msgstr "Il limite di righe minimo è 1." - -#, python-format -msgid "Missing required credential: %(required)s" -msgstr "Credenziale richiesta mancante: %(required)s" - -#, python-format -msgid "" -"Multiple 'image' service matches for region %(region)s. This generally means " -"that a region is required and you have not supplied one." -msgstr "" -"Il servizio 'immagine' multipla corrisponde nella regione %(region)s. Questo " -"in genere significa che una regione è obbligatoria e non ne è stata fornita " -"una." - -msgid "No authenticated user" -msgstr "Nessun utente autenticato" - -#, python-format -msgid "No image found with ID %s" -msgstr "Nessuna immagine trovata con ID %s" - -#, python-format -msgid "No location found with ID %(loc)s from image %(img)s" -msgstr "" -"Non è stata trovata nessuna ubicazione con ID %(loc)s dall'immagine %(img)s" - -msgid "No permission to share that image" -msgstr "Nessuna autorizzazione per condividere tale immagine" - -#, python-format -msgid "Not allowed to create members for image %s." -msgstr "Non è consentito creare membri per l'immagine %s." 
- -#, python-format -msgid "Not allowed to deactivate image in status '%s'" -msgstr "Disattivazione dell'immagine in stato '%s' non consentita" - -#, python-format -msgid "Not allowed to delete members for image %s." -msgstr "Non è consentito eliminare i membri dell'immagine %s." - -#, python-format -msgid "Not allowed to delete tags for image %s." -msgstr "Non è consentito eliminare i tag dell'immagine %s." - -#, python-format -msgid "Not allowed to list members for image %s." -msgstr "Non è consentito elencare i membri dell'immagine %s." - -#, python-format -msgid "Not allowed to reactivate image in status '%s'" -msgstr "Riattivazione dell'immagine in stato '%s' non consentita" - -#, python-format -msgid "Not allowed to update members for image %s." -msgstr "Non è consentito aggiornare i membri dell'immagine %s." - -#, python-format -msgid "Not allowed to update tags for image %s." -msgstr "Non è consentito aggiornare i tag dell'immagine %s." - -#, python-format -msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" -msgstr "" -"Non è consentito caricare i dati dell'immagine per l'immagine %(image_id)s: " -"%(error)s" - -msgid "Number of sort dirs does not match the number of sort keys" -msgstr "" -"Il numero di directory di ordinamento non corrisponde al numero di chiavi di " -"ordinamento" - -msgid "OVA extract is limited to admin" -msgstr "L'estrazione OVA è limitata all'amministratore" - -msgid "Old and new sorting syntax cannot be combined" -msgstr "Impossibile combinare la nuova e la precedente sintassi di ordinamento" - -#, python-format -msgid "Operation \"%s\" requires a member named \"value\"." -msgstr "L'operazione \"%s\" richiede un membro denominato \"value\"." - -msgid "" -"Operation objects must contain exactly one member named \"add\", \"remove\", " -"or \"replace\"." -msgstr "" -"Gli oggetti dell'operazione devono contenere esattamente un membro " -"denominato \"add\", \"remove\" o \"replace\"." 
- -msgid "" -"Operation objects must contain only one member named \"add\", \"remove\", or " -"\"replace\"." -msgstr "" -"Gli oggetti dell'operazione devono contenere solo un membro denominato \"add" -"\", \" remove \" o \"replace\"." - -msgid "Operations must be JSON objects." -msgstr "Le operazioni devono essere oggetti JSON." - -#, python-format -msgid "Original locations is not empty: %s" -msgstr "Le ubicazioni originali non sono vuote: %s" - -msgid "Owner can't be updated by non admin." -msgstr "Il proprietario non può essere aggiornato da un non admin." - -msgid "Owner must be specified to create a tag." -msgstr "Il proprietario deve specificare per creare un tag." - -msgid "Owner of the image" -msgstr "Proprietario dell'immagine" - -msgid "Owner of the namespace." -msgstr "Proprietario dello spazio dei nomi." - -msgid "Param values can't contain 4 byte unicode." -msgstr "I valori dei parametri non possono contenere 4 byte unicode." - -#, python-format -msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." -msgstr "" -"Il puntatore `%s` contiene \"~\" che non fa parte di una sequenza escape " -"riconosciuta." - -#, python-format -msgid "Pointer `%s` contains adjacent \"/\"." -msgstr "Il puntatore `%s` contiene l'adiacente \"/\"." - -#, python-format -msgid "Pointer `%s` does not contains valid token." -msgstr "Il puntatore `%s` non contiene token valido." - -#, python-format -msgid "Pointer `%s` does not start with \"/\"." -msgstr "Il puntatore `%s` non inizia con \"/\"." - -#, python-format -msgid "Pointer `%s` end with \"/\"." -msgstr "Il puntatore `%s` finisce con \"/\"." - -#, python-format -msgid "Port \"%s\" is not valid." -msgstr "La porta \"%s\" non è valida." - -#, python-format -msgid "Process %d not running" -msgstr "Il processo %d non è in esecuzione" - -#, python-format -msgid "Properties %s must be set prior to saving data." -msgstr "Le proprietà %s devono essere impostate prima di salvare i dati." 
- -#, python-format -msgid "" -"Property %(property_name)s does not start with the expected resource type " -"association prefix of '%(prefix)s'." -msgstr "" -"La proprietà %(property_name)s non inizia con il prefisso di associazione " -"del tipo di risorsa previsto '%(prefix)s'." - -#, python-format -msgid "Property %s already present." -msgstr "La proprietà %s è già presente." - -#, python-format -msgid "Property %s does not exist." -msgstr "La proprietà %s non esiste." - -#, python-format -msgid "Property %s may not be removed." -msgstr "La proprietà %s non può essere rimossa." - -#, python-format -msgid "Property %s must be set prior to saving data." -msgstr "La proprietà %s deve essere impostata prima di salvare i dati." - -#, python-format -msgid "Property '%s' is protected" -msgstr "La proprietà '%s' è protetta" - -msgid "Property names can't contain 4 byte unicode." -msgstr "I nomi delle proprietà non possono contenere 4 byte unicode." - -#, python-format -msgid "" -"Provided image size must match the stored image size. (provided size: " -"%(ps)d, stored size: %(ss)d)" -msgstr "" -"La dimensione dell'immagine fornita deve corrispondere alla dimensione " -"dell'immagine memorizzata. (dimensione fornita: %(ps)d, dimensione " -"memorizzata: %(ss)d)" - -#, python-format -msgid "Provided object does not match schema '%(schema)s': %(reason)s" -msgstr "L'oggetto fornito non corrisponde allo schema '%(schema)s': %(reason)s" - -#, python-format -msgid "Provided status of task is unsupported: %(status)s" -msgstr "Lo stato dell'attività fornito non è supportato: %(status)s" - -#, python-format -msgid "Provided type of task is unsupported: %(type)s" -msgstr "Il tipo dell'attività fornito non è supportato: %(type)s" - -msgid "Provides a user friendly description of the namespace." -msgstr "Fornisce una semplice descrizione utente dello spazio dei nomi." - -msgid "Received invalid HTTP redirect." -msgstr "Ricevuto un reindirizzamento HTTP non valido." 
- -#, python-format -msgid "Redirecting to %(uri)s for authorization." -msgstr "Reindirizzamento a %(uri)s per l'autorizzazione." - -#, python-format -msgid "Registry service can't use %s" -msgstr "Il servizio registro non può utilizzare %s" - -#, python-format -msgid "Registry was not configured correctly on API server. Reason: %(reason)s" -msgstr "" -"Il registro non è stato configurato correttamente sul server API. Motivo: " -"%(reason)s" - -#, python-format -msgid "Reload of %(serv)s not supported" -msgstr "Ricaricamento di %(serv)s non supportato" - -#, python-format -msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "Ricaricamento %(serv)s (pid %(pid)s) con segnale(%(sig)s)" - -#, python-format -msgid "Removing stale pid file %s" -msgstr "Rimozione del file pid %s obsoleto in corso" - -msgid "Request body must be a JSON array of operation objects." -msgstr "" -"Il corpo della richiesta deve essere un array JSON degli oggetti " -"dell'operazione." - -msgid "Request must be a list of commands" -msgstr "La richiesta deve essere un elenco di comandi" - -#, python-format -msgid "Required store %s is invalid" -msgstr "Archivio richiesto %s non valido" - -msgid "" -"Resource type names should be aligned with Heat resource types whenever " -"possible: http://docs.openstack.org/developer/heat/template_guide/openstack." -"html" -msgstr "" -"I nomi del tipo di risorsa devono essere allineati con i tipi di risorsa " -"Heat quando possibile: http://docs.openstack.org/developer/heat/" -"template_guide/openstack.html" - -msgid "Response from Keystone does not contain a Glance endpoint." -msgstr "La risposta dal Keystone non contiene un endpoint Glance." - -msgid "Scope of image accessibility" -msgstr "Ambito di accessibilità dell'immagine" - -msgid "Scope of namespace accessibility." -msgstr "Ambito di accessibilità dello spazio dei nomi." 
- -#, python-format -msgid "Server %(serv)s is stopped" -msgstr "Il server %(serv)s è stato arrestato" - -#, python-format -msgid "Server worker creation failed: %(reason)s." -msgstr "Creazione dell'operatore server non riuscita: %(reason)s." - -msgid "Signature verification failed" -msgstr "Verifica firma non riuscita" - -msgid "Size of image file in bytes" -msgstr "Dimensione del file di immagine in byte" - -msgid "" -"Some resource types allow more than one key / value pair per instance. For " -"example, Cinder allows user and image metadata on volumes. Only the image " -"properties metadata is evaluated by Nova (scheduling or drivers). This " -"property allows a namespace target to remove the ambiguity." -msgstr "" -"Alcuni tipi di risorsa consentono più di una coppia chiave / valore per " -"istanza. Ad esempio, Cinder consente metadati immagine ed utente sui " -"volumi. Solo i metadati delle proprietà dell'immagine vengono valutati da " -"Nova (pianificazione o driver). Questa proprietà consente una destinazione " -"dello spazio dei nomi per eliminare l'ambiguità." - -msgid "Sort direction supplied was not valid." -msgstr "La direzione di ordinamento fornita non è valida." - -msgid "Sort key supplied was not valid." -msgstr "La chiave di ordinamento fornita non è valida." - -msgid "" -"Specifies the prefix to use for the given resource type. Any properties in " -"the namespace should be prefixed with this prefix when being applied to the " -"specified resource type. Must include prefix separator (e.g. a colon :)." -msgstr "" -"Specifica il prefisso da utilizzare per il tipo di risorsa fornito. " -"Qualsiasi proprietà nello spazio dei nomi deve essere preceduta da un " -"prefisso quando viene applicata ad un tipo di risorsa specificato. Deve " -"includere un separatore di prefisso (ad esempio due punti :)." - -msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." -msgstr "Lo stato deve essere \"pending\", \"accepted\" o \"rejected\"." 
- -msgid "Status not specified" -msgstr "Stato non specificato" - -msgid "Status of the image" -msgstr "Stato dell'immagine" - -#, python-format -msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "" -"Il passaggio di stato da %(cur_status)s a %(new_status)s non è consentito" - -#, python-format -msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "Arresto di %(serv)s in corso (pid %(pid)s) con segnale(%(sig)s)" - -#, python-format -msgid "Store for image_id not found: %s" -msgstr "Archivio per image_id non trovato: %s" - -#, python-format -msgid "Store for scheme %s not found" -msgstr "Archivio per lo schema %s non trovato" - -#, python-format -msgid "" -"Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " -"(%(actual)s) did not match. Setting image status to 'killed'." -msgstr "" -"%(attr)s (%(supplied)s) e %(attr)s fornito e generato dall'immagine caricata " -"(%(actual)s) non corrispondevano. Lo stato dell'immagine viene impostato su " -"'killed'." - -msgid "Supported values for the 'container_format' image attribute" -msgstr "Valori supportati per l'attributo di immagine 'container_format'" - -msgid "Supported values for the 'disk_format' image attribute" -msgstr "Valori supportati per l'attributo di immagine 'disk_format'" - -#, python-format -msgid "Suppressed respawn as %(serv)s was %(rsn)s." -msgstr "Respawn soppresso come %(serv)s era %(rsn)s." - -msgid "System SIGHUP signal received." -msgstr "Ricevuto segnale SIGHUP di sistema." 
- -#, python-format -msgid "Task '%s' is required" -msgstr "Attività '%s' obbligatoria" - -msgid "Task does not exist" -msgstr "L'attività non esiste" - -msgid "Task failed due to Internal Error" -msgstr "Attività non riuscita a causa di un errore interno" - -msgid "Task was not configured properly" -msgstr "L'attività non è stata configurata correttamente" - -#, python-format -msgid "Task with the given id %(task_id)s was not found" -msgstr "L'attività con l'id fornito %(task_id)s non è stata trovata" - -msgid "The \"changes-since\" filter is no longer available on v2." -msgstr "Il filtro \"changes-since\" non è più disponibile su v2." - -#, python-format -msgid "The CA file you specified %s does not exist" -msgstr "Il file CA specificato %s non esiste" - -#, python-format -msgid "" -"The Image %(image_id)s object being created by this task %(task_id)s, is no " -"longer in valid status for further processing." -msgstr "" -"L'oggetto immagine %(image_id)s, in fase di creazione da questa attività " -"%(task_id)s, non si trova più in uno stato che ne consenta ulteriori " -"elaborazioni." - -msgid "The Store URI was malformed." -msgstr "L'URI della memoria non era corretto." - -msgid "" -"The URL to the keystone service. If \"use_user_token\" is not in effect and " -"using keystone auth, then URL of keystone can be specified." -msgstr "" -"L'URL per il servizio keystone. Se \"use_user_token\" non è attiva e si " -"utilizza l'auth keystone, è possibile specificare l'URL di keystone." - -msgid "" -"The administrators password. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"La password degli amministratori. Se non è attiva \"use_user_token\", è " -"possibile specificare le credenziali admin." - -msgid "" -"The administrators user name. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"Il nome utente degli amministratori. 
Se non è attiva \"use_user_token\" , è " -"possibile specificare le credenziali admin." - -#, python-format -msgid "The cert file you specified %s does not exist" -msgstr "Il file certificato specificato %s non esiste" - -msgid "The current status of this task" -msgstr "Lo stato corrente di questa attività" - -#, python-format -msgid "" -"The device housing the image cache directory %(image_cache_dir)s does not " -"support xattr. It is likely you need to edit your fstab and add the " -"user_xattr option to the appropriate line for the device housing the cache " -"directory." -msgstr "" -"L'unità in cui si trova la directory cache dell'immagine %(image_cache_dir)s " -"non supporta xattr. Probabilmente è necessario modificare fstab e aggiungere " -"l'opzione user_xattr nella riga appropriata per l'unità che ospita la " -"directory cache." - -#, python-format -msgid "" -"The given uri is not valid. Please specify a valid uri from the following " -"list of supported uri %(supported)s" -msgstr "" -"L'URI fornito non è valido. Specificare un URI valido dal seguente elenco di " -"uri supportati %(supported)s" - -#, python-format -msgid "The incoming image is too large: %s" -msgstr "L'immagine in entrata è troppo grande: %s" - -#, python-format -msgid "The key file you specified %s does not exist" -msgstr "Il file chiave specificato %snon esiste" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image locations. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Il limite di ubicazioni immagine consentito è stato superato. Tentato: " -"%(attempted)s, Massimo: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image members for this " -"image. Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Il limite di membri dell'immagine consentito è stato superato in questa " -"immagine. 
Tentato: %(attempted)s, Massimo: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Il limite di proprietà immagine consentito è stato superato. Tentato: " -"%(attempted)s, Massimo: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(num)s, Maximum: %(quota)s" -msgstr "" -"Il limite di proprietà immagine consentito è stato superato. Tentato: " -"%(num)s, Massimo: %(quota)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image tags. Attempted: " -"%(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Il limite di tag immagine consentito è stato superato. Tentato: " -"%(attempted)s, Massimo: %(maximum)s" - -#, python-format -msgid "The location %(location)s already exists" -msgstr "L'ubicazione %(location)s esiste già" - -#, python-format -msgid "The location data has an invalid ID: %d" -msgstr "I dati dell'ubicazione hanno un ID non valido: %d" - -#, python-format -msgid "" -"The metadata definition %(record_type)s with name=%(record_name)s not " -"deleted. Other records still refer to it." -msgstr "" -"La definizione di metadati %(record_type)s con nome=%(record_name)s non è " -"eliminata. Altri record ancora fanno riferimento a tale definizione." - -#, python-format -msgid "The metadata definition namespace=%(namespace_name)s already exists." -msgstr "" -"Lo spazio dei nomi della definizione di metadati =%(namespace_name)s già " -"esiste." - -#, python-format -msgid "" -"The metadata definition object with name=%(object_name)s was not found in " -"namespace=%(namespace_name)s." -msgstr "" -"L'oggetto della definizione di metadati con nome=%(object_name)s non è stato " -"trovato nello spazio dei nomi=%(namespace_name)s." 
- -#, python-format -msgid "" -"The metadata definition property with name=%(property_name)s was not found " -"in namespace=%(namespace_name)s." -msgstr "" -"La proprietà della definizione di metadati con nome=%(property_name)s non è " -"stata trovata nello spazio dei nomi=%(namespace_name)s." - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s already exists." -msgstr "" -"L'associazione tipo-risorsa della definizione di metadati del tipo-risorsa=" -"%(resource_type_name)s per lo spazio dei nomi=%(namespace_name)s già esiste." - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s, was not found." -msgstr "" -"L'associazione tipo-risorsa della definizione di metadati del tipo-risorsa=" -"%(resource_type_name)s per lo spazio dei nomi=%(namespace_name)s, non è " -"stata trovata." - -#, python-format -msgid "" -"The metadata definition resource-type with name=%(resource_type_name)s, was " -"not found." -msgstr "" -"Il tipo-risorsa della definizione di metadati con nome=" -"%(resource_type_name)s, non è stato trovato." - -#, python-format -msgid "" -"The metadata definition tag with name=%(name)s was not found in namespace=" -"%(namespace_name)s." -msgstr "" -"Il tag di definizione dei metadati con nome=%(name)s non è stato trovato " -"nello spazio dei nomi=%(namespace_name)s." - -msgid "The parameters required by task, JSON blob" -msgstr "I parametri richiesti dall'attività, blob JSON" - -msgid "The provided image is too large." -msgstr "L'immagine fornita è troppo grande." - -msgid "" -"The region for the authentication service. If \"use_user_token\" is not in " -"effect and using keystone auth, then region name can be specified." -msgstr "" -"la regione per il servizio di autenticazione. 
Se \"use_user_token\" non è " -"attiva e si utilizza l'autenticazione keystone, è possibile specificare il " -"nome regione." - -msgid "The request returned 500 Internal Server Error." -msgstr "La richiesta ha restituito 500 Errore interno del server." - -msgid "" -"The request returned 503 Service Unavailable. This generally occurs on " -"service overload or other transient outage." -msgstr "" -"La richiesta ha restituito 503 Servizio non disponibile 503. Ciò " -"generalmente si verifica nel sovraccarico del servizio o altro tipo di " -"interruzione temporanea." - -#, python-format -msgid "" -"The request returned a 302 Multiple Choices. This generally means that you " -"have not included a version indicator in a request URI.\n" -"\n" -"The body of response returned:\n" -"%(body)s" -msgstr "" -"La richiesta ha restituito 302 scelte multiple. Questo generalmente indica " -"che non è stato incluso un indicatore di versione in un URI della " -"richiesta.\n" -"\n" -"Restituito il corpo della risposta:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned a 413 Request Entity Too Large. This generally means " -"that rate limiting or a quota threshold was breached.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"La richiesta ha restituito 413 Entità della richiesta troppo grande. Questo " -"generalmente significa che il limite della velocità o la soglia della quota " -"sono stati violati.\n" -"\n" -"Il corpo della risposta \n" -"%(body)s" - -#, python-format -msgid "" -"The request returned an unexpected status: %(status)s.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"La richiesta ha restituito uno stato imprevisto: %(status)s.\n" -"\n" -"Il corpo della risposta \n" -"%(body)s" - -msgid "" -"The requested image has been deactivated. Image data download is forbidden." -msgstr "" -"L'immagine richiesta è stata disattivata. Il download dei dati immagine non " -"è consentito." 
- -msgid "The result of current task, JSON blob" -msgstr "Il risultato dell'attività corrente, blob JSON" - -#, python-format -msgid "" -"The size of the data %(image_size)s will exceed the limit. %(remaining)s " -"bytes remaining." -msgstr "" -"La dimensione dei dati %(image_size)s supererà il limite. %(remaining)s byte " -"rimanenti." - -#, python-format -msgid "The specified member %s could not be found" -msgstr "Impossibile trovare il membro specificato %s" - -#, python-format -msgid "The specified metadata object %s could not be found" -msgstr "Impossibile trovare l'oggetto di metadati %s specificato" - -#, python-format -msgid "The specified metadata tag %s could not be found" -msgstr "Impossibile trovare il tag di metadati %s specificato" - -#, python-format -msgid "The specified namespace %s could not be found" -msgstr "Impossibile trovare lo spazio dei nomi %s specificato" - -#, python-format -msgid "The specified property %s could not be found" -msgstr "Impossibile trovare la proprietà %s specificata" - -#, python-format -msgid "The specified resource type %s could not be found " -msgstr "Impossibile trovare il tipo di risorsa %s specificato " - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'" -msgstr "" -"Lo stato dell'ubicazione immagine eliminata può essere impostata solo su " -"'pending_delete' o 'deleted'" - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'." -msgstr "" -"Lo stato dell'ubicazione immagine eliminata può essere impostata solo su " -"'pending_delete' o 'deleted'." - -msgid "The status of this image member" -msgstr "Lo stato di questo membro dell'immagine" - -msgid "" -"The strategy to use for authentication. If \"use_user_token\" is not in " -"effect, then auth strategy can be specified." -msgstr "" -"La strategia da utilizzare per l'autenticazione. 
Se \"use_user_token\" non è " -"attiva è possibile specificare la strategia di autenticazione." - -#, python-format -msgid "" -"The target member %(member_id)s is already associated with image " -"%(image_id)s." -msgstr "" -"Il membro di destinazione %(member_id)s è già associato all'immagine " -"%(image_id)s." - -msgid "" -"The tenant name of the administrative user. If \"use_user_token\" is not in " -"effect, then admin tenant name can be specified." -msgstr "" -"Il nome tenant dell'utente amministrativo. Se \"use_user_token\" non è " -"attiva è possibile specificare il nome tenant admin." - -msgid "The type of task represented by this content" -msgstr "Il tipo di attività rappresentata da questo contenuto" - -msgid "The unique namespace text." -msgstr "Il testo dello spazio dei nomi univoco." - -msgid "The user friendly name for the namespace. Used by UI if available." -msgstr "" -"Il nome utente semplice per lo spazio dei nomi. Utilizzato dalla UI se " -"disponibile." - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. Error: %(ioe)s" -msgstr "" -"Si è verificato un problema in %(error_key_name)s %(error_filename)s. " -"Verificare. Errore: %(ioe)s" - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. OpenSSL error: %(ce)s" -msgstr "" -"Si è verificato un problema in %(error_key_name)s %(error_filename)s. " -"Verificare. Errore OpenSSL: %(ce)s" - -#, python-format -msgid "" -"There is a problem with your key pair. Please verify that cert " -"%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" -msgstr "" -"Si è verificato un problema con la coppia di chiavi. Verificare che il cert " -"%(cert_file)s e la chiave %(key_file)s siano collegati. Errore OpenSSL " -"%(ce)s" - -msgid "There was an error configuring the client." -msgstr "Si è verificato un errore durante la configurazione del client." 
- -msgid "There was an error connecting to a server" -msgstr "Si è verificato un errore durante la connessione al server" - -msgid "" -"This operation is currently not permitted on Glance Tasks. They are auto " -"deleted after reaching the time based on their expires_at property." -msgstr "" -"Questa operazione non è attualmente consentita nelle attività Glance. " -"Vengono automaticamente eliminate al raggiungimento dell'ora in base alla " -"proprietà expires_at." - -msgid "This operation is currently not permitted on Glance images details." -msgstr "" -"Questa operazione non è attualmente consentita nei dettagli delle immagini " -"Glance." - -msgid "" -"Time in hours for which a task lives after, either succeeding or failing" -msgstr "" -"Periodo di tempo, in ore, per cui l'attività prosegue dopo l'esito positivo " -"o meno" - -msgid "Too few arguments." -msgstr "Troppo pochi argomenti." - -msgid "" -"URI cannot contain more than one occurrence of a scheme.If you have " -"specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " -"you need to change it to use the swift+http:// scheme, like so: swift+http://" -"user:pass@authurl.com/v1/container/obj" -msgstr "" -"L'URI non può contenere più di una ricorrenza di uno schema. Se è stato " -"specificato un URI come swift://user:pass@http://authurl.com/v1/container/" -"obj, è necessario modificarlo per utilizzare lo schema swift+http://, come: " -"swift+http://user:pass@authurl.com/v1/container/obj" - -msgid "URL to access the image file kept in external store" -msgstr "URL per accedere al file di immagini tenuto nell'archivio esterno" - -#, python-format -msgid "" -"Unable to create pid file %(pid)s. Running as non-root?\n" -"Falling back to a temp file, you can stop %(service)s service using:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" -msgstr "" -"Impossibile creare il file pid %(pid)s. 
Eseguire come non-root?\n" -"Ritorno a un file temporaneo; è possibile arrestare il servizio %(service)s " -"utilizzando:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" - -#, python-format -msgid "Unable to filter by unknown operator '%s'." -msgstr "Impossibile filtrare mediante un operatore sconosciuto '%s'." - -msgid "Unable to filter on a range with a non-numeric value." -msgstr "" -"Impossibile filtrare in base a un intervallo con un valore non numerico." - -msgid "Unable to filter on a unknown operator." -msgstr "Impossibile filtrare su un operatore sconosciuto." - -msgid "Unable to filter using the specified operator." -msgstr "Impossibile filtrare utilizzando l'operatore specificato." - -msgid "Unable to filter using the specified range." -msgstr "Impossibile filtrare utilizzando l'intervallo specificato." - -#, python-format -msgid "Unable to find '%s' in JSON Schema change" -msgstr "Impossibile trovare '%s' nella modifica dello schema JSON" - -#, python-format -msgid "" -"Unable to find `op` in JSON Schema change. It must be one of the following: " -"%(available)s." -msgstr "" -"Impossibile trovare `op` in modifica schema JSON. Deve essere uno dei " -"seguenti: %(available)s." - -msgid "Unable to increase file descriptor limit. Running as non-root?" -msgstr "" -"Impossibile aumentare il limite del descrittore di file. Eseguire come non-" -"root?" - -#, python-format -msgid "" -"Unable to load %(app_name)s from configuration file %(conf_file)s.\n" -"Got: %(e)r" -msgstr "" -"Impossibile caricare %(app_name)s dal file di configurazione %(conf_file)s.\n" -"Ricevuto: %(e)r" - -#, python-format -msgid "Unable to load schema: %(reason)s" -msgstr "Impossibile caricare lo schema: %(reason)s" - -#, python-format -msgid "Unable to locate paste config file for %s." -msgstr "Impossibile individuare il file di configurazione paste per %s." 
- -#, python-format -msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" -msgstr "" -"Impossibile caricare i dati dell'immagine duplicata per l'immagine " -"%(image_id)s: %(error)s" - -msgid "Unauthorized image access" -msgstr "Accesso all'immagine non autorizzato" - -msgid "Unexpected body type. Expected list/dict." -msgstr "Tipo di corpo imprevisto. Elenco/dizionario previsto." - -#, python-format -msgid "Unexpected response: %s" -msgstr "Risposta imprevista: %s" - -#, python-format -msgid "Unknown auth strategy '%s'" -msgstr "Strategia di autenticazione sconosciuta '%s'" - -#, python-format -msgid "Unknown command: %s" -msgstr "Comando sconosciuto: %s" - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "Direzione ordinamento sconosciuta, deve essere 'desc' o 'asc'" - -msgid "Unrecognized JSON Schema draft version" -msgstr "Versione della bozza dello schema JSON non riconosciuta" - -msgid "Unrecognized changes-since value" -msgstr "Valore changes-since non riconosciuto" - -#, python-format -msgid "Unsupported sort_dir. Acceptable values: %s" -msgstr "sort_dir non supportato. Valori consentiti: %s" - -#, python-format -msgid "Unsupported sort_key. Acceptable values: %s" -msgstr "sort_key non supportato. Valori consentiti: %s" - -msgid "Virtual size of image in bytes" -msgstr "Dimensione virtuale dell'immagine in byte" - -#, python-format -msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" -msgstr "Entro 15 secondi il pid %(pid)s (%(file)s) verrà interrotto; terminato" - -msgid "" -"When running server in SSL mode, you must specify both a cert_file and " -"key_file option value in your configuration file" -msgstr "" -"Quando si esegue il server in modalità SSL, è necessario specificare sia un " -"valore dell'opzione cert_file che key_file nel file di configurazione" - -msgid "" -"Whether to pass through the user token when making requests to the registry. 
" -"To prevent failures with token expiration during big files upload, it is " -"recommended to set this parameter to False.If \"use_user_token\" is not in " -"effect, then admin credentials can be specified." -msgstr "" -"Se passare o meno attraverso il token utente quando si effettuano richieste " -"al registro. Per impedire problemi con la scadenza del token durante il " -"caricamento di file grandi, si consiglia di impostare questo parametro su " -"False. Se \"use_user_token\" non è in vigore, è possibile specificare le " -"credenziali admin." - -#, python-format -msgid "Wrong command structure: %s" -msgstr "Struttura del comando errata: %s" - -msgid "You are not authenticated." -msgstr "L'utente non è autenticato." - -msgid "You are not authorized to complete this action." -msgstr "Non si è autorizzati a completare questa azione." - -#, python-format -msgid "You are not authorized to lookup image %s." -msgstr "Non si è autorizzati a ricercare l'immagine %s." - -#, python-format -msgid "You are not authorized to lookup the members of the image %s." -msgstr "Non si è autorizzati a ricercare i membri dell'immagine %s." - -#, python-format -msgid "You are not permitted to create a tag in the namespace owned by '%s'" -msgstr "" -"L'utente non dispone dell'autorizzazione per creare un tag lo spazio dei " -"nomi posseduto da '%s'" - -msgid "You are not permitted to create image members for the image." -msgstr "Non si è autorizzati a creare membri dell'immagine per l'immagine." - -#, python-format -msgid "You are not permitted to create images owned by '%s'." -msgstr "Non si è autorizzati a creare immagini di proprietà di '%s'." 
- -#, python-format -msgid "You are not permitted to create namespace owned by '%s'" -msgstr "" -"L'utente non dispone dell'autorizzazione per creare lo spazio dei nomi " -"posseduto da '%s'" - -#, python-format -msgid "You are not permitted to create object owned by '%s'" -msgstr "" -"L'utente non dispone dell'autorizzazione per creare l'oggetto posseduto da " -"'%s'" - -#, python-format -msgid "You are not permitted to create property owned by '%s'" -msgstr "" -"L'utente non dispone dell'autorizzazione per creare la proprietà posseduta " -"da '%s'" - -#, python-format -msgid "You are not permitted to create resource_type owned by '%s'" -msgstr "" -"L'utente non dispone dell'autorizzazione per creare il tipo_risorsa " -"posseduto da '%s'" - -#, python-format -msgid "You are not permitted to create this task with owner as: %s" -msgstr "Non si è autorizzati a creare questa attività con proprietario: %s" - -msgid "You are not permitted to deactivate this image." -msgstr "Non si è autorizzati a disattivare questa immagine." - -msgid "You are not permitted to delete this image." -msgstr "Non si è autorizzati a eliminare questa immagine." - -msgid "You are not permitted to delete this meta_resource_type." -msgstr "" -"L'utente non dispone dell'autorizzazione per eliminare questo " -"tipo_risorsa_metadati." - -msgid "You are not permitted to delete this namespace." -msgstr "" -"L'utente non dispone dell'autorizzazione per eliminare questo spazio dei " -"nomi." - -msgid "You are not permitted to delete this object." -msgstr "L'utente non dispone dell'autorizzazione per eliminare questo oggetto." - -msgid "You are not permitted to delete this property." -msgstr "" -"L'utente non dispone dell'autorizzazione per eliminare questa proprietà." - -msgid "You are not permitted to delete this tag." -msgstr "L'utente non dispone dell'autorizzazione per eliminare questo tag." - -#, python-format -msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." 
-msgstr "Non si è autorizzati a modificare '%(attr)s' in questa %(resource)s." - -#, python-format -msgid "You are not permitted to modify '%s' on this image." -msgstr "Non si è autorizzati a modificare '%s' in questa immagine." - -msgid "You are not permitted to modify locations for this image." -msgstr "Non si è autorizzati a modificare le ubicazioni per questa immagine." - -msgid "You are not permitted to modify tags on this image." -msgstr "Non si è autorizzati a modificare i tag in questa immagine." - -msgid "You are not permitted to modify this image." -msgstr "Non si è autorizzati a modificare questa immagine." - -msgid "You are not permitted to reactivate this image." -msgstr "Non si è autorizzati a riattivare questa immagine." - -msgid "You are not permitted to set status on this task." -msgstr "Non si è autorizzati ad impostare lo stato in questa attività." - -msgid "You are not permitted to update this namespace." -msgstr "" -"L'utente non dispone dell'autorizzazione per aggiornare questo spazio dei " -"nomi." - -msgid "You are not permitted to update this object." -msgstr "" -"L'utente non dispone dell'autorizzazione per aggiornare questo oggetto." - -msgid "You are not permitted to update this property." -msgstr "" -"L'utente non dispone dell'autorizzazione per aggiornare questa proprietà." - -msgid "You are not permitted to update this tag." -msgstr "L'utente non dispone dell'autorizzazione per aggiornare questo tag." - -msgid "You are not permitted to upload data for this image." -msgstr "Non si è autorizzati a caricare i dati per questa immagine." 
- -#, python-format -msgid "You cannot add image member for %s" -msgstr "Non è possibile aggiungere il membro dell'immagine per %s" - -#, python-format -msgid "You cannot delete image member for %s" -msgstr "Non è possibile eliminare il membro dell'immagine per %s" - -#, python-format -msgid "You cannot get image member for %s" -msgstr "Non è possibile ottenere il membro dell'immagine per %s" - -#, python-format -msgid "You cannot update image member %s" -msgstr "Non è possibile aggiornare il membro dell'immagine %s" - -msgid "You do not own this image" -msgstr "Non si possiede tale immagine" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a cert, " -"however you have failed to supply either a key_file parameter or set the " -"GLANCE_CLIENT_KEY_FILE environ variable" -msgstr "" -"Si è scelto di utilizzare nella connessione SSL ed è stato fornito un " -"certificato, tuttavia non è stato fornito un parametro key_file o la " -"variabile di ambiente GLANCE_CLIENT_KEY_FILE non è stata impostata" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a key, " -"however you have failed to supply either a cert_file parameter or set the " -"GLANCE_CLIENT_CERT_FILE environ variable" -msgstr "" -"Si è scelto di utilizzare SSL nella connessione e si è fornita una chiave, " -"tuttavia non è stato fornito un parametro cert_file parameter o la variabile " -"di ambiente GLANCE_CLIENT_CERT_FILE non è stata impostata" - -msgid "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" -msgstr "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" - -#, python-format -msgid "__init__() got unexpected keyword argument '%s'" -msgstr "__init__() ha ricevuto l'argomento di parole chiave '%s' non previsto" - -#, python-format -msgid "" -"cannot transition from %(current)s to %(next)s in update (wanted from_state=" -"%(from)s)" -msgstr "" -"Impossibile 
passare da %(current)s a %(next)s in fase di aggiornamento " -"(richiesto from_state=%(from)s)" - -#, python-format -msgid "custom properties (%(props)s) conflict with base properties" -msgstr "" -"le proprietà personalizzate (%(props)s) sono in conflitto con le proprietà " -"di base" - -msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" -msgstr "" -"Su questa piattaforma non sono disponibili hub 'poll' e 'selects' eventlog" - -msgid "is_public must be None, True, or False" -msgstr "is_public deve essere None, True, o False" - -msgid "limit param must be an integer" -msgstr "parametro limite deve essere un numero intero" - -msgid "limit param must be positive" -msgstr "parametro limite deve essere positivo" - -msgid "md5 hash of image contents." -msgstr "hash md5 del contenuto dell'immagine. " - -#, python-format -msgid "new_image() got unexpected keywords %s" -msgstr "new_image() ha ricevuto parole chiave %s non previste" - -msgid "protected must be True, or False" -msgstr "protetto deve essere True o False" - -#, python-format -msgid "unable to launch %(serv)s. Got error: %(e)s" -msgstr "impossibile avviare %(serv)s. Si è verificato l'errore: %(e)s" - -#, python-format -msgid "x-openstack-request-id is too long, max size %s" -msgstr "x-openstack-request-id è troppo lungo, dimensione max %s" diff --git a/glance/locale/ja/LC_MESSAGES/glance.po b/glance/locale/ja/LC_MESSAGES/glance.po deleted file mode 100644 index 9e28868f..00000000 --- a/glance/locale/ja/LC_MESSAGES/glance.po +++ /dev/null @@ -1,2083 +0,0 @@ -# Translations template for glance. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the glance project. -# -# Translators: -# Tomoyuki KATO , 2013 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: glance 15.0.0.0b3.dev29\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-06-23 20:54+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-12 05:21+0000\n" -"Last-Translator: Copied by Zanata \n" -"Language: ja\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" -"Language-Team: Japanese\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "%(cls)s exception was raised in the last rpc call: %(val)s" -msgstr "最後の RPC 呼び出しで %(cls)s 例外が発生しました: %(val)s" - -#, python-format -msgid "%(m_id)s not found in the member list of the image %(i_id)s." -msgstr "イメージ %(i_id)s のメンバーリストで %(m_id)s が見つかりません。" - -#, python-format -msgid "%(serv)s (pid %(pid)s) is running..." -msgstr "%(serv)s (pid %(pid)s) が実行中..." - -#, python-format -msgid "%(serv)s appears to already be running: %(pid)s" -msgstr "%(serv)s は既に実行されている可能性があります: %(pid)s" - -#, python-format -msgid "" -"%(strategy)s is registered as a module twice. %(module)s is not being used." -msgstr "" -"%(strategy)s はモジュールとして 2 回登録されています。%(module)s は使用されて" -"いません。" - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Could not load the " -"filesystem store" -msgstr "" -"%(task_type)s の %(task_id)s が正しく設定されていません。ファイルシステムスト" -"アをロードできませんでした。" - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. 
Missing work dir: " -"%(work_dir)s" -msgstr "" -"%(task_type)s の %(task_id)s が適切に設定されていません。作業ディレクトリー " -"%(work_dir)s がありません" - -#, python-format -msgid "%(verb)sing %(serv)s" -msgstr "%(serv)s の %(verb)s 中" - -#, python-format -msgid "%(verb)sing %(serv)s with %(conf)s" -msgstr "%(conf)s を使用して %(serv)s を %(verb)s 中" - -#, python-format -msgid "" -"%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " -"address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " -"separately from the port (i.e., \"[fe80::a:b:c]:9876\")." -msgstr "" -"%s host:port のペアを指定してください。host は IPv4 アドレス、IPv6 アドレス、" -"ホスト名、または FQDN です。IPv6 アドレスを使用する場合は、アドレスを大括弧で" -"囲んでポートと区別してください (例えば、\"[fe80::a:b:c]:9876\")。" - -#, python-format -msgid "%s can't contain 4 byte unicode characters." -msgstr "%s に 4 バイトの Unicode 文字が含まれていてはなりません。" - -#, python-format -msgid "%s is already stopped" -msgstr "%s は既に停止しています" - -#, python-format -msgid "%s is stopped" -msgstr "%s は停止しています" - -msgid "" -"--os_auth_url option or OS_AUTH_URL environment variable required when " -"keystone authentication strategy is enabled\n" -msgstr "" -"keystone 認証戦略が有効な場合は、--os_auth_url オプションまたはOS_AUTH_URL 環" -"境変数が必要です\n" - -msgid "A body is not expected with this request." -msgstr "この要求では本文は予期されません。" - -#, python-format -msgid "" -"A metadata definition object with name=%(object_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"name=%(object_name)s のメタデータ定義オブジェクトは、namespace=" -"%(namespace_name)s に既に存在します。" - -#, python-format -msgid "" -"A metadata definition property with name=%(property_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"name=%(property_name)s のメタデータ定義プロパティーは、namespace=" -"%(namespace_name)s に既に存在します。" - -#, python-format -msgid "" -"A metadata definition resource-type with name=%(resource_type_name)s already " -"exists." 
-msgstr "" -"name=%(resource_type_name)s のメタデータ定義リソースタイプは、既に存在しま" -"す。" - -msgid "A set of URLs to access the image file kept in external store" -msgstr "" -"外部ストアに保持されているイメージファイルにアクセスするための一連の URL" - -msgid "Amount of disk space (in GB) required to boot image." -msgstr "イメージのブートに必要なディスクスペースの量 (GB)" - -msgid "Amount of ram (in MB) required to boot image." -msgstr "イメージのブートに必要な RAM の量 (MB)" - -msgid "An identifier for the image" -msgstr "イメージの ID" - -msgid "An identifier for the image member (tenantId)" -msgstr "イメージメンバーの ID (テナント ID)" - -msgid "An identifier for the owner of this task" -msgstr "このタスクの所有者 ID" - -msgid "An identifier for the task" -msgstr "タスクの ID" - -msgid "An image file url" -msgstr "イメージファイルの URL" - -msgid "An image schema url" -msgstr "イメージスキーマの URL" - -msgid "An image self url" -msgstr "イメージ自体の URL" - -#, python-format -msgid "An image with identifier %s already exists" -msgstr "ID %s のイメージは既に存在します" - -msgid "An import task exception occurred" -msgstr "インポートタスクの例外が発生しました" - -msgid "An object with the same identifier already exists." -msgstr "同じ ID のオブジェクトが既に存在します。" - -msgid "An object with the same identifier is currently being operated on." -msgstr "現在、同じ ID を持つオブジェクトが操作されています。" - -msgid "An object with the specified identifier was not found." -msgstr "指定された ID を持つオブジェクトが見つかりませんでした。" - -msgid "An unknown exception occurred" -msgstr "不明な例外が発生しました" - -msgid "An unknown task exception occurred" -msgstr "不明なタスク例外が発生しました" - -#, python-format -msgid "Attempt to upload duplicate image: %s" -msgstr "重複したイメージのアップロードを試行します: %s" - -msgid "Attempted to update Location field for an image not in queued status." -msgstr "" -"待機状況になっていないイメージの「ロケーション」フィールドを更新しようとしま" -"した。" - -#, python-format -msgid "Attribute '%(property)s' is read-only." -msgstr "属性 '%(property)s' は読み取り専用です。" - -#, python-format -msgid "Attribute '%(property)s' is reserved." -msgstr "属性 '%(property)s' は予約されています。" - -#, python-format -msgid "Attribute '%s' is read-only." 
-msgstr "属性 '%s' は読み取り専用です。" - -#, python-format -msgid "Attribute '%s' is reserved." -msgstr "属性 '%s' は予約されています。" - -msgid "Attribute container_format can be only replaced for a queued image." -msgstr "" -"キューに入れられたイメージについてのみ属性 container_format を置換できます。" - -msgid "Attribute disk_format can be only replaced for a queued image." -msgstr "" -"キューに入れられたイメージについてのみ属性 disk_format を置換できます。" - -#, python-format -msgid "Auth service at URL %(url)s not found." -msgstr "URL %(url)s の認証サービスが見つかりません。" - -#, python-format -msgid "" -"Authentication error - the token may have expired during file upload. " -"Deleting image data for %s." -msgstr "" -"認証エラー - トークンがファイルアップロード中に失効した可能性があります。 %s " -"へのイメージデータを削除します。" - -msgid "Authorization failed." -msgstr "許可が失敗しました。" - -msgid "Available categories:" -msgstr "使用可能カテゴリー:" - -#, python-format -msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." -msgstr "" -"正しくない \"%s\" 照会フィルター形式。ISO 8601 DateTime 表記を使用してくださ" -"い。" - -#, python-format -msgid "Bad Command: %s" -msgstr "正しくないコマンド: %s" - -#, python-format -msgid "Bad header: %(header_name)s" -msgstr "ヘッダーが正しくありません: %(header_name)s" - -#, python-format -msgid "Bad value passed to filter %(filter)s got %(val)s" -msgstr "正しくない値がフィルター %(filter)s に渡され、%(val)s が取得されました" - -#, python-format -msgid "Badly formed S3 URI: %(uri)s" -msgstr "S3 URI の形式が正しくありません: %(uri)s" - -#, python-format -msgid "Badly formed credentials '%(creds)s' in Swift URI" -msgstr "Swift URI 内の資格情報 '%(creds)s' の形式が正しくありません" - -msgid "Badly formed credentials in Swift URI." -msgstr "Swift URI 内の資格情報の形式が正しくありません。" - -msgid "Body expected in request." -msgstr "要求の本体が必要です。" - -msgid "Cannot be a negative value" -msgstr "負の値にすることはできません" - -msgid "Cannot be a negative value." -msgstr "負の値にすることはできません。" - -#, python-format -msgid "Cannot convert image %(key)s '%(value)s' to an integer." -msgstr "イメージ %(key)s '%(value)s' を整数に変換できません。" - -msgid "Cannot remove last location in the image." 
-msgstr "イメージ内の最後のロケーションは削除できません。" - -#, python-format -msgid "Cannot save data for image %(image_id)s: %(error)s" -msgstr "イメージ %(image_id)s のデータを保存できません: %(error)s" - -msgid "Cannot set locations to empty list." -msgstr "空のリストにロケーションを設定することはできません。" - -msgid "Cannot upload to an unqueued image" -msgstr "キューに入れられていないイメージに対してアップロードできません" - -#, python-format -msgid "Checksum verification failed. Aborted caching of image '%s'." -msgstr "" -"チェックサムの検証に失敗しました。イメージ '%s' のキャッシュを打ち切りまし" -"た。" - -msgid "Client disconnected before sending all data to backend" -msgstr "すべてのデータをバックエンドへ送信する前にクライアントが切断されました" - -msgid "Command not found" -msgstr "コマンドが見つかりません" - -msgid "Configuration option was not valid" -msgstr "構成オプションが無効でした" - -#, python-format -msgid "Connect error/bad request to Auth service at URL %(url)s." -msgstr "接続エラー/URL %(url)s の認証サービスに対する正しくない要求。" - -#, python-format -msgid "Constructed URL: %s" -msgstr "URL を構成しました: %s" - -msgid "Container format is not specified." -msgstr "コンテナーフォーマットが指定されていません。" - -msgid "Content-Type must be application/octet-stream" -msgstr "Content-Type は application/octet-stream でなければなりません" - -#, python-format -msgid "Corrupt image download for image %(image_id)s" -msgstr "イメージ %(image_id)s のイメージダウンロードが壊れています" - -#, python-format -msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" -msgstr "30 秒間の試行後に %(host)s:%(port)s にバインドできませんでした" - -msgid "Could not find OVF file in OVA archive file." 
-msgstr "OVA アーカイブファイル内に OVF ファイルが見つかりませんでした。" - -#, python-format -msgid "Could not find metadata object %s" -msgstr "メタデータオブジェクト %s が見つかりませんでした" - -#, python-format -msgid "Could not find metadata tag %s" -msgstr "メタデータタグ %s が見つかりませんでした" - -#, python-format -msgid "Could not find namespace %s" -msgstr "名前空間 %s が見つかりませんでした" - -#, python-format -msgid "Could not find property %s" -msgstr "プロパティー %s が見つかりませんでした" - -msgid "Could not find required configuration option" -msgstr "必要な設定オプションが見つかりませんでした" - -#, python-format -msgid "Could not find task %s" -msgstr "タスク %s が見つかりませんでした" - -#, python-format -msgid "Could not update image: %s" -msgstr "イメージを更新できませんでした: %s" - -msgid "Currently, OVA packages containing multiple disk are not supported." -msgstr "現在、複数のディスクを含む OVA パッケージはサポートされません。" - -#, python-format -msgid "Data for image_id not found: %s" -msgstr "image_id のデータが見つかりません: %s" - -msgid "Data supplied was not valid." -msgstr "指定されたデータが無効でした。" - -msgid "Date and time of image member creation" -msgstr "イメージメンバーの作成日時" - -msgid "Date and time of image registration" -msgstr "イメージ登録日時" - -msgid "Date and time of last modification of image member" -msgstr "イメージメンバーの最終変更日時" - -msgid "Date and time of namespace creation" -msgstr "名前空間の作成日時" - -msgid "Date and time of object creation" -msgstr "オブジェクトの作成日時" - -msgid "Date and time of resource type association" -msgstr "リソースタイプ関連付けの日時" - -msgid "Date and time of tag creation" -msgstr "タグの作成日時" - -msgid "Date and time of the last image modification" -msgstr "イメージの最終変更日時" - -msgid "Date and time of the last namespace modification" -msgstr "名前空間の最終変更日時" - -msgid "Date and time of the last object modification" -msgstr "オブジェクトの最終変更日時" - -msgid "Date and time of the last resource type association modification" -msgstr "リソースタイプ関連付けの最終変更日時" - -msgid "Date and time of the last tag modification" -msgstr "タグの最終変更日時" - -msgid "Datetime when this resource was created" -msgstr "このリソースが作成された日時" - -msgid "Datetime when this resource was 
updated" -msgstr "このリソースが更新された日時" - -msgid "Datetime when this resource would be subject to removal" -msgstr "このリソースが削除される日時" - -#, python-format -msgid "Denying attempt to upload image because it exceeds the quota: %s" -msgstr "" -"イメージをアップロードしようとしましたが、割り当て量を超えてしまうため、拒否" -"されています: %s" - -#, python-format -msgid "Denying attempt to upload image larger than %d bytes." -msgstr "%d バイトより大きいイメージのアップロード試行を拒否しています。" - -msgid "Descriptive name for the image" -msgstr "イメージの記述名" - -msgid "Disk format is not specified." -msgstr "ディスクフォーマットが指定されていません。" - -#, python-format -msgid "" -"Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" -msgstr "" -"ドライバー %(driver_name)s を正しく設定できませんでした。理由: %(reason)s" - -msgid "" -"Error decoding your request. Either the URL or the request body contained " -"characters that could not be decoded by Glance" -msgstr "" -"要求のデコードのエラー。URL または要求本文に Glance でデコードできない文字が" -"含まれていました。" - -#, python-format -msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" -msgstr "イメージ %(image_id)s のメンバーの取得中のエラー: %(inner_msg)s" - -msgid "Error in store configuration. Adding images to store is disabled." -msgstr "" -"ストア設定にエラーがあります。ストアへのイメージの追加が無効になっています。" - -msgid "Expected a member in the form: {\"member\": \"image_id\"}" -msgstr "次の形式でメンバーを予期: {\"member\": \"image_id\"}" - -msgid "Expected a status in the form: {\"status\": \"status\"}" -msgstr "次の形式で状態を予期: {\"status\": \"status\"}" - -msgid "External source should not be empty" -msgstr "外部ソースは空であってはなりません" - -#, python-format -msgid "External sources are not supported: '%s'" -msgstr "外部ソースはサポートされていません: '%s'" - -#, python-format -msgid "Failed to activate image. Got error: %s" -msgstr "イメージのアクティブ化に失敗しました。受け取ったエラー: %s" - -#, python-format -msgid "Failed to add image metadata. 
Got error: %s" -msgstr "イメージメタデータを追加できませんでした。受け取ったエラー: %s" - -#, python-format -msgid "Failed to find image %(image_id)s to delete" -msgstr "削除するイメージ %(image_id)s が見つかりませんでした" - -#, python-format -msgid "Failed to find image to delete: %s" -msgstr "削除するイメージが見つかりませんでした: %s" - -#, python-format -msgid "Failed to find image to update: %s" -msgstr "更新するイメージが見つかりませんでした: %s" - -#, python-format -msgid "Failed to find resource type %(resourcetype)s to delete" -msgstr "削除するリソースタイプ %(resourcetype)s が見つかりませんでした" - -#, python-format -msgid "Failed to initialize the image cache database. Got error: %s" -msgstr "" -"イメージキャッシュデータベースを初期化できませんでした。受け取ったエラー: %s" - -#, python-format -msgid "Failed to read %s from config" -msgstr "設定から %s を読み取ることができませんでした" - -#, python-format -msgid "Failed to reserve image. Got error: %s" -msgstr "イメージを予約できませんでした。受け取ったエラー: %s" - -#, python-format -msgid "Failed to update image metadata. Got error: %s" -msgstr "イメージメタデータを更新できませんでした。エラー: %s" - -#, python-format -msgid "Failed to upload image %s" -msgstr "イメージ %s をアップロードできませんでした" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to HTTP error: " -"%(error)s" -msgstr "" -"HTTP エラーが発生したため、イメージ %(image_id)s のイメージデータのアップロー" -"ドに失敗しました: %(error)s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to internal error: " -"%(error)s" -msgstr "" -"内部エラーが発生したため、イメージ %(image_id)s のイメージデータをアップロー" -"ドできませんでした: %(error)s" - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"ファイル %(path)s に無効なバッキングファイル %(bfile)s があります。打ち切りま" -"す。" - -msgid "" -"File based imports are not allowed. Please use a non-local source of image " -"data." -msgstr "" -"ファイルベースのインポートは許可されません。イメージデータの非ローカルソース" -"を使用してください。" - -msgid "Forbidden image access" -msgstr "イメージにアクセスする権限がありません" - -#, python-format -msgid "Forbidden to delete a %s image." 
-msgstr "%s イメージの削除は禁止されています。" - -#, python-format -msgid "Forbidden to delete image: %s" -msgstr "イメージの削除は禁止されています: %s" - -#, python-format -msgid "Forbidden to modify '%(key)s' of %(status)s image." -msgstr "%(status)s イメージの '%(key)s' を変更することは禁止されています。" - -#, python-format -msgid "Forbidden to modify '%s' of image." -msgstr "イメージの '%s' を変更することは禁止されています。" - -msgid "Forbidden to reserve image." -msgstr "イメージの予約は禁止されています。" - -msgid "Forbidden to update deleted image." -msgstr "削除されたイメージの更新は禁止されています。" - -#, python-format -msgid "Forbidden to update image: %s" -msgstr "イメージの更新は禁止されています: %s" - -#, python-format -msgid "Forbidden upload attempt: %s" -msgstr "禁止されているアップロードの試行: %s" - -#, python-format -msgid "Forbidding request, metadata definition namespace=%s is not visible." -msgstr "要求は禁止されています。メタデータ定義 namespace=%s を表示できません" - -#, python-format -msgid "Forbidding request, task %s is not visible" -msgstr "要求を禁止しています。タスク %s は表示されません" - -msgid "Format of the container" -msgstr "コンテナーの形式" - -msgid "Format of the disk" -msgstr "ディスクの形式" - -#, python-format -msgid "Host \"%s\" is not valid." -msgstr "ホスト \"%s\" が無効です。" - -#, python-format -msgid "Host and port \"%s\" is not valid." -msgstr "ホストおよびポート \"%s\" が無効です。" - -msgid "" -"Human-readable informative message only included when appropriate (usually " -"on failure)" -msgstr "" -"適切な場合 (通常は障害発生時) にのみ、人間が読み取れる情報メッセージが含まれ" -"ます" - -msgid "If true, image will not be deletable." -msgstr "true の場合、イメージは削除可能になりません。" - -msgid "If true, namespace will not be deletable." -msgstr "true の場合、名前空間は削除可能になりません。" - -#, python-format -msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" -msgstr "イメージ %(id)s は使用中のため削除できませんでした: %(exc)s" - -#, python-format -msgid "Image %(id)s not found" -msgstr "イメージ %(id)s が見つかりません" - -#, python-format -msgid "" -"Image %(image_id)s could not be found after upload. 
The image may have been " -"deleted during the upload: %(error)s" -msgstr "" -"アップロード後にイメージ %(image_id)s が見つかりませんでした。このイメージは" -"アップロード中に削除された可能性があります: %(error)s" - -#, python-format -msgid "Image %(image_id)s is protected and cannot be deleted." -msgstr "イメージ %(image_id)s は保護されているため、削除できません。" - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload, cleaning up the chunks uploaded." -msgstr "" -"アップロード後にイメージ %s が見つかりませんでした。イメージはアップロード中" -"に削除された可能性があります。アップロードされたチャンクをクリーンアップ中で" -"す。" - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload." -msgstr "" -"アップロード後にイメージ %s が見つかりませんでした。このイメージはアップロー" -"ド中に削除された可能性があります。" - -#, python-format -msgid "Image %s is deactivated" -msgstr "イメージ %s は非アクティブ化されています" - -#, python-format -msgid "Image %s is not active" -msgstr "イメージ %s はアクティブではありません" - -#, python-format -msgid "Image %s not found." -msgstr "イメージ %s が見つかりません。" - -#, python-format -msgid "Image exceeds the storage quota: %s" -msgstr "イメージがストレージクォータを超えています: %s" - -msgid "Image id is required." 
-msgstr "イメージ ID が必要です。" - -msgid "Image is protected" -msgstr "イメージは保護されています" - -#, python-format -msgid "Image member limit exceeded for image %(id)s: %(e)s:" -msgstr "イメージ %(id)s のメンバー数がイメージメンバー上限を超えました: %(e)s:" - -#, python-format -msgid "Image name too long: %d" -msgstr "イメージ名が長すぎます: %d" - -msgid "Image operation conflicts" -msgstr "イメージ操作が競合しています" - -#, python-format -msgid "" -"Image status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "" -"%(cur_status)s から %(new_status)s へのイメージのステータス移行は許可されませ" -"ん" - -#, python-format -msgid "Image storage media is full: %s" -msgstr "イメージストレージのメディアがフルです: %s" - -#, python-format -msgid "Image tag limit exceeded for image %(id)s: %(e)s:" -msgstr "イメージ %(id)s のイメージタグ上限を超えました: %(e)s:" - -#, python-format -msgid "Image upload problem: %s" -msgstr "イメージのアップロード問題: %s" - -#, python-format -msgid "Image with identifier %s already exists!" -msgstr "ID %s のイメージは既に存在します。" - -#, python-format -msgid "Image with identifier %s has been deleted." 
-msgstr "ID %s のイメージが削除されました。" - -#, python-format -msgid "Image with identifier %s not found" -msgstr "ID %s のイメージが見つかりません" - -#, python-format -msgid "Image with the given id %(image_id)s was not found" -msgstr "指定された ID %(image_id)s を持つイメージが見つかりませんでした" - -#, python-format -msgid "" -"Incorrect auth strategy, expected \"%(expected)s\" but received " -"\"%(received)s\"" -msgstr "" -"認証ストラテジーが誤っています。\"%(expected)s\" が必要ですが、\"%(received)s" -"\" を受け取りました" - -#, python-format -msgid "Incorrect request: %s" -msgstr "正しくない要求: %s" - -#, python-format -msgid "Input does not contain '%(key)s' field" -msgstr "入力に '%(key)s' フィールドが含まれていません" - -#, python-format -msgid "Insufficient permissions on image storage media: %s" -msgstr "イメージストレージのメディアに対する許可が不十分です: %s" - -#, python-format -msgid "Invalid JSON pointer for this resource: '/%s'" -msgstr "このリソースの JSON ポインターは無効です: '/%s'" - -#, python-format -msgid "Invalid checksum '%s': can't exceed 32 characters" -msgstr "無効なチェックサム '%s': 32文字を超えることはできません" - -msgid "Invalid configuration in glance-swift conf file." -msgstr "glance-swift 設定ファイルの設定が無効です。" - -msgid "Invalid configuration in property protection file." -msgstr "プロパティー保護ファイルで設定が無効です。" - -#, python-format -msgid "Invalid container format '%s' for image." -msgstr "コンテナー形式 '%s' はイメージには無効です。" - -#, python-format -msgid "Invalid content type %(content_type)s" -msgstr "コンテンツタイプ %(content_type)s が無効です" - -#, python-format -msgid "Invalid disk format '%s' for image." -msgstr "ディスク形式 '%s' はイメージには無効です。" - -#, python-format -msgid "Invalid filter value %s. The quote is not closed." -msgstr "無効なフィルター値 %s。引用符が組みになっていません。" - -#, python-format -msgid "" -"Invalid filter value %s. There is no comma after closing quotation mark." -msgstr "無効なフィルター値 %s。終了引用符の後にコンマがありません。" - -#, python-format -msgid "" -"Invalid filter value %s. There is no comma before opening quotation mark." 
-msgstr "無効なフィルター値 %s。開始引用符の前にコンマがありません。" - -msgid "Invalid image id format" -msgstr "イメージ ID の形式が無効です" - -#, fuzzy -msgid "Invalid location" -msgstr "無効なロケーション" - -#, python-format -msgid "Invalid location %s" -msgstr "無効なロケーション %s" - -#, python-format -msgid "Invalid location: %s" -msgstr "無効なロケーション: %s" - -#, python-format -msgid "" -"Invalid location_strategy option: %(name)s. The valid strategy option(s) " -"is(are): %(strategies)s" -msgstr "" -"location_strategy オプションが無効です: %(name)s。有効なストラテジーオプショ" -"ン: %(strategies)s" - -#, fuzzy -msgid "Invalid locations" -msgstr "無効なロケーション" - -#, python-format -msgid "Invalid locations: %s" -msgstr "無効なロケーション: %s" - -msgid "Invalid marker format" -msgstr "マーカーフォーマットが無効です" - -msgid "Invalid marker. Image could not be found." -msgstr "無効なマーカーです。イメージが見つかりませんでした。" - -#, python-format -msgid "Invalid membership association: %s" -msgstr "無効なメンバーシップの関連付け: %s" - -msgid "" -"Invalid mix of disk and container formats. When setting a disk or container " -"format to one of 'aki', 'ari', or 'ami', the container and disk formats must " -"match." -msgstr "" -"ディスクとコンテナーの形式が無効な形で混在しています。ディスクまたはコンテ" -"ナーの形式を 'aki'、'ari'、または 'ami' のいずれかに設定するときは、コンテ" -"ナーとディスクの形式が一致していなければなりません。" - -#, python-format -msgid "" -"Invalid operation: `%(op)s`. It must be one of the following: %(available)s." -msgstr "" -"無効な操作: `%(op)s`。以下のいずれかでなければなりません: %(available)s。" - -msgid "Invalid position for adding a location." -msgstr "ロケーションの追加位置が無効です。" - -msgid "Invalid position for removing a location." -msgstr "ロケーション削除位置が無効です。" - -msgid "Invalid service catalog json." -msgstr "無効なサービスカタログ JSON ファイル。" - -#, python-format -msgid "Invalid sort direction: %s" -msgstr "無効なソート方向: %s" - -#, python-format -msgid "" -"Invalid sort key: %(sort_key)s. It must be one of the following: " -"%(available)s." 
-msgstr "" -"ソートキー %(sort_key)s は無効です。 %(available)s のいずれかでなければなりま" -"せん。" - -#, python-format -msgid "Invalid status value: %s" -msgstr "状態の値が無効です: %s" - -#, python-format -msgid "Invalid status: %s" -msgstr "無効な状況: %s" - -#, python-format -msgid "Invalid time format for %s." -msgstr "%s に対する無効な時刻フォーマット。" - -#, python-format -msgid "Invalid type value: %s" -msgstr "タイプ値が無効です: %s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition namespace " -"with the same name of %s" -msgstr "" -"無効な更新です。結果として、同じ名前 %s でメタデータ定義名前空間が重複しま" -"す。" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"無効な更新です。結果として、同じ name=%(name)s で、namespace=" -"%(namespace_name)s でメタデータ定義オブジェクトが重複します。" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"無効な更新です。結果として、同じ name=%(name)s で、namespace=" -"%(namespace_name)s でメタデータ定義オブジェクトが重複します。" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition property " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"無効な更新です。結果として、同じ name=%(name)s で、namespace=" -"%(namespace_name)s でメタデータ定義プロパティーが重複します。" - -#, python-format -msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" -msgstr "パラメーター '%(param)s' の値 '%(value)s' が無効です: %(extra_msg)s" - -#, python-format -msgid "Invalid value for option %(option)s: %(value)s" -msgstr "オプション %(option)s の値が無効です: %(value)s" - -#, python-format -msgid "Invalid visibility value: %s" -msgstr "無効な可視性の値: %s" - -msgid "It's invalid to provide multiple image sources." -msgstr "イメージソースの複数指定は無効です。" - -msgid "It's not allowed to add locations if locations are invisible." 
-msgstr "ロケーションが表示されない場合、ロケーションを追加できません。" - -msgid "It's not allowed to remove locations if locations are invisible." -msgstr "ロケーションが表示されない場合、ロケーションを削除できません。" - -msgid "It's not allowed to update locations if locations are invisible." -msgstr "ロケーションが表示されない場合、ロケーションを更新できません。" - -msgid "List of strings related to the image" -msgstr "イメージに関連する文字列のリスト" - -msgid "Malformed JSON in request body." -msgstr "要求本体の JSON の形式が誤りです。" - -msgid "Maximal age is count of days since epoch." -msgstr "最長存続時間は、エポック以降の日数です。" - -#, python-format -msgid "Maximum redirects (%(redirects)s) was exceeded." -msgstr "最大リダイレクト数 (%(redirects)s) を超えました。" - -#, python-format -msgid "Member %(member_id)s is duplicated for image %(image_id)s" -msgstr "イメージ %(image_id)s のメンバー %(member_id)s が重複しています" - -msgid "Member can't be empty" -msgstr "「メンバー」は空にできません" - -msgid "Member to be added not specified" -msgstr "追加するメンバーが指定されていません" - -msgid "Membership could not be found." -msgstr "メンバーシップが見つかりませんでした。" - -#, python-format -msgid "" -"Metadata definition namespace %(namespace)s is protected and cannot be " -"deleted." -msgstr "" -"メタデータ定義名前空間 %(namespace)s は保護されており、削除できません。" - -#, python-format -msgid "Metadata definition namespace not found for id=%s" -msgstr "id=%s のメタデータ定義名前空間が見つかりません" - -#, python-format -msgid "" -"Metadata definition object %(object_name)s is protected and cannot be " -"deleted." -msgstr "" -"メタデータ定義オブジェクト %(object_name)s は保護されており、削除できません。" - -#, python-format -msgid "Metadata definition object not found for id=%s" -msgstr "id=%s のメタデータ定義オブジェクトが見つかりません" - -#, python-format -msgid "" -"Metadata definition property %(property_name)s is protected and cannot be " -"deleted." 
-msgstr "" -"メタデータ定義プロパティー %(property_name)s は保護されており、削除できませ" -"ん。" - -#, python-format -msgid "Metadata definition property not found for id=%s" -msgstr "id=%s のメタデータ定義プロパティーが見つかりません" - -#, python-format -msgid "" -"Metadata definition resource-type %(resource_type_name)s is a seeded-system " -"type and cannot be deleted." -msgstr "" -"メタデータ定義リソースタイプ %(resource_type_name)s はシードシステムタイプで" -"あり、削除できません。" - -#, python-format -msgid "" -"Metadata definition resource-type-association %(resource_type)s is protected " -"and cannot be deleted." -msgstr "" -"メタデータ定義リソースタイプ関連付け %(resource_type)s は保護されており、削除" -"できません。" - -#, python-format -msgid "" -"Metadata definition tag %(tag_name)s is protected and cannot be deleted." -msgstr "メタデータ定義タグ %(tag_name)s は保護されており、削除できません。" - -#, python-format -msgid "Metadata definition tag not found for id=%s" -msgstr "id=%s のメタデータ定義タグが見つかりません" - -msgid "Minimal rows limit is 1." -msgstr "最少行数制限は 1 です。" - -#, python-format -msgid "Missing required credential: %(required)s" -msgstr "必須の資格情報がありません: %(required)s" - -#, python-format -msgid "" -"Multiple 'image' service matches for region %(region)s. This generally means " -"that a region is required and you have not supplied one." -msgstr "" -"領域 %(region)s に対して複数の「イメージ」サービスが一致します。これは一般" -"に、領域が必要であるのに、領域を指定していないことを意味します。" - -msgid "No authenticated user" -msgstr "認証されていないユーザー" - -#, python-format -msgid "No image found with ID %s" -msgstr "ID が %s であるイメージは見つかりません" - -#, python-format -msgid "No location found with ID %(loc)s from image %(img)s" -msgstr "イメージ %(img)s 内で ID が %(loc)s のロケーションは見つかりません" - -msgid "No permission to share that image" -msgstr "そのイメージを共有する許可がありません" - -#, python-format -msgid "Not allowed to create members for image %s." -msgstr "イメージ %s のメンバーの作成は許可されていません。" - -#, python-format -msgid "Not allowed to deactivate image in status '%s'" -msgstr "状況が「%s」であるイメージの非アクティブ化は許可されていません" - -#, python-format -msgid "Not allowed to delete members for image %s." 
-msgstr "イメージ %s のメンバーの削除は許可されていません。" - -#, python-format -msgid "Not allowed to delete tags for image %s." -msgstr "イメージ %s のタグの削除は許可されていません。" - -#, python-format -msgid "Not allowed to list members for image %s." -msgstr "イメージ %s のメンバーのリストは許可されていません。" - -#, python-format -msgid "Not allowed to reactivate image in status '%s'" -msgstr "状況が「%s」であるイメージの再アクティブ化は許可されていません" - -#, python-format -msgid "Not allowed to update members for image %s." -msgstr "イメージ %s のメンバーの更新は許可されていません。" - -#, python-format -msgid "Not allowed to update tags for image %s." -msgstr "イメージ %s のタグの更新は許可されていません。" - -#, python-format -msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" -msgstr "" -"イメージ %(image_id)s ではイメージデータのアップロードは許可されません: " -"%(error)s" - -msgid "Number of sort dirs does not match the number of sort keys" -msgstr "ソート方向の数がソートキーの数に一致しません" - -msgid "OVA extract is limited to admin" -msgstr "OVA 抽出が実行できるのは管理者のみです" - -msgid "Old and new sorting syntax cannot be combined" -msgstr "新旧のソート構文を結合することはできません" - -#, python-format -msgid "Operation \"%s\" requires a member named \"value\"." -msgstr "操作 \"%s\" には \"value\" という名前のメンバーが必要です。" - -msgid "" -"Operation objects must contain exactly one member named \"add\", \"remove\", " -"or \"replace\"." -msgstr "" -"操作オブジェクトには、\"add\"、\"remove\"、または \"replace\" という名前のメ" -"ンバーを正確に 1 つだけ含める必要があります。" - -msgid "" -"Operation objects must contain only one member named \"add\", \"remove\", or " -"\"replace\"." -msgstr "" -"操作オブジェクトには、\"add\"、\"remove\"、または \"replace\" という名前のメ" -"ンバーを 1 つしか含められません。" - -msgid "Operations must be JSON objects." -msgstr "操作は JSON オブジェクトでなければなりません。" - -#, fuzzy, python-format -msgid "Original locations is not empty: %s" -msgstr "元のロケーションは空ではありません: %s" - -msgid "Owner can't be updated by non admin." -msgstr "管理者以外は所有者を更新できません。" - -msgid "Owner must be specified to create a tag." -msgstr "タグを作成するには、所有者を指定する必要があります。" - -msgid "Owner of the image" -msgstr "イメージの所有者" - -msgid "Owner of the namespace." 
-msgstr "名前空間の所有者。" - -msgid "Param values can't contain 4 byte unicode." -msgstr "Param 値に 4 バイトの Unicode が含まれていてはなりません。" - -#, python-format -msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." -msgstr "" -"ポインター `%s` に、認識されているエスケープシーケンスの一部ではない \"~\" が" -"含まれています。" - -#, python-format -msgid "Pointer `%s` contains adjacent \"/\"." -msgstr "ポインター `%s` に隣接する \"/\" が含まれています。" - -#, python-format -msgid "Pointer `%s` does not contains valid token." -msgstr "ポインター `%s` に有効なトークンが含まれていません。" - -#, python-format -msgid "Pointer `%s` does not start with \"/\"." -msgstr "ポインター `%s` の先頭が \"/\" ではありません。" - -#, python-format -msgid "Pointer `%s` end with \"/\"." -msgstr "ポインター `%s` の末尾が \"/\" です。" - -#, python-format -msgid "Port \"%s\" is not valid." -msgstr "ポート \"%s\" が無効です。" - -#, python-format -msgid "Process %d not running" -msgstr "プロセス %d は実行されていません" - -#, python-format -msgid "Properties %s must be set prior to saving data." -msgstr "データの保存前にプロパティー %s を設定する必要があります。" - -#, python-format -msgid "" -"Property %(property_name)s does not start with the expected resource type " -"association prefix of '%(prefix)s'." -msgstr "" -"プロパティー %(property_name)s の先頭が、想定されるリソースタイプ関連付けのプ" -"レフィックス \"%(prefix)s\" ではありません。" - -#, python-format -msgid "Property %s already present." -msgstr "プロパティー %s は既に存在しています。" - -#, python-format -msgid "Property %s does not exist." -msgstr "プロパティー %s は存在しません。" - -#, python-format -msgid "Property %s may not be removed." -msgstr "プロパティー %s は削除できません。" - -#, python-format -msgid "Property %s must be set prior to saving data." -msgstr "データの保存前にプロパティー %s を設定する必要があります。" - -#, python-format -msgid "Property '%s' is protected" -msgstr "プロパティー '%s' は保護されています" - -msgid "Property names can't contain 4 byte unicode." -msgstr "プロパティー名に 4 バイトの Unicode が含まれていてはなりません。" - -#, python-format -msgid "" -"Provided image size must match the stored image size. 
(provided size: " -"%(ps)d, stored size: %(ss)d)" -msgstr "" -"指定するイメージのサイズは、保管されているイメージのサイズと一致しなければな" -"りません。(指定サイズ: %(ps)d、保管サイズ: %(ss)d)" - -#, python-format -msgid "Provided object does not match schema '%(schema)s': %(reason)s" -msgstr "" -"指定されたオブジェクトがスキーマ '%(schema)s' と一致しません: %(reason)s" - -#, python-format -msgid "Provided status of task is unsupported: %(status)s" -msgstr "指定されたタスク状況はサポートされていません: %(status)s" - -#, python-format -msgid "Provided type of task is unsupported: %(type)s" -msgstr "指定されたタスクタイプはサポートされていません: %(type)s" - -msgid "Provides a user friendly description of the namespace." -msgstr "分かりやすい名前空間の説明が提供されます。" - -msgid "Received invalid HTTP redirect." -msgstr "無効な HTTP リダイレクトを受け取りました。" - -#, python-format -msgid "Redirecting to %(uri)s for authorization." -msgstr "許可のために %(uri)s にリダイレクトしています。" - -#, python-format -msgid "Registry service can't use %s" -msgstr "レジストリーサービスでは %s を使用できません" - -#, python-format -msgid "Registry was not configured correctly on API server. Reason: %(reason)s" -msgstr "" -"レジストリーが API サーバーで正しく設定されていませんでした。理由: %(reason)s" - -#, python-format -msgid "Reload of %(serv)s not supported" -msgstr "%(serv)s の再ロードはサポートされていません" - -#, python-format -msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "%(serv)s (pid %(pid)s) をシグナル (%(sig)s) により再ロード中" - -#, python-format -msgid "Removing stale pid file %s" -msgstr "失効した pid ファイル %s を削除中" - -msgid "Request body must be a JSON array of operation objects." -msgstr "要求本文は、操作オブジェクトの JSON 配列でなければなりません。" - -msgid "Request must be a list of commands" -msgstr "要求はコマンドのリストである必要があります" - -#, python-format -msgid "Required store %s is invalid" -msgstr "必須のストア %s が無効です" - -msgid "" -"Resource type names should be aligned with Heat resource types whenever " -"possible: http://docs.openstack.org/developer/heat/template_guide/openstack." 
-"html" -msgstr "" -"可能であれば、リソースタイプ名を Heat リソースタイプと位置合わせします。" -"http://docs.openstack.org/developer/heat/template_guide/openstack.html" - -msgid "Response from Keystone does not contain a Glance endpoint." -msgstr "Keystone からの応答に Glance エンドポイントが含まれていません。" - -msgid "Scope of image accessibility" -msgstr "イメージのアクセス可能性の範囲" - -msgid "Scope of namespace accessibility." -msgstr "名前空間アクセシビリティーの範囲。" - -#, python-format -msgid "Server %(serv)s is stopped" -msgstr "サーバー %(serv)s は停止しています" - -#, python-format -msgid "Server worker creation failed: %(reason)s." -msgstr "サーバーワーカーの作成に失敗しました: %(reason)s" - -msgid "Signature verification failed" -msgstr "シグニチャーの検証が失敗しました" - -msgid "Size of image file in bytes" -msgstr "イメージファイルのサイズ (バイト)" - -msgid "" -"Some resource types allow more than one key / value pair per instance. For " -"example, Cinder allows user and image metadata on volumes. Only the image " -"properties metadata is evaluated by Nova (scheduling or drivers). This " -"property allows a namespace target to remove the ambiguity." -msgstr "" -"一部のリソースタイプでは、インスタンスごとに複数のキー/値のペアが許可されてい" -"ます。例えば、Cinder はボリューム上のユーザーおよびイメージメタデータを許可し" -"ています。イメージプロパティーメタデータのみ、Nova (スケジュールまたはドライ" -"バー) によって評価されます。このプロパティーによって、名前空間ターゲットから" -"あいまいさを排除できます。" - -msgid "Sort direction supplied was not valid." -msgstr "指定されたソート方向が無効でした。" - -msgid "Sort key supplied was not valid." -msgstr "指定されたソートキーが無効でした。" - -msgid "" -"Specifies the prefix to use for the given resource type. Any properties in " -"the namespace should be prefixed with this prefix when being applied to the " -"specified resource type. Must include prefix separator (e.g. a colon :)." -msgstr "" -"指定されたリソースタイプに使用するプレフィックスを指定します。名前空間にある" -"プロパティーはすべて、指定されたリソースタイプに適用されるときに、このプレ" -"フィックスが先頭に付けられます。コロン (:) などのプレフィックス区切り文字を組" -"み込む必要があります。" - -msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." 
-msgstr "状況は、\"保留中\"、\"受諾\"、または\"拒否\" でなければなりません。" - -msgid "Status not specified" -msgstr "状況が指定されていません" - -msgid "Status of the image" -msgstr "イメージの状態" - -#, python-format -msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "%(cur_status)s から %(new_status)s への状況遷移は許可されません" - -#, python-format -msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "%(serv)s (pid %(pid)s) をシグナル (%(sig)s) により停止中" - -#, python-format -msgid "Store for image_id not found: %s" -msgstr "image_id のストアが見つかりません: %s" - -#, python-format -msgid "Store for scheme %s not found" -msgstr "スキーマ %s のストアが見つかりません" - -#, python-format -msgid "" -"Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " -"(%(actual)s) did not match. Setting image status to 'killed'." -msgstr "" -"指定された %(attr)s (%(supplied)s) とアップロードされたイメージ (%(actual)s) " -"から生成された %(attr)s が一致していませんでした。イメージの状況を「強制終了" -"済み」に設定します。" - -msgid "Supported values for the 'container_format' image attribute" -msgstr "'container_format' イメージ属性に対してサポートされる値" - -msgid "Supported values for the 'disk_format' image attribute" -msgstr "'disk_format' イメージ属性に対してサポートされる値" - -#, python-format -msgid "Suppressed respawn as %(serv)s was %(rsn)s." -msgstr "%(serv)s として抑制された再作成は %(rsn)s でした。" - -msgid "System SIGHUP signal received." -msgstr "システム SIGHUP シグナルを受信しました。" - -#, python-format -msgid "Task '%s' is required" -msgstr "タスク '%s' が必要です" - -msgid "Task does not exist" -msgstr "タスクが存在しません" - -msgid "Task failed due to Internal Error" -msgstr "内部エラーが原因でタスクが失敗しました" - -msgid "Task was not configured properly" -msgstr "タスクが正しく設定されませんでした" - -#, python-format -msgid "Task with the given id %(task_id)s was not found" -msgstr "指定された id %(task_id)s のタスクは見つかりませんでした" - -msgid "The \"changes-since\" filter is no longer available on v2." 
-msgstr "\"changes-since\" フィルターは v2 上で使用できなくなりました。" - -#, python-format -msgid "The CA file you specified %s does not exist" -msgstr "指定した CA ファイル %s は存在しません" - -#, python-format -msgid "" -"The Image %(image_id)s object being created by this task %(task_id)s, is no " -"longer in valid status for further processing." -msgstr "" -"このタスク %(task_id)s で作成されているイメージ %(image_id)s オブジェクトは以" -"降の処理に有効な状況ではなくなりました。" - -msgid "The Store URI was malformed." -msgstr "ストア URI の形式に誤りがありました。" - -msgid "" -"The URL to the keystone service. If \"use_user_token\" is not in effect and " -"using keystone auth, then URL of keystone can be specified." -msgstr "" -"keystone サービスの URL。\"use_user_token\" が無効で、keystone 認証を使用して" -"いる場合、keystone の URL を指定できます。" - -msgid "" -"The administrators password. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"管理者パスワード。\"use_user_token\" が無効であれば、管理資格情報を指定できま" -"す。" - -msgid "" -"The administrators user name. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"管理者ユーザー名。\"use_user_token\" が無効であれば、管理資格情報を指定できま" -"す。" - -#, python-format -msgid "The cert file you specified %s does not exist" -msgstr "指定した証明書ファイル %s は存在しません" - -msgid "The current status of this task" -msgstr "このタスクの現行状況" - -#, python-format -msgid "" -"The device housing the image cache directory %(image_cache_dir)s does not " -"support xattr. It is likely you need to edit your fstab and add the " -"user_xattr option to the appropriate line for the device housing the cache " -"directory." -msgstr "" -"イメージキャッシュディレクトリー %(image_cache_dir)s が格納されているデバイス" -"では xattr はサポートされません。fstab を編集して、キャッシュディレクトリーが" -"格納されているデバイスの該当する行に user_xattr オプションを追加しなければな" -"らない可能性があります。" - -#, python-format -msgid "" -"The given uri is not valid. 
Please specify a valid uri from the following " -"list of supported uri %(supported)s" -msgstr "" -"指定した URI が無効です。次のサポートされている URI のリストから、有効な URI " -"を指定してください: %(supported)s" - -#, python-format -msgid "The incoming image is too large: %s" -msgstr "入力イメージが大きすぎます: %s" - -#, python-format -msgid "The key file you specified %s does not exist" -msgstr "指定した鍵ファイル %s は存在しません" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image locations. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"許可されるイメージロケーション数の制限を超えました。試行: %(attempted)s、最" -"大: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image members for this " -"image. Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"このイメージに対して許可されるイメージメンバー数の制限を超えました。試行: " -"%(attempted)s、最大: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"許可されるイメージプロパティー数の制限を超えました。試行: %(attempted)s、最" -"大: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(num)s, Maximum: %(quota)s" -msgstr "" -"許可されるイメージプロパティー数の制限を超えました。試行: %(num)s、最大: " -"%(quota)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image tags. Attempted: " -"%(attempted)s, Maximum: %(maximum)s" -msgstr "" -"許可されるイメージタグ数の制限を超えました。試行: %(attempted)s、最大: " -"%(maximum)s" - -#, python-format -msgid "The location %(location)s already exists" -msgstr "ロケーション %(location)s は既に存在します" - -#, python-format -msgid "The location data has an invalid ID: %d" -msgstr "ロケーションデータの ID が無効です: %d" - -#, python-format -msgid "" -"The metadata definition %(record_type)s with name=%(record_name)s not " -"deleted. Other records still refer to it." 
-msgstr "" -"name=%(record_name)s のメタデータ定義 %(record_type)s は削除されていません。" -"他のレコードがまだこのメタデータ定義を参照しています。" - -#, python-format -msgid "The metadata definition namespace=%(namespace_name)s already exists." -msgstr "メタデータ定義 namespace=%(namespace_name)s は既に存在します。" - -#, python-format -msgid "" -"The metadata definition object with name=%(object_name)s was not found in " -"namespace=%(namespace_name)s." -msgstr "" -"name=%(object_name)s のメタデータ定義オブジェクトが、namespace=" -"%(namespace_name)s に見つかりませんでした。" - -#, python-format -msgid "" -"The metadata definition property with name=%(property_name)s was not found " -"in namespace=%(namespace_name)s." -msgstr "" -"name=%(property_name)s のメタデータ定義プロパティーは、namespace=" -"%(namespace_name)s に見つかりませんでした。" - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s already exists." -msgstr "" -"resource-type=%(resource_type_name)s の、namespace=%(namespace_name)s へのメ" -"タデータ定義リソースタイプ関連付けは、既に存在します。" - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s, was not found." -msgstr "" -"resource-type=%(resource_type_name)s の、namespace=%(namespace_name)s へのメ" -"タデータ定義リソースタイプ関連付けが見つかりませんでした。" - -#, python-format -msgid "" -"The metadata definition resource-type with name=%(resource_type_name)s, was " -"not found." -msgstr "" -"name=%(resource_type_name)s のメタデータ定義リソースタイプが見つかりませんで" -"した。" - -#, python-format -msgid "" -"The metadata definition tag with name=%(name)s was not found in namespace=" -"%(namespace_name)s." -msgstr "" -"name=%(name)s のメタデータ定義タグが namespace=%(namespace_name)s に見つかり" -"ませんでした。" - -msgid "The parameters required by task, JSON blob" -msgstr "タスクによって要求されるパラメーター、JSON blob" - -msgid "The provided image is too large." -msgstr "指定されたイメージが大きすぎます。" - -msgid "" -"The region for the authentication service. 
If \"use_user_token\" is not in " -"effect and using keystone auth, then region name can be specified." -msgstr "" -"認証サービスの領域。\"use_user_token\" が無効で、keystone 認証を使用している" -"場合、領域名を指定できます。" - -msgid "The request returned 500 Internal Server Error." -msgstr "要求で「500 Internal Server Error」が返されました。" - -msgid "" -"The request returned 503 Service Unavailable. This generally occurs on " -"service overload or other transient outage." -msgstr "" -"要求で「503 Service Unavailable」が返されました。これは一般に、サービスの過負" -"荷または他の一時的な障害時に起こります。" - -#, python-format -msgid "" -"The request returned a 302 Multiple Choices. This generally means that you " -"have not included a version indicator in a request URI.\n" -"\n" -"The body of response returned:\n" -"%(body)s" -msgstr "" -"要求が「302 Multiple Choices」を返しました。これは通常、要求 URI にバージョン" -"標識を含めなかったことを意味します。\n" -"\n" -"返された応答の本体:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned a 413 Request Entity Too Large. This generally means " -"that rate limiting or a quota threshold was breached.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"要求で「413 Request Entity Too Large」が返されました。これは一般に、速度制限" -"または割り当て量のしきい値に違反したことを意味します。\n" -"\n" -"応答本体:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned an unexpected status: %(status)s.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"要求で予期しない状況が返されました: %(status)s。\n" -"\n" -"応答本体:\n" -"%(body)s" - -msgid "" -"The requested image has been deactivated. Image data download is forbidden." -msgstr "" -"要求されたイメージは非アクティブ化されています。イメージデータのダウンロード" -"は禁止されています。" - -msgid "The result of current task, JSON blob" -msgstr "現行タスクの結果、JSON blob" - -#, python-format -msgid "" -"The size of the data %(image_size)s will exceed the limit. %(remaining)s " -"bytes remaining." 
-msgstr "" -"データのサイズ %(image_size)s が制限を超えます。%(remaining)s バイト残されて" -"います。" - -#, python-format -msgid "The specified member %s could not be found" -msgstr "指定されたメンバー %s は見つかりませんでした" - -#, python-format -msgid "The specified metadata object %s could not be found" -msgstr "指定されたメタデータオブジェクト %s は見つかりませんでした" - -#, python-format -msgid "The specified metadata tag %s could not be found" -msgstr "指定されたメタデータタグ %s が見つかりませんでした" - -#, python-format -msgid "The specified namespace %s could not be found" -msgstr "指定された名前空間 %s は見つかりませんでした" - -#, python-format -msgid "The specified property %s could not be found" -msgstr "指定されたプロパティー %s は見つかりませんでした" - -#, python-format -msgid "The specified resource type %s could not be found " -msgstr "指定されたリソースタイプ %s は見つかりませんでした" - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'" -msgstr "" -"削除されたイメージロケーションの状況は「pending_delete」または「deleted」にの" -"み設定できます" - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'." -msgstr "" -"削除されたイメージロケーションの状況は「pending_delete」または「deleted」にの" -"み設定できます。" - -msgid "The status of this image member" -msgstr "このイメージメンバーの状況" - -msgid "" -"The strategy to use for authentication. If \"use_user_token\" is not in " -"effect, then auth strategy can be specified." -msgstr "" -"認証に使用されるストラテジー。\"use_user_token\" が無効であれば、認証ストラテ" -"ジーを指定できます。" - -#, python-format -msgid "" -"The target member %(member_id)s is already associated with image " -"%(image_id)s." -msgstr "" -"ターゲットメンバー %(member_id)s はイメージ %(image_id)s に既に関連付けられて" -"います。" - -msgid "" -"The tenant name of the administrative user. If \"use_user_token\" is not in " -"effect, then admin tenant name can be specified." -msgstr "" -"管理ユーザーのテナント名。\"use_user_token\" が無効であれば、管理テナント名を" -"指定できます。" - -msgid "The type of task represented by this content" -msgstr "このコンテンツによって表されるタスクのタイプ" - -msgid "The unique namespace text." 
-msgstr "固有の名前空間テキスト。" - -msgid "The user friendly name for the namespace. Used by UI if available." -msgstr "名前空間の分かりやすい名前。存在する場合は、UI によって使用されます。" - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. Error: %(ioe)s" -msgstr "" -"%(error_key_name)s %(error_filename)s に関して問題があります。確認してくださ" -"い。エラー: %(ioe)s" - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. OpenSSL error: %(ce)s" -msgstr "" -"%(error_key_name)s %(error_filename)s に関して問題があります。確認してくださ" -"い。OpenSSL エラー: %(ce)s" - -#, python-format -msgid "" -"There is a problem with your key pair. Please verify that cert " -"%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" -msgstr "" -"ご使用の鍵ペアに関して問題があります。証明書 %(cert_file)s と鍵 %(key_file)s " -"がペアになっていることを確認してください。OpenSSL エラー %(ce)s" - -msgid "There was an error configuring the client." -msgstr "クライアントの設定中にエラーが発生しました。" - -msgid "There was an error connecting to a server" -msgstr "サーバーへの接続中にエラーが発生しました" - -msgid "" -"This operation is currently not permitted on Glance Tasks. They are auto " -"deleted after reaching the time based on their expires_at property." -msgstr "" -"この操作は、Glance タスクでは現在許可されていません。これらのタスクは、" -"expires_at プロパティーに基づき、時間に達すると自動的に削除されます。" - -msgid "This operation is currently not permitted on Glance images details." -msgstr "この操作は、Glance イメージの詳細では現在許可されていません。" - -msgid "" -"Time in hours for which a task lives after, either succeeding or failing" -msgstr "成功または失敗の後でタスクが存続する時間 (時)" - -msgid "Too few arguments." 
-msgstr "引数が少なすぎます。" - -msgid "" -"URI cannot contain more than one occurrence of a scheme.If you have " -"specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " -"you need to change it to use the swift+http:// scheme, like so: swift+http://" -"user:pass@authurl.com/v1/container/obj" -msgstr "" -"URI に複数回、スキームを指定することはできません。swift://user:pass@http://" -"authurl.com/v1/container/obj のような URI を指定した場合は、次のように、swift" -"+http:// スキームを使用するよう変更する必要があります。swift+http://user:" -"pass@authurl.com/v1/container/obj" - -msgid "URL to access the image file kept in external store" -msgstr "外部ストアに保持されているイメージファイルにアクセスするための URL" - -#, python-format -msgid "" -"Unable to create pid file %(pid)s. Running as non-root?\n" -"Falling back to a temp file, you can stop %(service)s service using:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" -msgstr "" -"pid ファイル %(pid)s を作成できません。非ルートとして実行しますか?\n" -"一時ファイルにフォールバック中。次を使用して %(service)s サービスを\n" -"停止できます: %(file)s %(server)s stop --pid-file %(fb)s" - -#, python-format -msgid "Unable to filter by unknown operator '%s'." -msgstr "不明な演算子 '%s' によってフィルター処理を行うことができません。" - -msgid "Unable to filter on a range with a non-numeric value." -msgstr "非数値を含む範囲ではフィルタリングできません。" - -msgid "Unable to filter on a unknown operator." -msgstr "不明な演算子に対してフィルター処理を行うことができません。" - -msgid "Unable to filter using the specified operator." -msgstr "指定された演算子を使用してフィルター処理ができません。" - -msgid "Unable to filter using the specified range." -msgstr "指定された範囲ではフィルタリングできません。" - -#, python-format -msgid "Unable to find '%s' in JSON Schema change" -msgstr "JSON スキーマの変更で '%s' が見つかりません" - -#, python-format -msgid "" -"Unable to find `op` in JSON Schema change. It must be one of the following: " -"%(available)s." -msgstr "" -"JSON スキーマの変更で `op` が見つかりません。以下のいずれかでなければなりませ" -"ん: %(available)s。" - -msgid "Unable to increase file descriptor limit. Running as non-root?" -msgstr "ファイル記述子制限を増加できません。非ルートとして実行しますか?" 
- -#, python-format -msgid "" -"Unable to load %(app_name)s from configuration file %(conf_file)s.\n" -"Got: %(e)r" -msgstr "" -"設定ファイル %(conf_file)s から %(app_name)s をロードできません。\n" -"受け取ったエラー: %(e)r" - -#, python-format -msgid "Unable to load schema: %(reason)s" -msgstr "スキーマをロードできません: %(reason)s" - -#, python-format -msgid "Unable to locate paste config file for %s." -msgstr "%s の paste 設定ファイルが見つかりません。" - -#, python-format -msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" -msgstr "" -"イメージ %(image_id)s の重複イメージデータはアップロードできません: %(error)s" - -msgid "Unauthorized image access" -msgstr "許可されていないイメージアクセス" - -msgid "Unexpected body type. Expected list/dict." -msgstr "予期しない本文タイプ。予期されたのはリストまたは辞書です。" - -#, python-format -msgid "Unexpected response: %s" -msgstr "予期しない応答: %s" - -#, python-format -msgid "Unknown auth strategy '%s'" -msgstr "不明な認証ストラテジー '%s'" - -#, python-format -msgid "Unknown command: %s" -msgstr "不明なコマンド: %s" - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "ソート方向が不明です。'desc' または 'asc' でなければなりません" - -msgid "Unrecognized JSON Schema draft version" -msgstr "認識されない JSON スキーマのドラフトバージョン" - -msgid "Unrecognized changes-since value" -msgstr "認識されない changes-since 値" - -#, python-format -msgid "Unsupported sort_dir. Acceptable values: %s" -msgstr "サポートされない sort_dir です。許容値: %s" - -#, python-format -msgid "Unsupported sort_key. 
Acceptable values: %s" -msgstr "サポートされない sort_key です。許容値: %s" - -msgid "Virtual size of image in bytes" -msgstr "イメージの仮想サイズ (バイト)" - -#, python-format -msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" -msgstr "pid %(pid)s (%(file)s) が停止するまで 15 秒お待ちください。中断中です" - -msgid "" -"When running server in SSL mode, you must specify both a cert_file and " -"key_file option value in your configuration file" -msgstr "" -"サーバーを SSL モードで実行する場合は、cert_file オプション値と key_file オプ" -"ション値の両方を設定ファイルに指定する必要があります" - -msgid "" -"Whether to pass through the user token when making requests to the registry. " -"To prevent failures with token expiration during big files upload, it is " -"recommended to set this parameter to False.If \"use_user_token\" is not in " -"effect, then admin credentials can be specified." -msgstr "" -"レジストリーに対して要求を行うときに、ユーザートークンをパススルーするかどう" -"か。サイズの大きなファイルのアップロード中のトークンの有効期限切れに伴う障害" -"を防ぐために、このパラメーターは False に設定することが推奨されま" -"す。\"use_user_token\" が無効である場合は、管理者のクレデンシャルを指定できま" -"す。" - -#, python-format -msgid "Wrong command structure: %s" -msgstr "正しくないコマンド構造: %s" - -msgid "You are not authenticated." -msgstr "認証されていません。" - -msgid "You are not authorized to complete this action." -msgstr "このアクションの実行を許可されていません。" - -#, python-format -msgid "You are not authorized to lookup image %s." -msgstr "イメージ %s を調べる権限がありません。" - -#, python-format -msgid "You are not authorized to lookup the members of the image %s." -msgstr "イメージ %s のメンバーを調べる権限がありません。" - -#, python-format -msgid "You are not permitted to create a tag in the namespace owned by '%s'" -msgstr "'%s' が所有する名前空間でのタグの作成は許可されていません" - -msgid "You are not permitted to create image members for the image." -msgstr "そのイメージのイメージメンバーの作成は許可されていません。" - -#, python-format -msgid "You are not permitted to create images owned by '%s'." 
-msgstr "'%s' によって所有されているイメージの作成は許可されていません。" - -#, python-format -msgid "You are not permitted to create namespace owned by '%s'" -msgstr "'%s' によって所有される名前空間の作成は許可されません" - -#, python-format -msgid "You are not permitted to create object owned by '%s'" -msgstr "'%s' によって所有されるオブジェクトの作成は許可されません" - -#, python-format -msgid "You are not permitted to create property owned by '%s'" -msgstr "'%s' によって所有されるプロパティーの作成は許可されません" - -#, python-format -msgid "You are not permitted to create resource_type owned by '%s'" -msgstr "'%s' によって所有される resource_type の作成は許可されません" - -#, python-format -msgid "You are not permitted to create this task with owner as: %s" -msgstr "所有者 %s を使用してこのタスクを作成することは許可されません" - -msgid "You are not permitted to deactivate this image." -msgstr "このイメージの非アクティブ化は許可されていません。" - -msgid "You are not permitted to delete this image." -msgstr "このイメージの削除は許可されていません。" - -msgid "You are not permitted to delete this meta_resource_type." -msgstr "この meta_resource_type の削除は許可されません。" - -msgid "You are not permitted to delete this namespace." -msgstr "この名前空間の削除は許可されません。" - -msgid "You are not permitted to delete this object." -msgstr "このオブジェクトの削除は許可されません。" - -msgid "You are not permitted to delete this property." -msgstr "このプロパティーの削除は許可されません。" - -msgid "You are not permitted to delete this tag." -msgstr "このタグの削除は許可されていません。" - -#, python-format -msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." -msgstr "この %(resource)s 上の '%(attr)s' の変更は許可されません。" - -#, python-format -msgid "You are not permitted to modify '%s' on this image." -msgstr "このイメージ上の '%s' の変更は許可されません。" - -msgid "You are not permitted to modify locations for this image." -msgstr "このイメージのロケーションの変更は許可されていません。" - -msgid "You are not permitted to modify tags on this image." -msgstr "このイメージ上のタグの変更は許可されていません。" - -msgid "You are not permitted to modify this image." -msgstr "このイメージの変更は許可されていません。" - -msgid "You are not permitted to reactivate this image." 
-msgstr "このイメージの再アクティブ化は許可されていません。" - -msgid "You are not permitted to set status on this task." -msgstr "このタスクに関する状況を設定することは許可されません。" - -msgid "You are not permitted to update this namespace." -msgstr "この名前空間の更新は許可されません。" - -msgid "You are not permitted to update this object." -msgstr "このオブジェクトの更新は許可されません。" - -msgid "You are not permitted to update this property." -msgstr "このプロパティーの更新は許可されません。" - -msgid "You are not permitted to update this tag." -msgstr "このタグの更新は許可されていません。" - -msgid "You are not permitted to upload data for this image." -msgstr "このイメージのデータのアップロードは許可されていません。" - -#, python-format -msgid "You cannot add image member for %s" -msgstr "%s のイメージメンバーを追加できません" - -#, python-format -msgid "You cannot delete image member for %s" -msgstr "%s のイメージメンバーを削除できません" - -#, python-format -msgid "You cannot get image member for %s" -msgstr "%s のイメージメンバーを取得できません" - -#, python-format -msgid "You cannot update image member %s" -msgstr "イメージメンバー %s を更新できません" - -msgid "You do not own this image" -msgstr "このイメージを所有していません" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a cert, " -"however you have failed to supply either a key_file parameter or set the " -"GLANCE_CLIENT_KEY_FILE environ variable" -msgstr "" -"接続時に SSL を使用するよう選択し、証明書を指定しましたが、key_file パラメー" -"ターを指定しなかったか、GLANCE_CLIENT_KEY_FILE 環境変数を設定しませんでした" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a key, " -"however you have failed to supply either a cert_file parameter or set the " -"GLANCE_CLIENT_CERT_FILE environ variable" -msgstr "" -"接続時に SSL を使用するよう選択し、鍵を指定しましたが、cert_file パラメーター" -"を指定しなかったか、GLANCE_CLIENT_CERT_FILE 環境変数を設定しませんでした" - -msgid "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" -msgstr "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" - -#, python-format -msgid "__init__() got unexpected keyword argument '%s'" -msgstr "__init__() で予期しないキーワード引数 
'%s' が得られました" - -#, python-format -msgid "" -"cannot transition from %(current)s to %(next)s in update (wanted from_state=" -"%(from)s)" -msgstr "" -"更新で %(current)s から %(next)s に移行できません (from_state=%(from)s が必" -"要)" - -#, python-format -msgid "custom properties (%(props)s) conflict with base properties" -msgstr "カスタムプロパティー (%(props)s) が基本プロパティーと競合しています" - -msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" -msgstr "" -"このプラットフォームでは eventlet の「poll」ハブも「selects」ハブも使用できま" -"せん" - -msgid "is_public must be None, True, or False" -msgstr "is_public は、None、True、または False でなければなりません" - -msgid "limit param must be an integer" -msgstr "limit パラメーターは整数でなければなりません" - -msgid "limit param must be positive" -msgstr "limit パラメーターは正でなければなりません" - -msgid "md5 hash of image contents." -msgstr "イメージコンテンツの MD5 ハッシュ。" - -#, python-format -msgid "new_image() got unexpected keywords %s" -msgstr "new_image() で予期しないキーワード %s が得られました" - -msgid "protected must be True, or False" -msgstr "protected は True または False でなければなりません" - -#, python-format -msgid "unable to launch %(serv)s. Got error: %(e)s" -msgstr "%(serv)s を起動できません。受け取ったエラー: %(e)s" - -#, python-format -msgid "x-openstack-request-id is too long, max size %s" -msgstr "x-openstack-request-id が長すぎます。最大サイズは %s です" diff --git a/glance/locale/ko_KR/LC_MESSAGES/glance.po b/glance/locale/ko_KR/LC_MESSAGES/glance.po deleted file mode 100644 index b04b1be9..00000000 --- a/glance/locale/ko_KR/LC_MESSAGES/glance.po +++ /dev/null @@ -1,2051 +0,0 @@ -# Translations template for glance. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the glance project. -# -# Translators: -# HyunWoo Jo , 2014 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: glance 15.0.0.0b3.dev29\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-06-23 20:54+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-12 05:21+0000\n" -"Last-Translator: Copied by Zanata \n" -"Language: ko-KR\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" -"Language-Team: Korean (South Korea)\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "%(cls)s exception was raised in the last rpc call: %(val)s" -msgstr "%(cls)s 예외가 마지막 rpc 호출에서 발생: %(val)s" - -#, python-format -msgid "%(m_id)s not found in the member list of the image %(i_id)s." -msgstr "이미지 %(i_id)s의 멤버 목록에서 %(m_id)s을(를) 찾을 수 없습니다." - -#, python-format -msgid "%(serv)s (pid %(pid)s) is running..." -msgstr "%(serv)s(pid %(pid)s)이(가) 실행 중..." - -#, python-format -msgid "%(serv)s appears to already be running: %(pid)s" -msgstr "%(serv)s이(가) 이미 실행 중으로 표시됨: %(pid)s" - -#, python-format -msgid "" -"%(strategy)s is registered as a module twice. %(module)s is not being used." -msgstr "" -"%(strategy)s이(가) 모듈로 두 번 등록되었습니다. %(module)s이(가) 사용되지 사" -"용됩니다." - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Could not load the " -"filesystem store" -msgstr "" -"%(task_type)s의 %(task_id)s가 제대로 구성되지 않았습니다. 파일 시스템 저장소" -"를 로드할 수 없습니다." - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Missing work dir: " -"%(work_dir)s" -msgstr "" -"%(task_type)s의 %(task_id)s가 제대로 구성되지 않았습니다. 
누락 작업 디렉토" -"리: %(work_dir)s" - -#, python-format -msgid "%(verb)sing %(serv)s" -msgstr "%(serv)s을(를) %(verb)s 중" - -#, python-format -msgid "%(verb)sing %(serv)s with %(conf)s" -msgstr "%(serv)s에서 %(conf)s과(와) 함께 %(verb)s 중" - -#, python-format -msgid "" -"%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " -"address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " -"separately from the port (i.e., \"[fe80::a:b:c]:9876\")." -msgstr "" -"%s 호스트:포트 쌍을 지정하십시오. 여기서 호스트는 IPv4 주소, IPv6 주소, 호스" -"트 이름 또는 FQDN입니다. IPv6 주소를 사용하는 경우에는 포트와 분리하여 대괄호" -"로 묶으십시오(예: \"[fe80::a:b:c]:9876\")." - -#, python-format -msgid "%s can't contain 4 byte unicode characters." -msgstr "%s에는 4바이트 유니코드 문자를 포함할 수 없습니다." - -#, python-format -msgid "%s is already stopped" -msgstr "%s이(가) 이미 중지되었습니다." - -#, python-format -msgid "%s is stopped" -msgstr "%s이(가) 중지됨" - -msgid "" -"--os_auth_url option or OS_AUTH_URL environment variable required when " -"keystone authentication strategy is enabled\n" -msgstr "" -"키스톤 인증 전략이 사용될 경우 --os_auth_url 옵션 또는 OS_AUTH_URL 환경 변수" -"가 필요합니다.\n" - -msgid "A body is not expected with this request." -msgstr "이 요청에는 본문이 없어야 합니다." - -#, python-format -msgid "" -"A metadata definition object with name=%(object_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"name=%(object_name)s인 메타데이터 정의 오브젝트가 namespace=" -"%(namespace_name)s에서 찾을 수 없습니다." - -#, python-format -msgid "" -"A metadata definition property with name=%(property_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"name=%(property_name)s인 메타데이터 정의 특성이 namespace=%(namespace_name)s" -"에 이미 존재합니다." - -#, python-format -msgid "" -"A metadata definition resource-type with name=%(resource_type_name)s already " -"exists." -msgstr "" -"name=%(resource_type_name)s인 메타데이터 정의 자원 유형이 이미 존재합니다." 
- -msgid "A set of URLs to access the image file kept in external store" -msgstr "외부 저장소에 보관된 이미지 파일에 액세스하기 위한 URL 세트" - -msgid "Amount of disk space (in GB) required to boot image." -msgstr "이미지를 부팅하는 데 필요한 디스크 공간의 양(MB)" - -msgid "Amount of ram (in MB) required to boot image." -msgstr "이미지를 부팅하는 데 필요한 RAM의 양(MB)" - -msgid "An identifier for the image" -msgstr "이미지에 대한 ID" - -msgid "An identifier for the image member (tenantId)" -msgstr "이미지 멤버에 대한 ID(tenantId)" - -msgid "An identifier for the owner of this task" -msgstr "이 태스크 소유자의 ID" - -msgid "An identifier for the task" -msgstr "태스크의 ID" - -msgid "An image file url" -msgstr "이미지 파일 url" - -msgid "An image schema url" -msgstr "이미지 스키마 url" - -msgid "An image self url" -msgstr "이미지 자체 url" - -#, python-format -msgid "An image with identifier %s already exists" -msgstr "ID가 %s인 이미지가 이미 존재함" - -msgid "An import task exception occurred" -msgstr "가져오기 작업 예외 발생" - -msgid "An object with the same identifier already exists." -msgstr "동일한 ID를 갖는 오브젝트가 이미 존재합니다. " - -msgid "An object with the same identifier is currently being operated on." -msgstr "동일한 ID가 있는 오브젝트가 현재 작동됩니다." - -msgid "An object with the specified identifier was not found." -msgstr "지정된 ID를 갖는 오브젝트를 찾을 수 없습니다." - -msgid "An unknown exception occurred" -msgstr "알 수 없는 예외가 발생했음" - -msgid "An unknown task exception occurred" -msgstr "알 수 없는 태스크 예외 발생" - -#, python-format -msgid "Attempt to upload duplicate image: %s" -msgstr "중복 이미지를 업로드하려고 시도 중: %s" - -msgid "Attempted to update Location field for an image not in queued status." -msgstr "" -"큐에 들어간 상태에 있지 않은 이미지에 대한 위치 필드를 업데이트하려고 시도함" - -#, python-format -msgid "Attribute '%(property)s' is read-only." -msgstr "'%(property)s' 속성은 읽기 전용입니다." - -#, python-format -msgid "Attribute '%(property)s' is reserved." -msgstr "'%(property)s' 속성은 예약되어 있습니다." - -#, python-format -msgid "Attribute '%s' is read-only." -msgstr "'%s' 속성은 읽기 전용입니다." - -#, python-format -msgid "Attribute '%s' is reserved." 
-msgstr "'%s' 속성은 예약되어 있습니다." - -msgid "Attribute container_format can be only replaced for a queued image." -msgstr "큐에 있는 이미지에 대해 속성 container_format을 대체할 수 있습니다." - -msgid "Attribute disk_format can be only replaced for a queued image." -msgstr "큐에 있는 이미지에 대해 속성 disk_format을 대체할 수 있습니다." - -#, python-format -msgid "Auth service at URL %(url)s not found." -msgstr "URL %(url)s의 Auth 서비스를 찾을 수 없습니다." - -#, python-format -msgid "" -"Authentication error - the token may have expired during file upload. " -"Deleting image data for %s." -msgstr "" -"인증 오류 - 파일 업로드 중에 토큰이 만료되었습니다. %s의 이미지 데이터를 삭제" -"합니다." - -msgid "Authorization failed." -msgstr "권한 부여에 실패했습니다." - -msgid "Available categories:" -msgstr "사용 가능한 카테고리:" - -#, python-format -msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." -msgstr "" -"잘못된 \"%s\" 쿼리 필터 형식입니다. ISO 8601 DateTime 표기법을 사용하십시오." - -#, python-format -msgid "Bad Command: %s" -msgstr "잘못된 명령: %s" - -#, python-format -msgid "Bad header: %(header_name)s" -msgstr "잘못된 헤더: %(header_name)s" - -#, python-format -msgid "Bad value passed to filter %(filter)s got %(val)s" -msgstr "잘못된 값이 %(filter)s 필터에 전달됨, %(val)s 제공" - -#, python-format -msgid "Badly formed S3 URI: %(uri)s" -msgstr "양식이 잘못된 S3 URI: %(uri)s" - -#, python-format -msgid "Badly formed credentials '%(creds)s' in Swift URI" -msgstr "Swift URI에 양식이 잘못된 신임 정보 '%(creds)s'" - -msgid "Badly formed credentials in Swift URI." -msgstr "Swift URI에 양식이 잘못된 신임 정보가 있습니다." - -msgid "Body expected in request." -msgstr "요청에 본문이 있어야 합니다." - -msgid "Cannot be a negative value" -msgstr "음수 값일 수 없음" - -msgid "Cannot be a negative value." -msgstr "음수 값이 될 수 없습니다." - -#, python-format -msgid "Cannot convert image %(key)s '%(value)s' to an integer." -msgstr "이미지 %(key)s '%(value)s'을(를) 정수로 변환할 수 없습니다." - -msgid "Cannot remove last location in the image." -msgstr "이미지에서 마지막 위치를 제거할 수 없습니다." 
- -#, python-format -msgid "Cannot save data for image %(image_id)s: %(error)s" -msgstr "이미지 %(image_id)s 에 대한 데이터 저장 불가: %(error)s" - -msgid "Cannot set locations to empty list." -msgstr "위치를 비어 있는 목록으로 설정할 수 없습니다." - -msgid "Cannot upload to an unqueued image" -msgstr "큐에 들어가지 않은 이미지에 업로드할 수 없음" - -#, python-format -msgid "Checksum verification failed. Aborted caching of image '%s'." -msgstr "체크섬 검증에 실패했습니다. '%s' 이미지 캐시가 중단되었습니다." - -msgid "Client disconnected before sending all data to backend" -msgstr "모든 데이터를 백엔드로 전송하기 전에 클라이언트 연결이 끊어짐" - -msgid "Command not found" -msgstr "명령을 찾을 수 없음" - -msgid "Configuration option was not valid" -msgstr "구성 옵션이 올바르지 않음" - -#, python-format -msgid "Connect error/bad request to Auth service at URL %(url)s." -msgstr "연결 오류/URL %(url)s에서 Auth 서비스에 대한 잘못된 요청입니다." - -#, python-format -msgid "Constructed URL: %s" -msgstr "URL을 구성함: %s" - -msgid "Container format is not specified." -msgstr "컨테이너 형식이 지정되지 않았습니다." - -msgid "Content-Type must be application/octet-stream" -msgstr "Content-Type은 application/octet-stream이어야 함" - -#, python-format -msgid "Corrupt image download for image %(image_id)s" -msgstr "%(image_id)s 이미지에 대한 손상된 이미지 다운로드" - -#, python-format -msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" -msgstr "30초 동안 시도한 후 %(host)s:%(port)s에 바인드할 수 없음" - -msgid "Could not find OVF file in OVA archive file." -msgstr "OVA 아카이브 파일에서 OVF를 찾을 수 없습니다." 
- -#, python-format -msgid "Could not find metadata object %s" -msgstr "메타데이터 오브젝트 %s을(를) 찾을 수 없음" - -#, python-format -msgid "Could not find metadata tag %s" -msgstr "메타데이터 태그 %s을(를) 찾을 수 없음" - -#, python-format -msgid "Could not find namespace %s" -msgstr "%s 네임스페이스를 찾을 수 없음" - -#, python-format -msgid "Could not find property %s" -msgstr "특성 %s을(를) 찾을 수 없음" - -msgid "Could not find required configuration option" -msgstr "필요한 구성 옵션을 찾을 수 없음" - -#, python-format -msgid "Could not find task %s" -msgstr "태스크 %s을(를) 찾을 수 없음" - -#, python-format -msgid "Could not update image: %s" -msgstr "이미지를 업데이트할 수 없음: %s" - -msgid "Currently, OVA packages containing multiple disk are not supported." -msgstr "여러 디스크를 포함하는 OVA 패키지는 현재 지원되지 않습니다." - -#, python-format -msgid "Data for image_id not found: %s" -msgstr "image_id에 대한 데이터를 찾을 수 없음: %s" - -msgid "Data supplied was not valid." -msgstr "제공된 데이터가 올바르지 않습니다." - -msgid "Date and time of image member creation" -msgstr "이미지 멤버 작성 날짜 및 시간" - -msgid "Date and time of image registration" -msgstr "이미지 등록 날짜 및 시간" - -msgid "Date and time of last modification of image member" -msgstr "이미지 멤버의 최종 수정 날짜 및 시간" - -msgid "Date and time of namespace creation" -msgstr "네임스페이스 작성 날짜 및 시간" - -msgid "Date and time of object creation" -msgstr "오브젝트 작성 날짜 및 시간" - -msgid "Date and time of resource type association" -msgstr "자원 유형 연관 날짜 및 시간" - -msgid "Date and time of tag creation" -msgstr "태그 작성 날짜 및 시간" - -msgid "Date and time of the last image modification" -msgstr "최종 이미지 수정의 날짜 및 시간" - -msgid "Date and time of the last namespace modification" -msgstr "최종 네임스페이스 수정의 날짜 및 시간" - -msgid "Date and time of the last object modification" -msgstr "최종 오브젝트 수정의 날짜 및 시간" - -msgid "Date and time of the last resource type association modification" -msgstr "최종 자원 유형 연관 수정의 날짜 및 시간" - -msgid "Date and time of the last tag modification" -msgstr "최종 태그 수정 날짜 및 시간" - -msgid "Datetime when this resource was created" -msgstr "이 자원이 작성된 Datetime" - -msgid "Datetime 
when this resource was updated" -msgstr "이 자원이 업데이트된 Datetime" - -msgid "Datetime when this resource would be subject to removal" -msgstr "이 자원이 제거되는 Datetime" - -#, python-format -msgid "Denying attempt to upload image because it exceeds the quota: %s" -msgstr "할당량을 초과하기 때문에 이미지 업로드를 거부하는 중: %s" - -#, python-format -msgid "Denying attempt to upload image larger than %d bytes." -msgstr "%d바이트를 초과하는 이미지의 업로드를 거부하는 중입니다." - -msgid "Descriptive name for the image" -msgstr "이미지에 대한 설명식 이름" - -msgid "Disk format is not specified." -msgstr "디스크 형식이 지정되지 않았습니다." - -#, python-format -msgid "" -"Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" -msgstr "" -"%(driver_name)s 드라이버가 올바르게 구성되지 않았습니다. 이유: %(reason)s" - -msgid "" -"Error decoding your request. Either the URL or the request body contained " -"characters that could not be decoded by Glance" -msgstr "" -"요청을 디코딩하는 중에 오류가 발생했습니다. URL이나 요청 본문에 Glance에서 디" -"코딩할 수 없는 문자가 포함되어 있습니다." - -#, python-format -msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" -msgstr "이미지 %(image_id)s의 멤버를 페치하는 중에 오류 발생: %(inner_msg)s" - -msgid "Error in store configuration. Adding images to store is disabled." -msgstr "저장소 구성에 오류가 있습니다. 이미지를 저장소에 추가할 수 없습니다." - -msgid "Expected a member in the form: {\"member\": \"image_id\"}" -msgstr "{\"member\": \"image_id\"} 형식의 멤버가 있어야 함" - -msgid "Expected a status in the form: {\"status\": \"status\"}" -msgstr "{\"status\": \"status\"} 형식의 상태가 있어야 함" - -msgid "External source should not be empty" -msgstr "외부 소스는 비어있지 않아야 함" - -#, python-format -msgid "External sources are not supported: '%s'" -msgstr "외부 소스가 지원되지 않음: '%s'" - -#, python-format -msgid "Failed to activate image. Got error: %s" -msgstr "이미지 활성화에 실패했습니다. 오류 발생: %s" - -#, python-format -msgid "Failed to add image metadata. Got error: %s" -msgstr "이미지 메타데이터 추가 실패. 
오류 발생: %s" - -#, python-format -msgid "Failed to find image %(image_id)s to delete" -msgstr "삭제할 %(image_id)s 이미지를 찾는 데 실패함" - -#, python-format -msgid "Failed to find image to delete: %s" -msgstr "삭제할 image 가 발견되지 않음 : %s" - -#, python-format -msgid "Failed to find image to update: %s" -msgstr "업데이트할 이미지를 찾는 데 실패함: %s" - -#, python-format -msgid "Failed to find resource type %(resourcetype)s to delete" -msgstr "삭제하기 위한 리소스 타입 %(resourcetype)s 검색 실패" - -#, python-format -msgid "Failed to initialize the image cache database. Got error: %s" -msgstr "이미지 캐시 데이터베이스를 초기화하지 못했습니다. 오류 발생: %s" - -#, python-format -msgid "Failed to read %s from config" -msgstr "구성에서 %s을(를) 읽지 못했음" - -#, python-format -msgid "Failed to reserve image. Got error: %s" -msgstr "이미지 예약 실패, 오류 발생: %s" - -#, python-format -msgid "Failed to update image metadata. Got error: %s" -msgstr "이미지 메타데이터 업데이트 실패. 오류 발생: %s" - -#, python-format -msgid "Failed to upload image %s" -msgstr "이미지 %s을(를) 업로드하지 못했습니다." - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to HTTP error: " -"%(error)s" -msgstr "" -"HTTP 오류로 인해 이미지 %(image_id)s의 이미지 데이터 업로드 실패: %(error)s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to internal error: " -"%(error)s" -msgstr "" -"내부 오류로 인해 이미지 %(image_id)s의 이미지 데이터 업로드 실패: %(error)s" - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"파일 %(path)s에 올바르지 않은 백업 파일 %(bfile)s이(가) 있어 중단합니다." - -msgid "" -"File based imports are not allowed. Please use a non-local source of image " -"data." -msgstr "" -"파일 기반 가져오기는 허용되지 않습니다. 이미지 데이터의 로컬이 아닌 소스를 사" -"용하십시오." - -msgid "Forbidden image access" -msgstr "금지된 이미지 액세스" - -#, python-format -msgid "Forbidden to delete a %s image." -msgstr "%s 이미지를 삭제하는 것은 금지되어 있습니다." 
- -#, python-format -msgid "Forbidden to delete image: %s" -msgstr "이미지 삭제가 금지됨: %s" - -#, python-format -msgid "Forbidden to modify '%(key)s' of %(status)s image." -msgstr "%(status)s 이미지의 '%(key)s' 수정이 금지되었습니다." - -#, python-format -msgid "Forbidden to modify '%s' of image." -msgstr "이미지의 '%s'을(를) 수정하는 것이 금지되어 있습니다." - -msgid "Forbidden to reserve image." -msgstr "이미지 예약은 금지되어 있습니다." - -msgid "Forbidden to update deleted image." -msgstr "삭제된 이미지를 업데이트하는 것은 금지되어 있습니다." - -#, python-format -msgid "Forbidden to update image: %s" -msgstr "이미지 업데이트가 금지됨: %s" - -#, python-format -msgid "Forbidden upload attempt: %s" -msgstr "금지된 업로드 시도: %s" - -#, python-format -msgid "Forbidding request, metadata definition namespace=%s is not visible." -msgstr "요청이 금지되고 메타데이터 정의 namespace=%s이(가) 표시되지 않습니다." - -#, python-format -msgid "Forbidding request, task %s is not visible" -msgstr "요청 금지. 태스크 %s이(가) 표시되지 않음" - -msgid "Format of the container" -msgstr "컨테이너의 형식" - -msgid "Format of the disk" -msgstr "디스크의 형식" - -#, python-format -msgid "Host \"%s\" is not valid." -msgstr "\"%s\" 호스트가 올바르지 않습니다." - -#, python-format -msgid "Host and port \"%s\" is not valid." -msgstr "호스트 및 포트 \"%s\"이(가) 올바르지 않습니다." - -msgid "" -"Human-readable informative message only included when appropriate (usually " -"on failure)" -msgstr "" -"사용자가 읽을 수 있는 정보 메시지는 적절한 경우에만 포함됨 (일반적으로 실패 " -"시)" - -msgid "If true, image will not be deletable." -msgstr "true일 경우 이미지는 삭제 불가능합니다." - -msgid "If true, namespace will not be deletable." -msgstr "true일 경우 네임스페이스는 삭제 불가능합니다." - -#, python-format -msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" -msgstr "이미지 %(id)s이(가) 사용 중이므로 이를 삭제할 수 없음: %(exc)s" - -#, python-format -msgid "Image %(id)s not found" -msgstr "%(id)s 이미지를 찾을 수 없음" - -#, python-format -msgid "" -"Image %(image_id)s could not be found after upload. The image may have been " -"deleted during the upload: %(error)s" -msgstr "" -"업로드한 이미지 %(image_id)s을(를) 찾을 수 없음. 
이미지는 업로드 중에 삭제되" -"었을 수 있음: %(error)s" - -#, python-format -msgid "Image %(image_id)s is protected and cannot be deleted." -msgstr "%(image_id)s 이미지는 보호되므로 삭제할 수 없습니다." - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload, cleaning up the chunks uploaded." -msgstr "" -"업로드 후에 %s 이미지를 찾을 수 없습니다. 업로드 동안 이미지가 삭제되었을 수 " -"있습니다. 업로드된 청크를 정리합니다." - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload." -msgstr "" -"업로드 후 이미지 %s을(를) 찾을 수 없습니다. 업로드 중에 이미지가 삭되었을 수 " -"있습니다." - -#, python-format -msgid "Image %s is deactivated" -msgstr "%s 이미지가 비활성화됨" - -#, python-format -msgid "Image %s is not active" -msgstr "%s 이미지가 활성 상태가 아님" - -#, python-format -msgid "Image %s not found." -msgstr "%s 이미지를 찾을 수 없음" - -#, python-format -msgid "Image exceeds the storage quota: %s" -msgstr "이미지가 스토리지 할당량을 초과함: %s" - -msgid "Image id is required." -msgstr "이미지 ID가 필요합니다." - -msgid "Image is protected" -msgstr "이미지가 보호됨" - -#, python-format -msgid "Image member limit exceeded for image %(id)s: %(e)s:" -msgstr "이미지 %(id)s에 대한 이미지 멤버 한계 초과: %(e)s:" - -#, python-format -msgid "Image name too long: %d" -msgstr "이미지 이름이 너무 김: %d" - -msgid "Image operation conflicts" -msgstr "이미지 조작이 충돌함" - -#, python-format -msgid "" -"Image status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "" -"%(cur_status)s에서 %(new_status)s(으)로의 이미지 상태 전이가 허용되지 않음" - -#, python-format -msgid "Image storage media is full: %s" -msgstr "이미지 스토리지 미디어 공간이 꽉 참: %s" - -#, python-format -msgid "Image tag limit exceeded for image %(id)s: %(e)s:" -msgstr "이미지 %(id)s에 대한 이미지 태그 한계 초과: %(e)s:" - -#, python-format -msgid "Image upload problem: %s" -msgstr "이미지 업로드 문제: %s" - -#, python-format -msgid "Image with identifier %s already exists!" -msgstr "ID가 %s인 이미지가 이미 존재합니다!" - -#, python-format -msgid "Image with identifier %s has been deleted." 
-msgstr "ID가 %s인 이미지가 삭제되었습니다." - -#, python-format -msgid "Image with identifier %s not found" -msgstr "ID가 %s인 이미지를 찾을 수 없음" - -#, python-format -msgid "Image with the given id %(image_id)s was not found" -msgstr "지정된 ID %(image_id)s을(를) 가진 이미지를 찾을 수 없음" - -#, python-format -msgid "" -"Incorrect auth strategy, expected \"%(expected)s\" but received " -"\"%(received)s\"" -msgstr "" -"인증 전략이 올바르지 않음. 예상: \"%(expected)s\", 수신: \"%(received)s\"" - -#, python-format -msgid "Incorrect request: %s" -msgstr "올바르지 않은 요청: %s" - -#, python-format -msgid "Input does not contain '%(key)s' field" -msgstr "입력에 '%(key)s' 필드가 포함되어 있지 않음" - -#, python-format -msgid "Insufficient permissions on image storage media: %s" -msgstr "이미지 스토리지 미디어 권한 부족 : %s" - -#, python-format -msgid "Invalid JSON pointer for this resource: '/%s'" -msgstr "이 자원에 대해 올바르지 않은 JSON 포인터: '/%s'" - -#, python-format -msgid "Invalid checksum '%s': can't exceed 32 characters" -msgstr "올바르지 않은 체크섬 '%s': 32자를 초과할 수 없음" - -msgid "Invalid configuration in glance-swift conf file." -msgstr "glance-swift 구성 파일의 구성이 올바르지 않습니다." - -msgid "Invalid configuration in property protection file." -msgstr "특성 보호 파일의 올바르지 않은 구성입니다." - -#, python-format -msgid "Invalid container format '%s' for image." -msgstr "이미지에 대한 컨테이너 형식 '%s'이(가) 올바르지 않습니다." - -#, python-format -msgid "Invalid content type %(content_type)s" -msgstr "올바르지 않은 컨텐츠 유형 %(content_type)s" - -#, python-format -msgid "Invalid disk format '%s' for image." -msgstr "이미지에 대한 디스크 형식 '%s'이(가) 올바르지 않습니다." - -#, python-format -msgid "Invalid filter value %s. The quote is not closed." -msgstr "올바르지 않은 필터 값 %s입니다. 따옴표를 닫지 않았습니다." - -#, python-format -msgid "" -"Invalid filter value %s. There is no comma after closing quotation mark." -msgstr "올바르지 않은 필터 값 %s입니다. 닫기 따옴표 전에 쉼표가 없습니다." - -#, python-format -msgid "" -"Invalid filter value %s. There is no comma before opening quotation mark." -msgstr "올바르지 않은 필터 값 %s입니다. 열기 따옴표 전에 쉼표가 없습니다." 
- -msgid "Invalid image id format" -msgstr "올바르지 않은 이미지 ID 형식" - -msgid "Invalid location" -msgstr "잘못된 위치" - -#, python-format -msgid "Invalid location %s" -msgstr "올바르지 않은 위치 %s" - -#, python-format -msgid "Invalid location: %s" -msgstr "올바르지 않은 위치: %s" - -#, python-format -msgid "" -"Invalid location_strategy option: %(name)s. The valid strategy option(s) " -"is(are): %(strategies)s" -msgstr "" -"올바르지 않은 location_strategy 옵션: %(name)s. 올바른 전략 옵션 : " -"%(strategies)s" - -msgid "Invalid locations" -msgstr "잘못된 위치들" - -#, python-format -msgid "Invalid locations: %s" -msgstr "올바르지 않은 위치: %s" - -msgid "Invalid marker format" -msgstr "올바르지 않은 마커 형식" - -msgid "Invalid marker. Image could not be found." -msgstr "올바르지 않은 마커입니다. 이미지를 찾을 수 없습니다." - -#, python-format -msgid "Invalid membership association: %s" -msgstr "올바르지 않은 멤버십 연관: %s" - -msgid "" -"Invalid mix of disk and container formats. When setting a disk or container " -"format to one of 'aki', 'ari', or 'ami', the container and disk formats must " -"match." -msgstr "" -"디스크와 컨테이너 형식의 조합이 올바르지 않습니다. 디스크나 컨테이너 형식을 " -"'aki', 'ari', 또는 'ami' 중 하나로 설정할 경우 컨테이너와 디스크형식이 일치해" -"야 합니다." - -#, python-format -msgid "" -"Invalid operation: `%(op)s`. It must be one of the following: %(available)s." -msgstr "올바르지 않은 조작: `%(op)s`. 다음 중 하나여야 합니다. %(available)s." - -msgid "Invalid position for adding a location." -msgstr "위치를 추가하기에 올바르지 않은 포지션입니다." - -msgid "Invalid position for removing a location." -msgstr "위치를 제거하기에 올바르지 않은 포지션입니다." - -msgid "Invalid service catalog json." -msgstr "올바르지 않은 서비스 카탈로그 json입니다." - -#, python-format -msgid "Invalid sort direction: %s" -msgstr "올바르지 않은 정렬 방향: %s" - -#, python-format -msgid "" -"Invalid sort key: %(sort_key)s. It must be one of the following: " -"%(available)s." -msgstr "" -"올바르지 않은 정렬 키: %(sort_key)s. 다음 중 하나여야 합니다. %(available)s." 
- -#, python-format -msgid "Invalid status value: %s" -msgstr "올바르지 않은 상태 값: %s" - -#, python-format -msgid "Invalid status: %s" -msgstr "올바르지 않은 상태: %s" - -#, python-format -msgid "Invalid time format for %s." -msgstr "%s에 올바르지 않은 시간 형식입니다." - -#, python-format -msgid "Invalid type value: %s" -msgstr "올바르지 않은 유형 값: %s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition namespace " -"with the same name of %s" -msgstr "" -"올바르지 않은 업데이트입니다. %s과(와) 동일한 이름의 메타데이터 정의 네임스페" -"이스가 중복됩니다." - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"올바르지 않은 업데이트입니다. namespace=%(namespace_name)s에서 name=%(name)s" -"과(와) 동일한 이름의 메타데이터 정의 오브젝트가 중복됩니다." - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"올바르지 않은 업데이트입니다. namespace=%(namespace_name)s에서 name=%(name)s" -"과(와) 동일한 이름의 메타데이터 정의 오브젝트가 중복됩니다." - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition property " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"올바르지 않은 업데이트입니다. 네임스페이스=%(namespace_name)s의 동일한 이름=" -"%(name)s(으)로 메타데이터 정의 특성이 중복됩니다." - -#, python-format -msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" -msgstr "매개변수 '%(param)s'의 올바르지 않은 값 '%(value)s': %(extra_msg)s" - -#, python-format -msgid "Invalid value for option %(option)s: %(value)s" -msgstr "옵션 %(option)s에 올바르지 않은 값: %(value)s" - -#, python-format -msgid "Invalid visibility value: %s" -msgstr "올바르지 않은 가시성 값: %s" - -msgid "It's invalid to provide multiple image sources." -msgstr "여러 개의 이미지 소스를 제공하면 안 됩니다." - -msgid "It's not allowed to add locations if locations are invisible." -msgstr "위치가 표시되지 않는 경우 위치를 추가할 수 없습니다." 
- -msgid "It's not allowed to remove locations if locations are invisible." -msgstr "위치가 표시되지 않는 경우 위치를 제거할 수 없습니다." - -msgid "It's not allowed to update locations if locations are invisible." -msgstr "위치가 표시되지 않는 경우 위치를 업데이트할 수 없습니다." - -msgid "List of strings related to the image" -msgstr "이미지와 관련된 문자열 목록" - -msgid "Malformed JSON in request body." -msgstr "요청 본문에서 JSON의 형식이 올바르지 않습니다." - -msgid "Maximal age is count of days since epoch." -msgstr "최대 기간은 epoch 이후의 일 수입니다." - -#, python-format -msgid "Maximum redirects (%(redirects)s) was exceeded." -msgstr "최대 경로 재지정(%(redirects)s)에 도달했습니다." - -#, python-format -msgid "Member %(member_id)s is duplicated for image %(image_id)s" -msgstr "멤버 %(member_id)s이(가) 이미지 %(image_id)s에 대해 중복됨" - -msgid "Member can't be empty" -msgstr "멤버는 비어 있을 수 없음" - -msgid "Member to be added not specified" -msgstr "추가할 멤버를 지정하지 않음" - -msgid "Membership could not be found." -msgstr "멤버십을 찾을 수 없습니다." - -#, python-format -msgid "" -"Metadata definition namespace %(namespace)s is protected and cannot be " -"deleted." -msgstr "" -"메타데이터 정의 네임스페이스 %(namespace)s이(가) 보호되고 삭제되었을 수 있습" -"니다." - -#, python-format -msgid "Metadata definition namespace not found for id=%s" -msgstr "id=%s에 대한 메타데이터 정의 네임스페이스를 찾을 수 없음" - -#, python-format -msgid "" -"Metadata definition object %(object_name)s is protected and cannot be " -"deleted." -msgstr "" -"메타데이터 정의 오브젝트 %(object_name)s이(가) 보호되고 삭제되었을 수 있습니" -"다." - -#, python-format -msgid "Metadata definition object not found for id=%s" -msgstr "id=%s에 대한 메타데이터 정의 오브젝트를 찾을 수 없음" - -#, python-format -msgid "" -"Metadata definition property %(property_name)s is protected and cannot be " -"deleted." -msgstr "" -"메타데이터 정의 특성 %(property_name)s이(가) 보호되고 삭제되었을 수 있습니다." 
- -#, python-format -msgid "Metadata definition property not found for id=%s" -msgstr "id=%s에 대한 메타데이터 정의 특성을 찾을 수 없음" - -#, python-format -msgid "" -"Metadata definition resource-type %(resource_type_name)s is a seeded-system " -"type and cannot be deleted." -msgstr "" -"메타데이터 정의 resource-type %(resource_type_name)s은(는) 시드(seed) 시스템 " -"유형이고 삭제할 수 없습니다." - -#, python-format -msgid "" -"Metadata definition resource-type-association %(resource_type)s is protected " -"and cannot be deleted." -msgstr "" -"메타데이터 정의 resource-type-association %(resource_type)s이(가) 보호되고 삭" -"제할 수 없습니다." - -#, python-format -msgid "" -"Metadata definition tag %(tag_name)s is protected and cannot be deleted." -msgstr "메타데이터 정의 태그 %(tag_name)s은(는) 보호되므로 삭제할 수 없습니다." - -#, python-format -msgid "Metadata definition tag not found for id=%s" -msgstr "id=%s에 대한 메타데이터 정의 태그를 찾을 수 없음" - -msgid "Minimal rows limit is 1." -msgstr "최소 행 제한은 1입니다." - -#, python-format -msgid "Missing required credential: %(required)s" -msgstr "필수 신임 정보 누락: %(required)s" - -#, python-format -msgid "" -"Multiple 'image' service matches for region %(region)s. This generally means " -"that a region is required and you have not supplied one." -msgstr "" -"다중 '이미지' 서비스가 %(region)s 리젼에 일치합니다. 이는 일반적으로 리젼이 " -"필요하지만 아직 리젼을 제공하지 않은 경우 발생합니다." - -msgid "No authenticated user" -msgstr "인증된 사용자가 없음" - -#, python-format -msgid "No image found with ID %s" -msgstr "ID가 %s인 이미지를 찾을 수 없음" - -#, python-format -msgid "No location found with ID %(loc)s from image %(img)s" -msgstr "%(img)s 이미지에서 ID가 %(loc)s인 위치를 찾을 수 없음" - -msgid "No permission to share that image" -msgstr "해당 이미지를 공유한 권한이 없음" - -#, python-format -msgid "Not allowed to create members for image %s." -msgstr "이미지 %s의 멤버를 작성할 수 없습니다." - -#, python-format -msgid "Not allowed to deactivate image in status '%s'" -msgstr "'%s' 상태의 이미지를 비활성화할 수 없음" - -#, python-format -msgid "Not allowed to delete members for image %s." -msgstr "이미지 %s의 멤버를 삭제할 수 없습니다." 
- -#, python-format -msgid "Not allowed to delete tags for image %s." -msgstr "이미지 %s의 태그를 삭제할 수 없습니다." - -#, python-format -msgid "Not allowed to list members for image %s." -msgstr "이미지 %s의 멤버를 나열할 수 없습니다." - -#, python-format -msgid "Not allowed to reactivate image in status '%s'" -msgstr "'%s' 상태의 이미지를 다시 활성화할 수 없음" - -#, python-format -msgid "Not allowed to update members for image %s." -msgstr "이미지 %s의 멤버를 업데이트할 수 없습니다." - -#, python-format -msgid "Not allowed to update tags for image %s." -msgstr "이미지 %s의 태그를 업데이트할 수 없습니다." - -#, python-format -msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" -msgstr "" -"이미지 %(image_id)s에 대한 이미지 데이터의 업로드가 허용되지 않음: %(error)s" - -msgid "Number of sort dirs does not match the number of sort keys" -msgstr "정렬 디렉토리 수가 정렬 키 수와 일치하지 않음" - -msgid "OVA extract is limited to admin" -msgstr "관리자만 OVA를 추출할 수 있음" - -msgid "Old and new sorting syntax cannot be combined" -msgstr "이전 및 새 저장 구문은 결합할 수 없음" - -#, python-format -msgid "Operation \"%s\" requires a member named \"value\"." -msgstr "\"%s\" 오퍼레이션에는 \"value\"라는 이름의 멤버가 필요합니다." - -msgid "" -"Operation objects must contain exactly one member named \"add\", \"remove\", " -"or \"replace\"." -msgstr "" -"조작 오브젝트에는 \"add\", \"remove\", 또는 \"replace\" 멤버 중 하나만 포함되" -"어야 합니다." - -msgid "" -"Operation objects must contain only one member named \"add\", \"remove\", or " -"\"replace\"." -msgstr "" -"조작 오브젝트에는 \"add\", \"remove\",또는 \"replace\" 멤버 중 하나만 포함되" -"어야 합니다." - -msgid "Operations must be JSON objects." -msgstr "오퍼레이션은 JSON 오브젝트여야 합니다." - -#, python-format -msgid "Original locations is not empty: %s" -msgstr "원본 위치가 비어있지 않음: %s" - -msgid "Owner can't be updated by non admin." -msgstr "비관리자는 소유자를 업데이트할 수 없습니다." - -msgid "Owner must be specified to create a tag." -msgstr "태그를 작성하려면 소유자로 지정되어야 합니다." - -msgid "Owner of the image" -msgstr "이미지의 소유자" - -msgid "Owner of the namespace." -msgstr "네임스페이스의 소유자입니다." - -msgid "Param values can't contain 4 byte unicode." 
-msgstr "매개변수 값에 4바이트 유니코드를 포함할 수 없습니다." - -#, python-format -msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." -msgstr "" -"`%s` 포인터에 인식되는 이스케이프 시퀀스가 아닌 \"~\"가 포함되어 있습니다." - -#, python-format -msgid "Pointer `%s` contains adjacent \"/\"." -msgstr "포인터 `%s`에 인접 \"/\"가 포함됩니다." - -#, python-format -msgid "Pointer `%s` does not contains valid token." -msgstr "포인터 `%s`에 올바른 토큰이 포함되어 있지 않습니다." - -#, python-format -msgid "Pointer `%s` does not start with \"/\"." -msgstr "`%s` 포인터가 \"/\"로 시작하지 않습니다." - -#, python-format -msgid "Pointer `%s` end with \"/\"." -msgstr "포인터 `%s`이(가) \"/\"로 끝납니다." - -#, python-format -msgid "Port \"%s\" is not valid." -msgstr "\"%s\" 포트가 올바르지 않습니다." - -#, python-format -msgid "Process %d not running" -msgstr "프로세스 %d이(가) 실행 중이지 않음" - -#, python-format -msgid "Properties %s must be set prior to saving data." -msgstr "데이터를 저장하기 전에 %s 특성을 설정해야 합니다." - -#, python-format -msgid "" -"Property %(property_name)s does not start with the expected resource type " -"association prefix of '%(prefix)s'." -msgstr "" -"특성 %(property_name)s이(가) 예상 자원 유형 연관 접두부인 '%(prefix)s'(으)로 " -"시작하지 않습니다." - -#, python-format -msgid "Property %s already present." -msgstr "%s 특성이 이미 존재합니다." - -#, python-format -msgid "Property %s does not exist." -msgstr "%s 특성이 존재하지 않습니다." - -#, python-format -msgid "Property %s may not be removed." -msgstr "%s 특성을 제거할 수 없습니다." - -#, python-format -msgid "Property %s must be set prior to saving data." -msgstr "데이터를 저장하기 전에 %s 특성을 설정해야 합니다." - -#, python-format -msgid "Property '%s' is protected" -msgstr "'%s' 특성이 보호됨 " - -msgid "Property names can't contain 4 byte unicode." -msgstr "특성 이름에 4바이트 유니코드를 포함할 수 없습니다." - -#, python-format -msgid "" -"Provided image size must match the stored image size. (provided size: " -"%(ps)d, stored size: %(ss)d)" -msgstr "" -"제공된 이미지 크기가 저장된 이미지 크기와 일치해야 합니다(제공된 크기: " -"%(ps)d, 저장된 크기: %(ss)d)." 
- -#, python-format -msgid "Provided object does not match schema '%(schema)s': %(reason)s" -msgstr "제공된 오브젝트가 스키마 '%(schema)s'에 일치하지 않음: %(reason)s" - -#, python-format -msgid "Provided status of task is unsupported: %(status)s" -msgstr "제공된 태스크의 상태가 지원되지 않음: %(status)s" - -#, python-format -msgid "Provided type of task is unsupported: %(type)s" -msgstr "제공된 태스크 유형이 지원되지 않음: %(type)s" - -msgid "Provides a user friendly description of the namespace." -msgstr "사용자에게 익숙한 네임스페이스 설명을 제공합니다." - -msgid "Received invalid HTTP redirect." -msgstr "올바르지 않은 HTTP 경로 재지정을 수신했습니다." - -#, python-format -msgid "Redirecting to %(uri)s for authorization." -msgstr "권한 부여를 위해 %(uri)s(으)로 경로 재지정 중입니다." - -#, python-format -msgid "Registry service can't use %s" -msgstr "레지스트리 서비스에서 %s을(를) 사용할 수 없음" - -#, python-format -msgid "Registry was not configured correctly on API server. Reason: %(reason)s" -msgstr "" -"API 서버에서 레지스트리가 올바르게 구성되지 않았습니다. 이유: %(reason)s" - -#, python-format -msgid "Reload of %(serv)s not supported" -msgstr "%(serv)s을(를) 다시 로드할 수 없음" - -#, python-format -msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "신호(%(sig)s)와 함께 %(serv)s(pid %(pid)s) 다시 로드 중" - -#, python-format -msgid "Removing stale pid file %s" -msgstr "시간이 경과된 pid 파일 %s을(를) 제거하는 중" - -msgid "Request body must be a JSON array of operation objects." -msgstr "요청 본문은 오퍼레이션 오브젝트의 JSON 배열이어야 합니다." - -msgid "Request must be a list of commands" -msgstr "요청은 쉼표로 구분한 목록이어야 합니다." - -#, python-format -msgid "Required store %s is invalid" -msgstr "필수 저장소 %s이(가) 올바르지 않음" - -msgid "" -"Resource type names should be aligned with Heat resource types whenever " -"possible: http://docs.openstack.org/developer/heat/template_guide/openstack." -"html" -msgstr "" -"자원 유형 이름은 히트 자원 유형에 맞게 지정되어야 합니다.사용 가능: http://" -"docs.openstack.org/developer/heat/template_guide/openstack.html" - -msgid "Response from Keystone does not contain a Glance endpoint." -msgstr "Keystone의 응답에 Glance 엔드포인트가 들어있지 않습니다." 
- -msgid "Scope of image accessibility" -msgstr "이미지 접근성의 범위" - -msgid "Scope of namespace accessibility." -msgstr "네임스페이스 접근성의 범위입니다." - -#, python-format -msgid "Server %(serv)s is stopped" -msgstr "서버 %(serv)s이(가) 중지됨" - -#, python-format -msgid "Server worker creation failed: %(reason)s." -msgstr "서버 작업자 작성에 실패함: %(reason)s." - -msgid "Signature verification failed" -msgstr "서명 검증 실패" - -msgid "Size of image file in bytes" -msgstr "이미지 파일의 크기(바이트)" - -msgid "" -"Some resource types allow more than one key / value pair per instance. For " -"example, Cinder allows user and image metadata on volumes. Only the image " -"properties metadata is evaluated by Nova (scheduling or drivers). This " -"property allows a namespace target to remove the ambiguity." -msgstr "" -"일부 자원 유형은 인스턴스 당 둘 이상의 키 / 값 쌍을 허용합니다.예를 들어, " -"Cinder는 볼륨에 사용자 및 이미지 메타데이터를 허용합니다. 이미지 특성 메타데" -"이터만 Nova(스케줄링 또는 드라이버)에 의해 평가됩니다. 이 특성은 모호성을 제" -"거하기 위해 네임스페이스 대상을 허용합니다." - -msgid "Sort direction supplied was not valid." -msgstr "제공된 정렬 방향이 올바르지 않습니다." - -msgid "Sort key supplied was not valid." -msgstr "제공되는 정렬 키가 올바르지 않습니다." - -msgid "" -"Specifies the prefix to use for the given resource type. Any properties in " -"the namespace should be prefixed with this prefix when being applied to the " -"specified resource type. Must include prefix separator (e.g. a colon :)." -msgstr "" -"제공된 자원 유형에 사용할 접두부를 지정합니다. 지정된 자원 유형에 적용되는 경" -"우 네임스페이스의 모든 특성은 이 접두부로 시작해야 합니다. 접두부 구분 기호" -"(예: 콜론 :)를 포함해야 합니다." - -msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." -msgstr "상태는 \"보류 중\", \"수락됨\" 또는 \"거부됨\"이어야 합니다." 
- -msgid "Status not specified" -msgstr "상태를 지정하지 않음" - -msgid "Status of the image" -msgstr "이미지의 상태" - -#, python-format -msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "%(cur_status)s에서 %(new_status)s(으)로의 상태 전이가 허용되지 않음" - -#, python-format -msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "신호(%(sig)s)와 함께 %(serv)s(pid %(pid)s) 중지 중" - -#, python-format -msgid "Store for image_id not found: %s" -msgstr "image_id에 대한 저장소를 찾을 수 없음: %s" - -#, python-format -msgid "Store for scheme %s not found" -msgstr "%s 스키마에 대한 저장소를 찾을 수 없음" - -#, python-format -msgid "" -"Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " -"(%(actual)s) did not match. Setting image status to 'killed'." -msgstr "" -"제공된 %(attr)s (%(supplied)s) 및 %(attr)s (업로드된 이미지 %(actual)s(으)로" -"부터 생성됨)이(가) 일치하지 않음. 이미지 상태를 '강제 종료됨'으로 설정." - -msgid "Supported values for the 'container_format' image attribute" -msgstr "'container_format' 이미지 속성에 대해 지원되는 값" - -msgid "Supported values for the 'disk_format' image attribute" -msgstr "'disk_format' 이미지 속성에 대해 지원되는 값" - -#, python-format -msgid "Suppressed respawn as %(serv)s was %(rsn)s." -msgstr "%(serv)s이(가) %(rsn)s이므로 재파생이 억제되었습니다." - -msgid "System SIGHUP signal received." -msgstr "시스템 SIGHUP 신호를 수신했습니다." - -#, python-format -msgid "Task '%s' is required" -msgstr "태스크 '%s'이(가) 필요함" - -msgid "Task does not exist" -msgstr "태스크가 존재하지 않음" - -msgid "Task failed due to Internal Error" -msgstr "내부 오류로 인해 태스크 실패" - -msgid "Task was not configured properly" -msgstr "태스크가 제대로 구성되지 않음" - -#, python-format -msgid "Task with the given id %(task_id)s was not found" -msgstr "지정된 ID가 %(task_id)s인 태스크를 찾을 수 없음" - -msgid "The \"changes-since\" filter is no longer available on v2." -msgstr "\"changes-since\" 필터는 v2에서 더 이상 사용할 수 없습니다." 
- -#, python-format -msgid "The CA file you specified %s does not exist" -msgstr "사용자가 지정한 CA 파일 %s이(가) 존재하지 않음" - -#, python-format -msgid "" -"The Image %(image_id)s object being created by this task %(task_id)s, is no " -"longer in valid status for further processing." -msgstr "" -"이 태스크 %(task_id)s에서 작성 중인 이미지 %(image_id)s 오브젝트는 더 이상 향" -"후 처리에 사용할 수 있는 올바른 상태가 아닙니다." - -msgid "The Store URI was malformed." -msgstr "저장소 URI의 형식이 올바르지 않습니다." - -msgid "" -"The URL to the keystone service. If \"use_user_token\" is not in effect and " -"using keystone auth, then URL of keystone can be specified." -msgstr "" -"키스톤 서비스에 대한 URL입니다. \"use_user_token\"이(가) 적용되지 않는 경우 " -"키스톤 권한을 사용한 다음 키스톤의 URL을 지정할 수 있습니다." - -msgid "" -"The administrators password. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"관리자 비밀번호입니다. \"use_user_token\"이(가) 적용되지 않는 경우 관리 신임 " -"정보를 지정할 수 있습니다." - -msgid "" -"The administrators user name. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"관리 사용자 이름입니다. \"use_user_token\"이(가) 적용되지 않는 경우 관리 신" -"임 정보를 지정할 수 있습니다." - -#, python-format -msgid "The cert file you specified %s does not exist" -msgstr "사용자가 지정한 인증 파일 %s이(가) 존재하지 않음" - -msgid "The current status of this task" -msgstr "이 태스크의 현재 상태" - -#, python-format -msgid "" -"The device housing the image cache directory %(image_cache_dir)s does not " -"support xattr. It is likely you need to edit your fstab and add the " -"user_xattr option to the appropriate line for the device housing the cache " -"directory." -msgstr "" -"디바이스 하우징 이미지 캐시 디렉터리 %(image_cache_dir)s의 Device 는 xattr을 " -"지원하지 않습니다. fstab을 수정하거나 user_xattr 옵션을 디바이스 하우징 캐시 " -"디렉터리의 적합한 행에 추가하기 바랍니다." - -#, python-format -msgid "" -"The given uri is not valid. Please specify a valid uri from the following " -"list of supported uri %(supported)s" -msgstr "" -"제공된 uri가 올바르지 않습니다. 다음 지원 uri 목록에서 올바른 uri를 지정하십" -"시오. 
%(supported)s" - -#, python-format -msgid "The incoming image is too large: %s" -msgstr "수신 이미지가 너무 큼: %s" - -#, python-format -msgid "The key file you specified %s does not exist" -msgstr "사용자가 지정한 키 파일 %s이(가) 존재하지 않음" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image locations. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"허용된 이미지 위치 수의 한계가 초과되었습니다. 시도함: %(attempted)s, 최대: " -"%(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image members for this " -"image. Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"이 이미지에 대해 허용된 이미지 멤버 수의 한계가 초과되었습니다. 시도함: " -"%(attempted)s, 최대: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"허용된 이미지 특성 수의 한계가 초과되었습니다. 시도함: %(attempted)s, 최대: " -"%(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(num)s, Maximum: %(quota)s" -msgstr "" -"허용된 이미지 특성 수의 한계가 초과되었습니다. 시도함: %(num)s, 최대: " -"%(quota)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image tags. Attempted: " -"%(attempted)s, Maximum: %(maximum)s" -msgstr "" -"허용된 이미지 태그 수의 한계가 초과되었습니다. 시도함: %(attempted)s, 최대: " -"%(maximum)s" - -#, python-format -msgid "The location %(location)s already exists" -msgstr "위치 %(location)s이(가) 이미 있음" - -#, python-format -msgid "The location data has an invalid ID: %d" -msgstr "위치 데이터의 ID가 올바르지 않음: %d" - -#, python-format -msgid "" -"The metadata definition %(record_type)s with name=%(record_name)s not " -"deleted. Other records still refer to it." -msgstr "" -"name=%(record_name)s인 메타데이터 정의 %(record_type)s이(가) 삭제되지 않습니" -"다. 기타 레코드를 여전히 참조합니다." - -#, python-format -msgid "The metadata definition namespace=%(namespace_name)s already exists." 
-msgstr "메타데이터 정의 namespace=%(namespace_name)s이(가) 이미 존재합니다." - -#, python-format -msgid "" -"The metadata definition object with name=%(object_name)s was not found in " -"namespace=%(namespace_name)s." -msgstr "" -"name=%(object_name)s인 메타데이터 정의 오브젝트를 namespace=" -"%(namespace_name)s에서 찾을 수 없습니다." - -#, python-format -msgid "" -"The metadata definition property with name=%(property_name)s was not found " -"in namespace=%(namespace_name)s." -msgstr "" -"name=%(property_name)s인 메타데이터 정의 특성을 namespace=%(namespace_name)s" -"에서 찾을 수 없습니다." - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s already exists." -msgstr "" -"resource-type=%(resource_type_name)s의 메타데이터 정의 자원 유형 연관이 " -"namespace=%(namespace_name)s에 이미 존재합니다." - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s, was not found." -msgstr "" -"resource-type=%(resource_type_name)s의 메타데이터 정의 자원 유형 연관이 " -"namespace=%(namespace_name)s에서 찾을 수 없습니다." - -#, python-format -msgid "" -"The metadata definition resource-type with name=%(resource_type_name)s, was " -"not found." -msgstr "" -"name=%(resource_type_name)s인 메타데이터 정의 자원 유형을 찾을 수 없습니다." - -#, python-format -msgid "" -"The metadata definition tag with name=%(name)s was not found in namespace=" -"%(namespace_name)s." -msgstr "" -"name=%(name)s인 메타데이터 정의 태그를 namespace=%(namespace_name)s에서 찾을 " -"수 없습니다." - -msgid "The parameters required by task, JSON blob" -msgstr "태스크에서 필요로 하는 매개변수, JSON blob" - -msgid "The provided image is too large." -msgstr "제공된 이미지가 너무 큽니다." - -msgid "" -"The region for the authentication service. If \"use_user_token\" is not in " -"effect and using keystone auth, then region name can be specified." -msgstr "" -"인증 서비스에 대한 리젼입니다. If \"use_user_token\"이(가) 적용되지 않는 경" -"우 키스톤 권한을 사용한 다음 리젼 이름을 지정할 수 있습니다." 
- -msgid "The request returned 500 Internal Server Error." -msgstr "요청 시 500 내부 서버 오류가 리턴되었습니다." - -msgid "" -"The request returned 503 Service Unavailable. This generally occurs on " -"service overload or other transient outage." -msgstr "" -"요청에서 '503 서비스 사용 불가능'을 리턴했습니다. 이는 일반적으로 서비스 과부" -"하나 기타 일시적 정전일 경우 발생합니다." - -#, python-format -msgid "" -"The request returned a 302 Multiple Choices. This generally means that you " -"have not included a version indicator in a request URI.\n" -"\n" -"The body of response returned:\n" -"%(body)s" -msgstr "" -"요청이 302 다중 선택사항을 리턴했습니다. 이는 일반적으로 요청 URI에 버전 표시" -"기를 포함하지 않았음을 의미합니다.\n" -"\n" -"리턴된 응답의 본문:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned a 413 Request Entity Too Large. This generally means " -"that rate limiting or a quota threshold was breached.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"요청에서 '413 요청 엔티티가 너무 큼'을 리턴했습니다. 이는 일반적으로 등급 한" -"도나 할당량 임계값을 위반했음을 의미합니다.\n" -"\n" -"응답 본문:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned an unexpected status: %(status)s.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"요청이 예상치 않은 상태를 리턴함: %(status)s.\n" -"\n" -"응답 본문:\n" -"%(body)s" - -msgid "" -"The requested image has been deactivated. Image data download is forbidden." -msgstr "" -"요청된 이미지가 비활성화되었습니다. 이미지 데이터 다운로드가 금지됩니다." - -msgid "The result of current task, JSON blob" -msgstr "현재 태스크의 결과, JSON blob" - -#, python-format -msgid "" -"The size of the data %(image_size)s will exceed the limit. %(remaining)s " -"bytes remaining." -msgstr "" -"데이터 크기 %(image_size)s이(가) 남은 한도 바이트 %(remaining)s을(를) 초과합" -"니다." 
- -#, python-format -msgid "The specified member %s could not be found" -msgstr "지정된 멤버 %s을(를) 찾을 수 없음" - -#, python-format -msgid "The specified metadata object %s could not be found" -msgstr "지정된 메타데이터 오브젝트 %s을(를) 찾을 수 없음" - -#, python-format -msgid "The specified metadata tag %s could not be found" -msgstr "지정된 메타데이터 태그 %s을(를) 찾을 수 없음" - -#, python-format -msgid "The specified namespace %s could not be found" -msgstr "지정된 네임스페이스 %s을(를) 찾을 수 없음" - -#, python-format -msgid "The specified property %s could not be found" -msgstr "지정된 특성 %s을(를) 찾을 수 없음" - -#, python-format -msgid "The specified resource type %s could not be found " -msgstr "지정된 자원 유형 %s을(를) 찾을 수 없음" - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'" -msgstr "" -"삭제된 이미지 위치의 상태는 'pending_delete' 또는 'deleted'로만 설정할 수 있" -"음" - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'." -msgstr "" -"삭제된 이미지 위치의 상태는 'pending_delete' 또는 'deleted'로만 설정할 수 있" -"습니다." - -msgid "The status of this image member" -msgstr "이 이미지 멤버의 상태" - -msgid "" -"The strategy to use for authentication. If \"use_user_token\" is not in " -"effect, then auth strategy can be specified." -msgstr "" -"인증에 사용할 전략입니다. If \"use_user_token\"이(가) 적용되지 않는 경우인증 " -"전략을 지정할 수 있습니다." - -#, python-format -msgid "" -"The target member %(member_id)s is already associated with image " -"%(image_id)s." -msgstr "대상 멤버 %(member_id)s이(가) 이미 이미지 %(image_id)s." - -msgid "" -"The tenant name of the administrative user. If \"use_user_token\" is not in " -"effect, then admin tenant name can be specified." -msgstr "" -"관리 사용자의 테넌트 이름입니다. \"use_user_token\"이(가) 적용되지 않는 경우" -"관리 테넌트 이름을 지정할 수 있습니다." - -msgid "The type of task represented by this content" -msgstr "이 컨텐츠에서 나타내는 태스크의 유형" - -msgid "The unique namespace text." -msgstr "고유 네임스페이스 텍스트입니다." - -msgid "The user friendly name for the namespace. Used by UI if available." 
-msgstr "" -"사용자에게 익숙한 네임스페이스의 이름입니다. 가능한 경우 UI에서 사용됩니다." - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. Error: %(ioe)s" -msgstr "" -"%(error_key_name)s %(error_filename)s에 문제점이 있습니다. 문제점을 확인하십" -"시오. 오류: %(ioe)s" - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. OpenSSL error: %(ce)s" -msgstr "" -"%(error_key_name)s %(error_filename)s에 문제점이 있습니다. 문제점을 확인하십" -"시오. OpenSSL 오류: %(ce)s" - -#, python-format -msgid "" -"There is a problem with your key pair. Please verify that cert " -"%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" -msgstr "" -"키 쌍에 문제점이 있습니다. 인증 %(cert_file)s 및 키 %(key_file)s이(가) 함께 " -"있는지 확인하십시오. OpenSSL 오류 %(ce)s" - -msgid "There was an error configuring the client." -msgstr "클라이언트 구성 오류가 있었습니다." - -msgid "There was an error connecting to a server" -msgstr "서버 연결 오류가 있었습니다." - -msgid "" -"This operation is currently not permitted on Glance Tasks. They are auto " -"deleted after reaching the time based on their expires_at property." -msgstr "" -"해당 동작은 현재 Glance 작업에 대해서는 허용되지 않습니다. 이들은 expires_at " -"특성에 기반한 시간에 도달하면 자동으로 삭제됩니다." - -msgid "This operation is currently not permitted on Glance images details." -msgstr "해당 동작은 현재 Glance 이미지 세부사항에 대해서는 허용되지 않습니다." - -msgid "" -"Time in hours for which a task lives after, either succeeding or failing" -msgstr "이후에 태스크가 활성이 되는 시간(시), 성공 또는 실패" - -msgid "Too few arguments." -msgstr "인수가 너무 적습니다." - -msgid "" -"URI cannot contain more than one occurrence of a scheme.If you have " -"specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " -"you need to change it to use the swift+http:// scheme, like so: swift+http://" -"user:pass@authurl.com/v1/container/obj" -msgstr "" -"URI는 스킴의 둘 이상의 발생을 포함할 수 없습니다. 
다음과 유사한 URI를 지정한 " -"경우 swift://user:pass@http://authurl.com/v1/container/obj, 다음과 같이 swift" -"+http:// 스킴을 사용하도록 변경해야 합니다. swift+http://user:pass@authurl." -"com/v1/container/obj" - -msgid "URL to access the image file kept in external store" -msgstr "외부 저장소에 보관된 이미지 파일에 액세스하기 위한 URL" - -#, python-format -msgid "" -"Unable to create pid file %(pid)s. Running as non-root?\n" -"Falling back to a temp file, you can stop %(service)s service using:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" -msgstr "" -"pid 파일 %(pid)s을(를) 작성할 수 없습니다. 비루트로 실행 중인지 확인하십시" -"오.\n" -"임시 파일로 돌아가 다음을 사용하여 %(service)s 서비스를 중지할 수 있습니다.\n" -" %(file)s %(server)s stop --pid-file %(fb)s" - -#, python-format -msgid "Unable to filter by unknown operator '%s'." -msgstr "알 수 없는 연산자 '%s'(으)로 필터링할 수 없습니다." - -msgid "Unable to filter on a range with a non-numeric value." -msgstr "숫자가 아닌 값을 사용하여 범위에서 필터링할 수 없습니다." - -msgid "Unable to filter on a unknown operator." -msgstr "알 수 없는 연산자를 필터링할 수 없습니다." - -msgid "Unable to filter using the specified operator." -msgstr "지정된 연산자를 사용하여 필터링할 수 없습니다." - -msgid "Unable to filter using the specified range." -msgstr "지정된 범위를 사용하여 필터링할 수 없습니다." - -#, python-format -msgid "Unable to find '%s' in JSON Schema change" -msgstr "JSON 스키마 변경에서 '%s'을(를) 찾을 수 없음" - -#, python-format -msgid "" -"Unable to find `op` in JSON Schema change. It must be one of the following: " -"%(available)s." -msgstr "" -"JSON 스키마 변경에서 `op`를 찾을 수 없습니다. 다음 중 하나여야 합니다. " -"%(available)s." - -msgid "Unable to increase file descriptor limit. Running as non-root?" -msgstr "" -"파일 디스크립터 한계를 늘릴 수 없습니다. 비루트로 실행 중인지 확인하십시오." - -#, python-format -msgid "" -"Unable to load %(app_name)s from configuration file %(conf_file)s.\n" -"Got: %(e)r" -msgstr "" -"구성 파일 %(conf_file)s에서 %(app_name)s을(를) 로드할 수 없습니다.\n" -"오류 발생: %(e)r" - -#, python-format -msgid "Unable to load schema: %(reason)s" -msgstr "스키마를 로드할 수 없음: %(reason)s" - -#, python-format -msgid "Unable to locate paste config file for %s." 
-msgstr "%s에 대한 붙여넣기 구성 파일을 찾을 수 없습니다." - -#, python-format -msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" -msgstr "" -"이미지 %(image_id)s에 대한 중복 이미지 데이터를 업로드할 수 없음: %(error)s" - -msgid "Unauthorized image access" -msgstr "권한 없는 이미지 액세스" - -msgid "Unexpected body type. Expected list/dict." -msgstr "예기치않은 본문 타입. list/dict를 예상합니다." - -#, python-format -msgid "Unexpected response: %s" -msgstr "예상치 않은 응답: %s" - -#, python-format -msgid "Unknown auth strategy '%s'" -msgstr "알 수 없는 auth 전략 '%s'" - -#, python-format -msgid "Unknown command: %s" -msgstr "알 수 없는 명령: %s" - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "알 수 없는 정렬 방향입니다. 'desc' 또는 'asc'여야 함" - -msgid "Unrecognized JSON Schema draft version" -msgstr "인식되지 않는 JSON 스키마 드래프트 버전" - -msgid "Unrecognized changes-since value" -msgstr "인식되지 않는 changes-since 값" - -#, python-format -msgid "Unsupported sort_dir. Acceptable values: %s" -msgstr "지원되지 않는 sort_dir. 허용 가능한 값: %s" - -#, python-format -msgid "Unsupported sort_key. Acceptable values: %s" -msgstr "지원되지 않는 sort_key. 허용 가능한 값: %s" - -msgid "Virtual size of image in bytes" -msgstr "이미지의 가상 크기(바이트)" - -#, python-format -msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" -msgstr "pid %(pid)s(%(file)s)이 종료될 때까지 15초 대기함, 포기하는 중" - -msgid "" -"When running server in SSL mode, you must specify both a cert_file and " -"key_file option value in your configuration file" -msgstr "" -"서버를 SSL 모드에서 실행할 때 구성 파일에 cert_file 및 key_file 옵션 값을 모" -"두 지정해야 함" - -msgid "" -"Whether to pass through the user token when making requests to the registry. " -"To prevent failures with token expiration during big files upload, it is " -"recommended to set this parameter to False.If \"use_user_token\" is not in " -"effect, then admin credentials can be specified." -msgstr "" -"레지스트리에 대해 요청을 작성할 때 사용자 토큰을 통과할지 여부입니다. 큰 파일" -"을 업로드하는 동안 토큰 만기에 대한 실패를 방지하려면 이 매개변수를 False로 " -"설정하는 것이 좋습니다. 
\"use_user_token\"이 적용되지 않은 경우 관리 신임 정" -"보를 지정할 수 있습니다." - -#, python-format -msgid "Wrong command structure: %s" -msgstr "잘못된 명령 구조: %s" - -msgid "You are not authenticated." -msgstr "인증되지 않은 사용자입니다." - -msgid "You are not authorized to complete this action." -msgstr "이 조치를 완료할 권한이 없습니다." - -#, python-format -msgid "You are not authorized to lookup image %s." -msgstr "이미지 %s을(를) 검색할 권한이 없습니다." - -#, python-format -msgid "You are not authorized to lookup the members of the image %s." -msgstr "이미지 %s의 멤버를 검색할 권한이 없습니다." - -#, python-format -msgid "You are not permitted to create a tag in the namespace owned by '%s'" -msgstr "'%s' 소유의 네임스페이스에 태그를 작성할 권한이 없습니다." - -msgid "You are not permitted to create image members for the image." -msgstr "이미지에 대한 이미지 멤버를 작성할 권한이 없습니다." - -#, python-format -msgid "You are not permitted to create images owned by '%s'." -msgstr "'%s' 소유의 이미지를 작성할 권한이 없습니다." - -#, python-format -msgid "You are not permitted to create namespace owned by '%s'" -msgstr "'%s' 소유의 네임스페이스를 작성할 권한이 없습니다." - -#, python-format -msgid "You are not permitted to create object owned by '%s'" -msgstr "'%s' 소유의 오브젝트를 작성할 권한이 없습니다." - -#, python-format -msgid "You are not permitted to create property owned by '%s'" -msgstr "'%s' 소유의 특성을 작성할 권한이 없습니다." - -#, python-format -msgid "You are not permitted to create resource_type owned by '%s'" -msgstr "'%s' 소유의 resource_type을 작성할 권한이 없습니다." - -#, python-format -msgid "You are not permitted to create this task with owner as: %s" -msgstr "다음 소유자로 이 태스크를 작성하도록 허용되지 않았습니다. %s" - -msgid "You are not permitted to deactivate this image." -msgstr "이 이미지를 비활성화할 권한이 없습니다." - -msgid "You are not permitted to delete this image." -msgstr "이 이미지를 삭제할 권한이 없습니다." - -msgid "You are not permitted to delete this meta_resource_type." -msgstr "이 meta_resource_type을 삭제할 권한이 없습니다." - -msgid "You are not permitted to delete this namespace." -msgstr "이 네임스페이스를 삭제할 권한이 없습니다." - -msgid "You are not permitted to delete this object." 
-msgstr "이 오브젝트를 삭제할 권한이 없습니다." - -msgid "You are not permitted to delete this property." -msgstr "이 특성을 삭제할 권한이 없습니다." - -msgid "You are not permitted to delete this tag." -msgstr "이 태그를 삭제할 권한이 없습니다." - -#, python-format -msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." -msgstr "이 %(resource)s에서 '%(attr)s'을(를) 수정하도록 허용되지 않았습니다." - -#, python-format -msgid "You are not permitted to modify '%s' on this image." -msgstr "이 이미지에서 '%s'을(를) 수정할 권한이 없습니다." - -msgid "You are not permitted to modify locations for this image." -msgstr "이 이미지의 위치를 수정할 권한이 없습니다." - -msgid "You are not permitted to modify tags on this image." -msgstr "이 이미지의 태그를 수정할 권한이 없습니다." - -msgid "You are not permitted to modify this image." -msgstr "이 이미지를 수정할 권한이 없습니다." - -msgid "You are not permitted to reactivate this image." -msgstr "이 이미지를 재활성화할 권한이 없습니다." - -msgid "You are not permitted to set status on this task." -msgstr "이 태스크에서 상태를 설정하도록 허용되지 않았습니다." - -msgid "You are not permitted to update this namespace." -msgstr "이 네임스페이스를 업데이트할 권한이 없습니다." - -msgid "You are not permitted to update this object." -msgstr "이 오브젝트를 업데이트할 권한이 없습니다." - -msgid "You are not permitted to update this property." -msgstr "이 특성을 업데이트할 권한이 없습니다." - -msgid "You are not permitted to update this tag." -msgstr "이 태그를 업데이트할 권한이 없습니다." - -msgid "You are not permitted to upload data for this image." -msgstr "이 이미지에 대한 데이터를 작성할 권한이 없습니다." 
- -#, python-format -msgid "You cannot add image member for %s" -msgstr "%s에 대한 이미지 멤버를 추가할 수 없음" - -#, python-format -msgid "You cannot delete image member for %s" -msgstr "%s에 대한 이미지 멤버를 삭제할 수 없음" - -#, python-format -msgid "You cannot get image member for %s" -msgstr "%s에 대한 이미지 멤버를 가져올 수 없음" - -#, python-format -msgid "You cannot update image member %s" -msgstr "이미지 멤버 %s을(를) 업데이트할 수 없음" - -msgid "You do not own this image" -msgstr "이 이미지를 소유하지 않음" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a cert, " -"however you have failed to supply either a key_file parameter or set the " -"GLANCE_CLIENT_KEY_FILE environ variable" -msgstr "" -"연결에 SSL을 사용하도록 선택하고 인증을 제공했지만 key_file 매개변수를 제공하" -"지 못했거나 GLANCE_CLIENT_KEY_FILE 환경 변수를 설정하지 못했습니다." - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a key, " -"however you have failed to supply either a cert_file parameter or set the " -"GLANCE_CLIENT_CERT_FILE environ variable" -msgstr "" -"연결에 SSL을 사용하도록 선택하고 키를 제공했지만 cert_file 매개변수를 제공하" -"지 못했거나 GLANCE_CLIENT_CERT_FILE 환경 변수를 설정하지 못했습니다." 
- -msgid "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" -msgstr "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" - -#, python-format -msgid "__init__() got unexpected keyword argument '%s'" -msgstr "__init__()가 예상치 못한 키워드 인수 '%s'을(를) 가져옴" - -#, python-format -msgid "" -"cannot transition from %(current)s to %(next)s in update (wanted from_state=" -"%(from)s)" -msgstr "" -"업데이트에서 %(current)s에서 %(next)s(으)로 상태 전이할 수 (from_state=" -"%(from)s을(를) 원함)" - -#, python-format -msgid "custom properties (%(props)s) conflict with base properties" -msgstr "사용자 정의 특성 (%(props)s)이(가) 기본 특성과 충돌함" - -msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" -msgstr "이 플랫폼에서 eventlet 'poll'이나 'selects' 허브를 모두 사용할 수 없음" - -msgid "is_public must be None, True, or False" -msgstr "is_public은 None, True 또는 False여야 함" - -msgid "limit param must be an integer" -msgstr "limit 매개변수는 정수여야 함" - -msgid "limit param must be positive" -msgstr "limit 매개변수가 양수여야 함" - -msgid "md5 hash of image contents." -msgstr "이미지 컨텐츠의 md5 해시입니다." - -#, python-format -msgid "new_image() got unexpected keywords %s" -msgstr "new_image()가 예상치 못한 키워드 %s을(를) 가져옴" - -msgid "protected must be True, or False" -msgstr "protected는 True 또는 False여야 함" - -#, python-format -msgid "unable to launch %(serv)s. Got error: %(e)s" -msgstr "%(serv)s을(를) 실행할 수 없음. 오류 발생: %(e)s" - -#, python-format -msgid "x-openstack-request-id is too long, max size %s" -msgstr "x-openstack-request-id가 너무 김, 최대 크기 %s" diff --git a/glance/locale/pt_BR/LC_MESSAGES/glance.po b/glance/locale/pt_BR/LC_MESSAGES/glance.po deleted file mode 100644 index 6c044d89..00000000 --- a/glance/locale/pt_BR/LC_MESSAGES/glance.po +++ /dev/null @@ -1,2125 +0,0 @@ -# Translations template for glance. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the glance project. 
-# -# Translators: -# Gabriel Wainer, 2013 -# Gabriel Wainer, 2013 -# Rodrigo Felix de Almeida , 2014 -# Volmar Oliveira Junior , 2013 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: glance 15.0.0.0b3.dev29\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-06-23 20:54+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-12 05:22+0000\n" -"Last-Translator: Copied by Zanata \n" -"Language: pt-BR\n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" -"Language-Team: Portuguese (Brazil)\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "%(cls)s exception was raised in the last rpc call: %(val)s" -msgstr "exceção %(cls)s foi disparada na última chamada RPC: %(val)s" - -#, python-format -msgid "%(m_id)s not found in the member list of the image %(i_id)s." -msgstr "%(m_id)s não localizado na lista de membros da imagem %(i_id)s." - -#, python-format -msgid "%(serv)s (pid %(pid)s) is running..." -msgstr "%(serv)s (pid %(pid)s) está em execução..." - -#, python-format -msgid "%(serv)s appears to already be running: %(pid)s" -msgstr "%(serv)s parece já estar em execução: %(pid)s" - -#, python-format -msgid "" -"%(strategy)s is registered as a module twice. %(module)s is not being used." -msgstr "" -"%(strategy)s é registrado como um módulo duas vezes. %(module)s não está " -"sendo usado." - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Could not load the " -"filesystem store" -msgstr "" -"%(task_id)s de %(task_type)s não foi configurado adequadamente. Não foi " -"possível carregar o armazenamento de sistema de arquivos" - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. 
Missing work dir: " -"%(work_dir)s" -msgstr "" -"%(task_id)s de %(task_type)s não foi configurado adequadamente. Faltando o " -"diretório de trabalho: %(work_dir)s" - -#, python-format -msgid "%(verb)sing %(serv)s" -msgstr "%(verb)sing %(serv)s" - -#, python-format -msgid "%(verb)sing %(serv)s with %(conf)s" -msgstr "%(verb)sing %(serv)s com %(conf)s" - -#, python-format -msgid "" -"%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " -"address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " -"separately from the port (i.e., \"[fe80::a:b:c]:9876\")." -msgstr "" -"%s Especifique um par host:porta, em que o host é um endereço IPv4, IPv6, " -"nome do host ou FQDN. Se você estiver usando um endereço IPv6, coloque-o nos " -"suportes separadamente da porta (ou seja, \"[fe80::a:b:c]:9876\")." - -#, python-format -msgid "%s can't contain 4 byte unicode characters." -msgstr "%s não pode conter caracteres de unicode de 4 bytes." - -#, python-format -msgid "%s is already stopped" -msgstr "%s já está parado" - -#, python-format -msgid "%s is stopped" -msgstr "%s está parado" - -msgid "" -"--os_auth_url option or OS_AUTH_URL environment variable required when " -"keystone authentication strategy is enabled\n" -msgstr "" -"opção --os_auth_url ou variável de ambiente OS_AUTH_URL requerida quando " -"estratégia de autenticação keystone está ativada\n" - -msgid "A body is not expected with this request." -msgstr "Um corpo não é esperado com essa solicitação." - -#, python-format -msgid "" -"A metadata definition object with name=%(object_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"Um objeto de definição de metadados com o nome=%(object_name)s já existe no " -"namespace=%(namespace_name)s." - -#, python-format -msgid "" -"A metadata definition property with name=%(property_name)s already exists in " -"namespace=%(namespace_name)s." 
-msgstr "" -"Uma propriedade de definição de metadados com o nome=%(property_name)s já " -"existe no namespace=%(namespace_name)s." - -#, python-format -msgid "" -"A metadata definition resource-type with name=%(resource_type_name)s already " -"exists." -msgstr "" -"Um tipo de recurso de definição de metadados com o nome=" -"%(resource_type_name)s já existe." - -msgid "A set of URLs to access the image file kept in external store" -msgstr "" -"Um conjunto de URLs para acessar o arquivo de imagem mantido em " -"armazenamento externo" - -msgid "Amount of disk space (in GB) required to boot image." -msgstr "" -"Quantidade de espaço em disco (em GB) necessária para a imagem de " -"inicialização." - -msgid "Amount of ram (in MB) required to boot image." -msgstr "Quantidade de ram (em MB) necessária para a imagem de inicialização." - -msgid "An identifier for the image" -msgstr "Um identificador para a imagem" - -msgid "An identifier for the image member (tenantId)" -msgstr "Um identificador para o membro de imagem (tenantId)" - -msgid "An identifier for the owner of this task" -msgstr "Um identificador para o proprietário desta tarefa" - -msgid "An identifier for the task" -msgstr "Um identificador para a tarefa" - -msgid "An image file url" -msgstr "Uma URL de arquivo de imagem" - -msgid "An image schema url" -msgstr "Uma URL de esquema de imagem" - -msgid "An image self url" -msgstr "Uma URL automática de imagem" - -#, python-format -msgid "An image with identifier %s already exists" -msgstr "Uma imagem com o identificador %s já existe" - -msgid "An import task exception occurred" -msgstr "Ocorreu uma exceção em uma tarefa importante" - -msgid "An object with the same identifier already exists." -msgstr "Um objeto com o mesmo identificador já existe." - -msgid "An object with the same identifier is currently being operated on." -msgstr "Um objeto com o mesmo identificador está atualmente sendo operado." - -msgid "An object with the specified identifier was not found." 
-msgstr "Um objeto com o identificador especificado não foi localizado." - -msgid "An unknown exception occurred" -msgstr "Ocorreu uma exceção desconhecida" - -msgid "An unknown task exception occurred" -msgstr "Ocorreu uma exceção de tarefa desconhecida" - -#, python-format -msgid "Attempt to upload duplicate image: %s" -msgstr "Tentativa de fazer upload de imagem duplicada: %s" - -msgid "Attempted to update Location field for an image not in queued status." -msgstr "" -"Tentativa de atualizar o campo Local para uma imagem não está no status em " -"fila." - -#, python-format -msgid "Attribute '%(property)s' is read-only." -msgstr "O atributo '%(property)s' é somente leitura." - -#, python-format -msgid "Attribute '%(property)s' is reserved." -msgstr "O atributo '%(property)s' é reservado." - -#, python-format -msgid "Attribute '%s' is read-only." -msgstr "Atributo '%s' é apenas leitura." - -#, python-format -msgid "Attribute '%s' is reserved." -msgstr "Atributo '%s' é reservado." - -msgid "Attribute container_format can be only replaced for a queued image." -msgstr "" -"Atributo container_format pode ser apenas substituído por uma imagem na fila." - -msgid "Attribute disk_format can be only replaced for a queued image." -msgstr "" -"Atributo disk_format pode ser apenas substituído por uma imagem na fila." - -#, python-format -msgid "Auth service at URL %(url)s not found." -msgstr "Serviço de autenticação na URL %(url)s não localizado." - -#, python-format -msgid "" -"Authentication error - the token may have expired during file upload. " -"Deleting image data for %s." -msgstr "" -"Erro de autenticação - o token pode ter expirado durante o envio do arquivo. " -"Removendo dados da imagem %s." - -msgid "Authorization failed." -msgstr "Falha de autorização." - -msgid "Available categories:" -msgstr "Categorias disponíveis:" - -#, python-format -msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." 
-msgstr "" -"Formato de filtro de consulta \"%s\" inválido. Use a notação ISO 8601 " -"DateTime." - -#, python-format -msgid "Bad Command: %s" -msgstr "Comandos inválidos: %s" - -#, python-format -msgid "Bad header: %(header_name)s" -msgstr "Cabeçalho inválido: %(header_name)s" - -#, python-format -msgid "Bad value passed to filter %(filter)s got %(val)s" -msgstr "Valor inválido passado para o filtro %(filter)s obteve %(val)s" - -#, python-format -msgid "Badly formed S3 URI: %(uri)s" -msgstr "URI S3 malformado: %(uri)s" - -#, python-format -msgid "Badly formed credentials '%(creds)s' in Swift URI" -msgstr "Credenciais malformadas '%(creds)s' no URI Swift" - -msgid "Badly formed credentials in Swift URI." -msgstr "Credenciais malformadas no URI Swift." - -msgid "Body expected in request." -msgstr "Corpo esperado na solicitação." - -msgid "Cannot be a negative value" -msgstr "Não pode ser um valor negativo" - -msgid "Cannot be a negative value." -msgstr "Não pode ser um valor negativo." - -#, python-format -msgid "Cannot convert image %(key)s '%(value)s' to an integer." -msgstr "" -"Não é possível converter a imagem %(key)s '%(value)s' para um número inteiro." - -msgid "Cannot remove last location in the image." -msgstr "Não é possível remover o último local na imagem." - -#, python-format -msgid "Cannot save data for image %(image_id)s: %(error)s" -msgstr "Não é possível salvar os dados da imagem %(image_id)s: %(error)s" - -msgid "Cannot set locations to empty list." -msgstr "Não é possível configurar locais para esvaziar a lista." - -msgid "Cannot upload to an unqueued image" -msgstr "Não é possível fazer upload para uma imagem fora da fila" - -#, python-format -msgid "Checksum verification failed. Aborted caching of image '%s'." -msgstr "" -"A soma de verificação falhou. Interrompido o armazenamento em cache da " -"imagem '%s'." 
- -msgid "Client disconnected before sending all data to backend" -msgstr "Cliente desconectado antes de enviar todos os dados para o backend" - -msgid "Command not found" -msgstr "Comando não encontrado" - -msgid "Configuration option was not valid" -msgstr "A opção de configuração não era válida" - -#, python-format -msgid "Connect error/bad request to Auth service at URL %(url)s." -msgstr "" -"Erro de conexão/solicitação inválida para serviço de autenticação na URL " -"%(url)s." - -#, python-format -msgid "Constructed URL: %s" -msgstr "URL construída: %s" - -msgid "Container format is not specified." -msgstr "O formato de contêiner não foi especificado." - -msgid "Content-Type must be application/octet-stream" -msgstr "Tipo de Conteúdo deve ser application/octet-stream" - -#, python-format -msgid "Corrupt image download for image %(image_id)s" -msgstr "Download de imagem corrompido para a imagem %(image_id)s" - -#, python-format -msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" -msgstr "" -"Não foi possível ligar a %(host)s:%(port)s depois de tentar por 30 segundos" - -msgid "Could not find OVF file in OVA archive file." -msgstr "Não foi possível localizar o arquivo OVF no archive OVA." 
- -#, python-format -msgid "Could not find metadata object %s" -msgstr "Não foi possível localizar o objeto de metadados %s" - -#, python-format -msgid "Could not find metadata tag %s" -msgstr "Não foi possível localizar a identificação de metadados %s" - -#, python-format -msgid "Could not find namespace %s" -msgstr "Não foi possível localizar o namespace %s" - -#, python-format -msgid "Could not find property %s" -msgstr "Não é possível localizar a propriedade %s" - -msgid "Could not find required configuration option" -msgstr "Não foi possível localizar a opção de configuração necessária" - -#, python-format -msgid "Could not find task %s" -msgstr "Não foi possível localizar tarefa %s" - -#, python-format -msgid "Could not update image: %s" -msgstr "Não foi possível atualizar a imagem: %s" - -msgid "Currently, OVA packages containing multiple disk are not supported." -msgstr "" -"Atualmente, os pacotes OVA que contêm diversos discos não são suportados. " - -#, python-format -msgid "Data for image_id not found: %s" -msgstr "Dados de image_id não localizados: %s" - -msgid "Data supplied was not valid." -msgstr "Os dados fornecidos não eram válidos." 
- -msgid "Date and time of image member creation" -msgstr "Data e hora da criação de membro da imagem" - -msgid "Date and time of image registration" -msgstr "Data e hora do registro da imagem " - -msgid "Date and time of last modification of image member" -msgstr "Data e hora da última modificação de membro da imagem" - -msgid "Date and time of namespace creation" -msgstr "Data e hora da criação do namespace" - -msgid "Date and time of object creation" -msgstr "Data e hora da criação do objeto" - -msgid "Date and time of resource type association" -msgstr "Data e hora da associação do tipo de recurso " - -msgid "Date and time of tag creation" -msgstr "Data e hora da criação da identificação " - -msgid "Date and time of the last image modification" -msgstr "Data e hora da última modificação da imagem " - -msgid "Date and time of the last namespace modification" -msgstr "Data e hora da última modificação do namespace " - -msgid "Date and time of the last object modification" -msgstr "Data e hora da última modificação do objeto" - -msgid "Date and time of the last resource type association modification" -msgstr "Data e hora da última modificação de associação de tipo de recurso " - -msgid "Date and time of the last tag modification" -msgstr "Data e hora da última modificação da identificação " - -msgid "Datetime when this resource was created" -msgstr "Data/hora quando este recurso foi criado" - -msgid "Datetime when this resource was updated" -msgstr "Data/Hora quando este recurso foi atualizado" - -msgid "Datetime when this resource would be subject to removal" -msgstr "Data/Hora quando este recurso deve ser objeto de remoção" - -#, python-format -msgid "Denying attempt to upload image because it exceeds the quota: %s" -msgstr "Negando a tentativa de upload da imagem porque ela excede a cota: %s" - -#, python-format -msgid "Denying attempt to upload image larger than %d bytes." -msgstr "Negando tentativa de fazer upload de imagem maior que %d bytes." 
- -msgid "Descriptive name for the image" -msgstr "Nome descritivo para a imagem" - -msgid "Disk format is not specified." -msgstr "O formato de disco não foi especificado." - -#, python-format -msgid "" -"Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" -msgstr "" -"O driver %(driver_name)s não pôde ser configurado corretamente. Motivo: " -"%(reason)s" - -msgid "" -"Error decoding your request. Either the URL or the request body contained " -"characters that could not be decoded by Glance" -msgstr "" -"Erro ao decodificar sua solicitação. A URL ou o corpo da solicitação " -"continha caracteres que não puderam ser decodificados pelo Glance" - -#, python-format -msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" -msgstr "Erro ao buscar membros da imagem %(image_id)s: %(inner_msg)s" - -msgid "Error in store configuration. Adding images to store is disabled." -msgstr "" -"Erro na configuração do armazenamento. A inclusão de imagens para " -"armazenamento está desativada." - -msgid "Expected a member in the form: {\"member\": \"image_id\"}" -msgstr "O membro era esperado no formato: {\"member\": \"image_id\"}" - -msgid "Expected a status in the form: {\"status\": \"status\"}" -msgstr "O estado era esperado no formato: {\"status\": \"status\"}" - -msgid "External source should not be empty" -msgstr "A fonte externa não deve estar vazia" - -#, python-format -msgid "External sources are not supported: '%s'" -msgstr "As fontes externas não são suportadas: '%s'" - -#, python-format -msgid "Failed to activate image. Got error: %s" -msgstr "Falha ao ativar imagem. Erro obtido: %s" - -#, python-format -msgid "Failed to add image metadata. Got error: %s" -msgstr "Falha ao incluir metadados da imagem. 
Erro obtido: %s" - -#, python-format -msgid "Failed to find image %(image_id)s to delete" -msgstr "Falhar ao localizar a imagem %(image_id)s para excluir" - -#, python-format -msgid "Failed to find image to delete: %s" -msgstr "Falha ao encontrar imagem para excluir: %s" - -#, python-format -msgid "Failed to find image to update: %s" -msgstr "Falha ao encontrar imagem para atualizar: %s" - -#, python-format -msgid "Failed to find resource type %(resourcetype)s to delete" -msgstr "Falha ao localizar o tipo de recurso %(resourcetype)s para excluir" - -#, python-format -msgid "Failed to initialize the image cache database. Got error: %s" -msgstr "" -"Falha ao inicializar o banco de dados de cache da imagem. Erro obtido: %s" - -#, python-format -msgid "Failed to read %s from config" -msgstr "Falha ao ler %s da configuração" - -#, python-format -msgid "Failed to reserve image. Got error: %s" -msgstr "Falha ao reservar imagem. Erro obtido: %s" - -#, python-format -msgid "Failed to update image metadata. Got error: %s" -msgstr "Falha ao atualizar metadados da imagem. Erro obtido: %s" - -#, python-format -msgid "Failed to upload image %s" -msgstr "Falha ao enviar imagem %s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to HTTP error: " -"%(error)s" -msgstr "" -"Falha ao fazer upload dos dados de imagem para a imagem %(image_id)s devido " -"a erro de HTTP: %(error)s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to internal error: " -"%(error)s" -msgstr "" -"Falha ao fazer upload dos dados de imagem para a imagem %(image_id)s devido " -"a erro interno: %(error)s" - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"O arquivo %(path)s tem arquivo de backup inválido %(bfile)s, interrompendo." - -msgid "" -"File based imports are not allowed. Please use a non-local source of image " -"data." 
-msgstr "" -"Importações baseadas em arquivo não são permitidas. Use uma fonte não local " -"de dados de imagem." - -msgid "Forbidden image access" -msgstr "Proibido o acesso a imagem" - -#, python-format -msgid "Forbidden to delete a %s image." -msgstr "Proibido excluir uma imagem %s." - -#, python-format -msgid "Forbidden to delete image: %s" -msgstr "Proibido excluir imagem: %s" - -#, python-format -msgid "Forbidden to modify '%(key)s' of %(status)s image." -msgstr "Proibido modificar '%(key)s' da imagem %(status)s" - -#, python-format -msgid "Forbidden to modify '%s' of image." -msgstr "Proibido modificar '%s' de imagem." - -msgid "Forbidden to reserve image." -msgstr "Proibido reservar imagem." - -msgid "Forbidden to update deleted image." -msgstr "Proibido atualizar imagem excluída." - -#, python-format -msgid "Forbidden to update image: %s" -msgstr "Proibido atualizar imagem: %s" - -#, python-format -msgid "Forbidden upload attempt: %s" -msgstr "Tentativa de upload proibida: %s" - -#, python-format -msgid "Forbidding request, metadata definition namespace=%s is not visible." -msgstr "" -"Proibindo solicitação, o namespace de definição de metadados=%s não é " -"visível." - -#, python-format -msgid "Forbidding request, task %s is not visible" -msgstr "Proibindo solicitação, a tarefa %s não está visível" - -msgid "Format of the container" -msgstr "Formato do contêiner" - -msgid "Format of the disk" -msgstr "Formato do disco" - -#, python-format -msgid "Host \"%s\" is not valid." -msgstr "Host \"%s\" não é válido." - -#, python-format -msgid "Host and port \"%s\" is not valid." -msgstr "Host e porta \"%s\" não são válidos." - -msgid "" -"Human-readable informative message only included when appropriate (usually " -"on failure)" -msgstr "" -"Mensagem informativa legível apenas incluída quando apropriado (geralmente " -"em falha)" - -msgid "If true, image will not be deletable." -msgstr "Se true, a imagem não será excluível." 
- -msgid "If true, namespace will not be deletable." -msgstr "Se verdadeiro, o namespace não poderá ser excluído." - -#, python-format -msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" -msgstr "" -"A imagem %(id)s não pôde ser excluída, pois ela está sendo usada: %(exc)s" - -#, python-format -msgid "Image %(id)s not found" -msgstr "Imagem %(id)s não localizada" - -#, python-format -msgid "" -"Image %(image_id)s could not be found after upload. The image may have been " -"deleted during the upload: %(error)s" -msgstr "" -"Imagem %(image_id)s não pôde ser localizada após o upload. A imagem pode ter " -"sido excluída durante o upload: %(error)s" - -#, python-format -msgid "Image %(image_id)s is protected and cannot be deleted." -msgstr "A imagem %(image_id)s está protegida e não pode ser excluída." - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload, cleaning up the chunks uploaded." -msgstr "" -"A imagem %s não pôde ser localizada após o upload. A imagem pode ter sido " -"excluída durante o upload, limpando os chunks transferidos por upload." - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload." -msgstr "" -"A imagem %s não foi encontrada após o envio. A imagem pode ter sido removida " -"durante o envio." - -#, python-format -msgid "Image %s is deactivated" -msgstr "Imagem %s está desativada" - -#, python-format -msgid "Image %s is not active" -msgstr "A imagem %s não está ativa" - -#, python-format -msgid "Image %s not found." -msgstr "Imagem %s não localizada." - -#, python-format -msgid "Image exceeds the storage quota: %s" -msgstr "Imagem excede a cota de armazenamento: %s" - -msgid "Image id is required." -msgstr "ID da imagem é obrigatório." 
- -msgid "Image is protected" -msgstr "A imagem está protegida" - -#, python-format -msgid "Image member limit exceeded for image %(id)s: %(e)s:" -msgstr "O limite do membro da imagem excedido para imagem %(id)s: %(e)s:" - -#, python-format -msgid "Image name too long: %d" -msgstr "Nome da imagem muito longo: %d" - -msgid "Image operation conflicts" -msgstr "Conflitos da operação de imagem" - -#, python-format -msgid "" -"Image status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "" -"Transição de status de imagem de %(cur_status)s para %(new_status)s não é " -"permitido" - -#, python-format -msgid "Image storage media is full: %s" -msgstr "A mídia de armazenamento da imagem está cheia: %s" - -#, python-format -msgid "Image tag limit exceeded for image %(id)s: %(e)s:" -msgstr "" -"O limite de identificação da imagem excedeu para a imagem %(id)s: %(e)s:" - -#, python-format -msgid "Image upload problem: %s" -msgstr "Problema ao fazer upload de imagem: %s" - -#, python-format -msgid "Image with identifier %s already exists!" -msgstr "A imagem o identificador %s já existe!" - -#, python-format -msgid "Image with identifier %s has been deleted." -msgstr "Imagem com identificador %s foi excluída." 
- -#, python-format -msgid "Image with identifier %s not found" -msgstr "Imagem com identificador %s não localizada" - -#, python-format -msgid "Image with the given id %(image_id)s was not found" -msgstr "Imagem com o ID fornecido %(image_id)s não foi localizada" - -#, python-format -msgid "" -"Incorrect auth strategy, expected \"%(expected)s\" but received " -"\"%(received)s\"" -msgstr "" -"Estratégia de autorização incorreta; esperava-se \"%(expected)s\", mas foi " -"recebido \"%(received)s\"" - -#, python-format -msgid "Incorrect request: %s" -msgstr "Requisição incorreta: %s" - -#, python-format -msgid "Input does not contain '%(key)s' field" -msgstr "A entrada não contém o campo '%(key)s'" - -#, python-format -msgid "Insufficient permissions on image storage media: %s" -msgstr "Permissões insuficientes na mídia de armazenamento da imagem: %s" - -#, python-format -msgid "Invalid JSON pointer for this resource: '/%s'" -msgstr "Ponteiro de JSON inválido para este recurso: '/%s'" - -#, python-format -msgid "Invalid checksum '%s': can't exceed 32 characters" -msgstr "Soma de verificação inválida '%s': não pode exceder 32 caracteres" - -msgid "Invalid configuration in glance-swift conf file." -msgstr "Configuração inválida no arquivo de configuração glance-swift." - -msgid "Invalid configuration in property protection file." -msgstr "Configuração inválida no arquivo de proteção de propriedade." - -#, python-format -msgid "Invalid container format '%s' for image." -msgstr "Formato de Contâiner inválido '%s' para imagem." - -#, python-format -msgid "Invalid content type %(content_type)s" -msgstr "Tipo de conteúdo inválido %(content_type)s" - -#, python-format -msgid "Invalid disk format '%s' for image." -msgstr "Formato de disco inválido '%s' para imagem." - -#, python-format -msgid "Invalid filter value %s. The quote is not closed." -msgstr "Valor de filtro inválido %s. A aspa não está fechada." - -#, python-format -msgid "" -"Invalid filter value %s. 
There is no comma after closing quotation mark." -msgstr "" -"Valor de filtro inválido %s.Não há nenhuma vírgula antes da aspa de " -"fechamento." - -#, python-format -msgid "" -"Invalid filter value %s. There is no comma before opening quotation mark." -msgstr "" -"Valor de filtro inválido %s.Não há nenhuma vírgula antes da aspa de abertura." - -msgid "Invalid image id format" -msgstr "Formato de ID da imagem inválido" - -msgid "Invalid location" -msgstr "Local inválido" - -#, python-format -msgid "Invalid location %s" -msgstr "Local inválido %s" - -#, python-format -msgid "Invalid location: %s" -msgstr "Localidade inválida: %s" - -#, python-format -msgid "" -"Invalid location_strategy option: %(name)s. The valid strategy option(s) " -"is(are): %(strategies)s" -msgstr "" -"Opção location_strategy inválida: %(name)s. A(s) opção(ões) de estratégia(s) " -"válida(s) é(são): %(strategies)s" - -msgid "Invalid locations" -msgstr "Locais inválidos" - -#, python-format -msgid "Invalid locations: %s" -msgstr "Localidades inválidas: %s" - -msgid "Invalid marker format" -msgstr "Formato de marcador inválido" - -msgid "Invalid marker. Image could not be found." -msgstr "Marcador inválido. A imagem não pôde ser localizada." - -#, python-format -msgid "Invalid membership association: %s" -msgstr "Associação inválida: %s" - -msgid "" -"Invalid mix of disk and container formats. When setting a disk or container " -"format to one of 'aki', 'ari', or 'ami', the container and disk formats must " -"match." -msgstr "" -"Combinação inválida de formatos de disco e contêiner. Ao configurar um " -"formato de disco ou contêiner para um destes, 'aki', 'ari' ou 'ami', os " -"formatos de contêiner e disco devem corresponder." - -#, python-format -msgid "" -"Invalid operation: `%(op)s`. It must be one of the following: %(available)s." -msgstr "" -"Operação inválida: `%(op)s`. Ela deve ser um das seguintes: %(available)s." - -msgid "Invalid position for adding a location." 
-msgstr "Posição inválida para adicionar uma localidade." - -msgid "Invalid position for removing a location." -msgstr "Posição inválida para remover uma localidade." - -msgid "Invalid service catalog json." -msgstr "Catálogo de serviço json inválido." - -#, python-format -msgid "Invalid sort direction: %s" -msgstr "Direção de classificação inválida: %s" - -#, python-format -msgid "" -"Invalid sort key: %(sort_key)s. It must be one of the following: " -"%(available)s." -msgstr "" -"Chave de classificação inválida: %(sort_key)s. Deve ser um dos seguintes: " -"%(available)s." - -#, python-format -msgid "Invalid status value: %s" -msgstr "Valro de status inválido: %s" - -#, python-format -msgid "Invalid status: %s" -msgstr "Status inválido: %s" - -#, python-format -msgid "Invalid time format for %s." -msgstr "Fromato de horário inválido para %s" - -#, python-format -msgid "Invalid type value: %s" -msgstr "Valor de tipo inválido: %s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition namespace " -"with the same name of %s" -msgstr "" -"Atualização inválida. Ela resultaria em uma propriedade de definição de " -"metadados duplicada com o mesmo nome de %s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Atualização inválida. Ela resultaria em um objeto de definição de metadados " -"duplicado com o mesmo nome=%(name)s no namespace=%(namespace_name)s." - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Atualização inválida. Ela resultaria em um objeto de definição de metadados " -"duplicado com o mesmo nome=%(name)s no namespace=%(namespace_name)s." - -#, python-format -msgid "" -"Invalid update. 
It would result in a duplicate metadata definition property " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Atualização inválida. Ela resultaria em uma propriedade de definição de " -"metadados duplicada com o mesmo nome=%(name)s no namespace=" -"%(namespace_name)s." - -#, python-format -msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" -msgstr "Valor inválido '%(value)s' para o parâmetro '%(param)s': %(extra_msg)s" - -#, python-format -msgid "Invalid value for option %(option)s: %(value)s" -msgstr "Valor inválido para a opção %(option)s: %(value)s" - -#, python-format -msgid "Invalid visibility value: %s" -msgstr "Valor de visibilidade inválido: %s" - -msgid "It's invalid to provide multiple image sources." -msgstr "é inválido fornecer múltiplas fontes de imagens." - -msgid "It's not allowed to add locations if locations are invisible." -msgstr "Não é permitido adicionar locais se os locais forem invisíveis." - -msgid "It's not allowed to remove locations if locations are invisible." -msgstr "Não é permitido remover locais se os locais forem invisíveis." - -msgid "It's not allowed to update locations if locations are invisible." -msgstr "Não é permitido atualizar locais se os locais forem invisíveis." - -msgid "List of strings related to the image" -msgstr "Lista de sequências relacionadas à imagem" - -msgid "Malformed JSON in request body." -msgstr "JSON malformado no corpo da solicitação." - -msgid "Maximal age is count of days since epoch." -msgstr "A idade máxima é a contagem de dias desde a época." - -#, python-format -msgid "Maximum redirects (%(redirects)s) was exceeded." -msgstr "O máximo de redirecionamentos (%(redirects)s) foi excedido." 
- -#, python-format -msgid "Member %(member_id)s is duplicated for image %(image_id)s" -msgstr "O membro %(member_id)s é duplicado para a imagem %(image_id)s" - -msgid "Member can't be empty" -msgstr "Membro não pode ser vazio" - -msgid "Member to be added not specified" -msgstr "Membro a ser incluído não especificado" - -msgid "Membership could not be found." -msgstr "Associação não pôde ser localizada." - -#, python-format -msgid "" -"Metadata definition namespace %(namespace)s is protected and cannot be " -"deleted." -msgstr "" -"O namespace de definição de metadados %(namespace)s é protegido e não pode " -"ser excluída." - -#, python-format -msgid "Metadata definition namespace not found for id=%s" -msgstr "Namespace de definição de metadados não localizado para o id=%s" - -#, python-format -msgid "" -"Metadata definition object %(object_name)s is protected and cannot be " -"deleted." -msgstr "" -"O objeto de definição de metadados %(object_name)s é protegido e não pode " -"ser excluída." - -#, python-format -msgid "Metadata definition object not found for id=%s" -msgstr "Objeto de definição de metadados não localizado para o id=%s" - -#, python-format -msgid "" -"Metadata definition property %(property_name)s is protected and cannot be " -"deleted." -msgstr "" -"A propriedade de definição de metadados %(property_name)s é protegida e não " -"pode ser excluída." - -#, python-format -msgid "Metadata definition property not found for id=%s" -msgstr "Propriedade de definição de metadados não localizada para id=%s" - -#, python-format -msgid "" -"Metadata definition resource-type %(resource_type_name)s is a seeded-system " -"type and cannot be deleted." -msgstr "" -"A definição de metadados resource-type %(resource_type_name)s é um tipo de " -"sistema com valor sementee não pode ser excluída." - -#, python-format -msgid "" -"Metadata definition resource-type-association %(resource_type)s is protected " -"and cannot be deleted." 
-msgstr "" -"A definição de metadados resource-type-association %(resource_type)s é " -"protegida e não poderá ser excluída." - -#, python-format -msgid "" -"Metadata definition tag %(tag_name)s is protected and cannot be deleted." -msgstr "" -"A identificação da definição de metadados %(tag_name)s é protegida e não " -"pode ser excluída." - -#, python-format -msgid "Metadata definition tag not found for id=%s" -msgstr "Identificação de definição de metadados não localizada para o id=%s" - -msgid "Minimal rows limit is 1." -msgstr "O limite mínimo de linhas é 1." - -#, python-format -msgid "Missing required credential: %(required)s" -msgstr "Credencial necessária ausente: %(required)s" - -#, python-format -msgid "" -"Multiple 'image' service matches for region %(region)s. This generally means " -"that a region is required and you have not supplied one." -msgstr "" -"Diversas correspondências do serviço de 'imagem' para a região %(region)s. " -"Isso geralmente significa que uma região é necessária e você não a forneceu." - -msgid "No authenticated user" -msgstr "Usuário não autenticado" - -#, python-format -msgid "No image found with ID %s" -msgstr "Nenhuma imagem encontrada com o ID %s" - -#, python-format -msgid "No location found with ID %(loc)s from image %(img)s" -msgstr "Nenhum local localizado com o ID %(loc)s da imagem %(img)s" - -msgid "No permission to share that image" -msgstr "Nenhum permissão para compartilhar essa imagem" - -#, python-format -msgid "Not allowed to create members for image %s." -msgstr "Não é permitido criar membros para a imagem %s." - -#, python-format -msgid "Not allowed to deactivate image in status '%s'" -msgstr "Não é permitido desativar a imagem no status '%s'" - -#, python-format -msgid "Not allowed to delete members for image %s." -msgstr "Não é permitido excluir membros para a imagem %s." - -#, python-format -msgid "Not allowed to delete tags for image %s." -msgstr "Não é permitido excluir identificações para a imagem %s." 
- -#, python-format -msgid "Not allowed to list members for image %s." -msgstr "Não é permitido listar os membros para a imagem %s." - -#, python-format -msgid "Not allowed to reactivate image in status '%s'" -msgstr "Não é permitido reativar a imagem no status '%s'" - -#, python-format -msgid "Not allowed to update members for image %s." -msgstr "Não é permitido atualizar os membros para a imagem %s." - -#, python-format -msgid "Not allowed to update tags for image %s." -msgstr "Não é permitido atualizar as identificações para a imagem %s." - -#, python-format -msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" -msgstr "" -"Não é permitido fazer upload de dados de imagem para a imagem %(image_id)s: " -"%(error)s" - -msgid "Number of sort dirs does not match the number of sort keys" -msgstr "" -"O número de diretórios de classificação não corresponde ao número de chaves " -"de classificação" - -msgid "OVA extract is limited to admin" -msgstr "O extrato de OVA é limitado para administrador" - -msgid "Old and new sorting syntax cannot be combined" -msgstr "A sintaxe de classificação nova e antiga não podem ser combinadas" - -#, python-format -msgid "Operation \"%s\" requires a member named \"value\"." -msgstr "A operação \"%s\" requer um membro denominado \"valor\"." - -msgid "" -"Operation objects must contain exactly one member named \"add\", \"remove\", " -"or \"replace\"." -msgstr "" -"Objetos de operação devem conter exatamente um membro denominado \"incluir" -"\", \"remover\" ou \"substituir\"." - -msgid "" -"Operation objects must contain only one member named \"add\", \"remove\", or " -"\"replace\"." -msgstr "" -"Objetos de operação devem conter apenas um membro denominado \"incluir\", " -"\"remover\" ou \"substituir\"." - -msgid "Operations must be JSON objects." -msgstr "As operações devem ser objetos JSON." 
- -#, python-format -msgid "Original locations is not empty: %s" -msgstr "Localidade original não está vazia: %s" - -msgid "Owner can't be updated by non admin." -msgstr "O proprietário não pode ser atualizado por um não administrador." - -msgid "Owner must be specified to create a tag." -msgstr "O proprietário deve ser especificado para criar uma identificação." - -msgid "Owner of the image" -msgstr "Proprietário da imagem" - -msgid "Owner of the namespace." -msgstr "Proprietário do namespace." - -msgid "Param values can't contain 4 byte unicode." -msgstr "Valores de parâmetro não podem conter unicode de 4 bytes." - -#, python-format -msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." -msgstr "" -"O ponteiro `%s` contém \"~\" não parte de uma sequência de escape " -"reconhecida." - -#, python-format -msgid "Pointer `%s` contains adjacent \"/\"." -msgstr "O ponteiro `%s` contém uma \"/\" adjacente." - -#, python-format -msgid "Pointer `%s` does not contains valid token." -msgstr "O ponteiro `%s` não contém um token válido." - -#, python-format -msgid "Pointer `%s` does not start with \"/\"." -msgstr "O ponteiro `%s` não começa com \"/\"." - -#, python-format -msgid "Pointer `%s` end with \"/\"." -msgstr "O ponteiro `%s` termina com \"/\"." - -#, python-format -msgid "Port \"%s\" is not valid." -msgstr "Porta \"%s\" não é válida." - -#, python-format -msgid "Process %d not running" -msgstr "O processo %d não está em execução" - -#, python-format -msgid "Properties %s must be set prior to saving data." -msgstr "As propriedades %s devem ser configuradas antes de salvar os dados." - -#, python-format -msgid "" -"Property %(property_name)s does not start with the expected resource type " -"association prefix of '%(prefix)s'." -msgstr "" -"A propriedade %(property_name)s não começa com o prefixo de associação do " -"tipo de recurso esperado de ‘%(prefix)s‘." - -#, python-format -msgid "Property %s already present." 
-msgstr "Propriedade %s já presente." - -#, python-format -msgid "Property %s does not exist." -msgstr "A propriedade %s não existe." - -#, python-format -msgid "Property %s may not be removed." -msgstr "A propriedade %s pode não ser removida." - -#, python-format -msgid "Property %s must be set prior to saving data." -msgstr "A propriedade %s deve ser configurada antes de salvar os dados." - -#, python-format -msgid "Property '%s' is protected" -msgstr "Propriedade '%s' é protegida" - -msgid "Property names can't contain 4 byte unicode." -msgstr "Os nomes de propriedade não podem conter unicode de 4 bytes." - -#, python-format -msgid "" -"Provided image size must match the stored image size. (provided size: " -"%(ps)d, stored size: %(ss)d)" -msgstr "" -"O tamanho da imagem fornecida deve corresponder ao tamanho da imagem " -"armazenada. (tamanho fornecido: %(ps)d, tamanho armazenado:%(ss)d)" - -#, python-format -msgid "Provided object does not match schema '%(schema)s': %(reason)s" -msgstr "O objeto fornecido não corresponde ao esquema '%(schema)s': %(reason)s" - -#, python-format -msgid "Provided status of task is unsupported: %(status)s" -msgstr "Status de tarefa fornecido não é suportado: %(status)s" - -#, python-format -msgid "Provided type of task is unsupported: %(type)s" -msgstr "Tipo de tarefa fornecido não é suportado: %(type)s" - -msgid "Provides a user friendly description of the namespace." -msgstr "Fornece uma descrição fácil do namespace." - -msgid "Received invalid HTTP redirect." -msgstr "Redirecionamento de HTTP inválido recebido." - -#, python-format -msgid "Redirecting to %(uri)s for authorization." -msgstr "Redirecionando para %(uri)s para obter autorização." - -#, python-format -msgid "Registry service can't use %s" -msgstr "Serviço de registro não pode utilizar %s" - -#, python-format -msgid "Registry was not configured correctly on API server. Reason: %(reason)s" -msgstr "" -"O registro não foi configurado corretamente no servidor de API. 
Motivo: " -"%(reason)s" - -#, python-format -msgid "Reload of %(serv)s not supported" -msgstr "Recarregamento de %(serv)s não suportado" - -#, python-format -msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "Recarregando %(serv)s (pid %(pid)s) com sinal (%(sig)s)" - -#, python-format -msgid "Removing stale pid file %s" -msgstr "Removendo o arquivo pid %s antigo" - -msgid "Request body must be a JSON array of operation objects." -msgstr "" -"O corpo da solicitação deve ser uma matriz JSON de objetos de operação." - -msgid "Request must be a list of commands" -msgstr "Requisição deve ser uma lista de comandos" - -#, python-format -msgid "Required store %s is invalid" -msgstr "O armazenamento necessário %s é inválido" - -msgid "" -"Resource type names should be aligned with Heat resource types whenever " -"possible: http://docs.openstack.org/developer/heat/template_guide/openstack." -"html" -msgstr "" -"Os nomes do tipo de recurso devem estar alinhados aos tipos de recurso do " -"Heat sempre que possível: http://docs.openstack.org/developer/heat/" -"template_guide/openstack.html" - -msgid "Response from Keystone does not contain a Glance endpoint." -msgstr "A resposta de Keystone não contém um terminal de Visão Rápida." - -msgid "Scope of image accessibility" -msgstr "Escopo de acessibilidade de imagem" - -msgid "Scope of namespace accessibility." -msgstr "Escopo da acessibilidade do namespace." - -#, python-format -msgid "Server %(serv)s is stopped" -msgstr "O servidor %(serv)s foi interrompido" - -#, python-format -msgid "Server worker creation failed: %(reason)s." -msgstr "Falha na criação do trabalhador do servidor: %(reason)s." - -msgid "Signature verification failed" -msgstr "A verificação de assinatura falhou" - -msgid "Size of image file in bytes" -msgstr "Tamanho do arquivo da imagem em bytes " - -msgid "" -"Some resource types allow more than one key / value pair per instance. 
For " -"example, Cinder allows user and image metadata on volumes. Only the image " -"properties metadata is evaluated by Nova (scheduling or drivers). This " -"property allows a namespace target to remove the ambiguity." -msgstr "" -"Alguns tipos de recurso permitem mais de um par de chave/valor por " -"instância. Por exemplo, o Cinder permite metadados do usuário e da imagem " -"em volumes. Somente os metadados de propriedades da imagem são avaliados " -"pelo Nova (planejamento ou drivers). Essa propriedade permite que um destino " -"de namespace remova a ambiguidade." - -msgid "Sort direction supplied was not valid." -msgstr "A direção de classificação fornecida não era válida." - -msgid "Sort key supplied was not valid." -msgstr "A chave de classificação fornecida não era válida." - -msgid "" -"Specifies the prefix to use for the given resource type. Any properties in " -"the namespace should be prefixed with this prefix when being applied to the " -"specified resource type. Must include prefix separator (e.g. a colon :)." -msgstr "" -"Especifica o prefixo a ser usado para o tipo de recurso determinado. " -"Qualquer propriedade no namespace deve ter esse prefixo ao ser aplicada ao " -"tipo de recurso especificado. O separador de prefixo deve ser incluído (p. " -"ex., dois pontos :)." - -msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." -msgstr "O status deve ser \"pendente\", \"aceito\" ou \"rejeitado\"." 
- -msgid "Status not specified" -msgstr "Status não especificado" - -msgid "Status of the image" -msgstr "Status da imagem" - -#, python-format -msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "" -"Status de transição de %(cur_status)s para %(new_status)s não é permitido" - -#, python-format -msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "Parando %(serv)s (pid %(pid)s) com sinal (%(sig)s)" - -#, python-format -msgid "Store for image_id not found: %s" -msgstr "Armazenamento de image_id não localizado: %s" - -#, python-format -msgid "Store for scheme %s not found" -msgstr "Armazenamento do esquema %s não localizado" - -#, python-format -msgid "" -"Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " -"(%(actual)s) did not match. Setting image status to 'killed'." -msgstr "" -"%(attr)s fornecido (%(supplied)s) e %(attr)s gerado da imagem transferida " -"por upload (%(actual)s) não corresponderam. Configurando o status da imagem " -"para 'eliminado'." - -msgid "Supported values for the 'container_format' image attribute" -msgstr "Valores suportados para o atributo de imagem 'container_format'" - -msgid "Supported values for the 'disk_format' image attribute" -msgstr "Valores suportados para o atributo de imagem 'disk_format'" - -#, python-format -msgid "Suppressed respawn as %(serv)s was %(rsn)s." -msgstr "Novo spawn suprimido já que %(serv)s era %(rsn)s." - -msgid "System SIGHUP signal received." -msgstr "Sinal SIGHUP do sistema recebido." 
- -#, python-format -msgid "Task '%s' is required" -msgstr "Tarefa '%s é obrigatória" - -msgid "Task does not exist" -msgstr "A tarefa não existe" - -msgid "Task failed due to Internal Error" -msgstr "A tarefa falhou devido a Erro interno" - -msgid "Task was not configured properly" -msgstr "A tarefa não foi configurada adequadamente" - -#, python-format -msgid "Task with the given id %(task_id)s was not found" -msgstr "Tarefa com o ID fornecido %(task_id)s não foi localizada" - -msgid "The \"changes-since\" filter is no longer available on v2." -msgstr "O filtro \" changes-since \" não está mais disponível na v2." - -#, python-format -msgid "The CA file you specified %s does not exist" -msgstr "O arquivo CA especificado %s não existe" - -#, python-format -msgid "" -"The Image %(image_id)s object being created by this task %(task_id)s, is no " -"longer in valid status for further processing." -msgstr "" -"O objeto da Imagem %(image_id)s que está sendo criado por esta tarefa " -"%(task_id)s não está mais no status válido para processamento adicional." - -msgid "The Store URI was malformed." -msgstr "O URI de Armazenamento foi malformado." - -msgid "" -"The URL to the keystone service. If \"use_user_token\" is not in effect and " -"using keystone auth, then URL of keystone can be specified." -msgstr "" -"A URL para o serviço do keystone. Se \"use_user_token\" não estiver em vigor " -"e utilizando uma autorização do keystone, então a URL do keystone pode ser " -"especificada." - -msgid "" -"The administrators password. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"A senha do administrador. Se \"use_user_token\" não estiver em vigor, então " -"as credenciais do administrador podem ser especificadas." - -msgid "" -"The administrators user name. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"O nome de usuário do administrador. 
Se \"use_user_token\" não estiver em " -"vigor, então as credenciais do administrador podem ser especificadas." - -#, python-format -msgid "The cert file you specified %s does not exist" -msgstr "O arquivo de certificado especificado %s não existe" - -msgid "The current status of this task" -msgstr "O status atual desta tarefa" - -#, python-format -msgid "" -"The device housing the image cache directory %(image_cache_dir)s does not " -"support xattr. It is likely you need to edit your fstab and add the " -"user_xattr option to the appropriate line for the device housing the cache " -"directory." -msgstr "" -"O dispositivo no qual reside o diretório de cache de imagem " -"%(image_cache_dir)s não suporta xattr. É provável que você precise editar " -"fstab e incluir a opção user_xattr na linha apropriada do dispositivo que " -"contém o diretório de cache." - -#, python-format -msgid "" -"The given uri is not valid. Please specify a valid uri from the following " -"list of supported uri %(supported)s" -msgstr "" -"O URI fornecido não é válido. Especifique um uri válido a partir da seguinte " -"lista de URI suportados %(supported)s" - -#, python-format -msgid "The incoming image is too large: %s" -msgstr "A imagem recebida é muito grande: %s" - -#, python-format -msgid "The key file you specified %s does not exist" -msgstr "O arquivo-chave especificado %s não existe" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image locations. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"O limite foi excedido no número de localizações de imagens permitidas. " -"Tentativa: %(attempted)s, Máximo: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image members for this " -"image. Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"O limite foi excedido no número de membros de imagem permitidos para esta " -"imagem. 
Tentativa: %(attempted)s, Máximo: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"O limite foi excedido no número de propriedades de imagem permitidas. " -"Tentativa: %(attempted)s, Máximo: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(num)s, Maximum: %(quota)s" -msgstr "" -"O limite foi excedido no número de propriedades de imagem permitidas. " -"Tentativa: %(num)s, Máximo: %(quota)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image tags. Attempted: " -"%(attempted)s, Maximum: %(maximum)s" -msgstr "" -"O limite foi excedido no número de tags de imagem permitidas. Tentativa: " -"%(attempted)s, Máximo: %(maximum)s" - -#, python-format -msgid "The location %(location)s already exists" -msgstr "O local %(location)s já existe" - -#, python-format -msgid "The location data has an invalid ID: %d" -msgstr "Os dados da localização têm um ID inválido: %d" - -#, python-format -msgid "" -"The metadata definition %(record_type)s with name=%(record_name)s not " -"deleted. Other records still refer to it." -msgstr "" -"Definição de metadados %(record_type)s com o nome=%(record_name)s não " -"excluída. Outros registros ainda se referem a ela." - -#, python-format -msgid "The metadata definition namespace=%(namespace_name)s already exists." -msgstr "O namespace de definição de metadados=%(namespace_name)s já existe." - -#, python-format -msgid "" -"The metadata definition object with name=%(object_name)s was not found in " -"namespace=%(namespace_name)s." -msgstr "" -"O objeto de definição de metadados com o nome=%(object_name)s não foi " -"localizado no namespace=%(namespace_name)s." 
- -#, python-format -msgid "" -"The metadata definition property with name=%(property_name)s was not found " -"in namespace=%(namespace_name)s." -msgstr "" -"A propriedade de definição de metadados com o nome=%(property_name)s não foi " -"localizada no namespace=%(namespace_name)s." - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s already exists." -msgstr "" -"A associação do tipo de recurso de definição de metadados do tipo derecurso=" -"%(resource_type_name)s ao namespace=%(namespace_name)s já existe." - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s, was not found." -msgstr "" -"A associação do tipo de recurso de definição de metadados do tipo derecurso=" -"%(resource_type_name)s ao namespace=%(namespace_name)s, não foi localizada." - -#, python-format -msgid "" -"The metadata definition resource-type with name=%(resource_type_name)s, was " -"not found." -msgstr "" -"O tipo de recurso de definição de metadados com o nome=" -"%(resource_type_name)s, não foi localizado." - -#, python-format -msgid "" -"The metadata definition tag with name=%(name)s was not found in namespace=" -"%(namespace_name)s." -msgstr "" -"A identificação da definição de metadados com o nome=%(name)s não foi " -"localizada no namespace=%(namespace_name)s." - -msgid "The parameters required by task, JSON blob" -msgstr "Os parâmetros requeridos pela tarefa, blob JSON" - -msgid "The provided image is too large." -msgstr "A imagem fornecida é muito grande." - -msgid "" -"The region for the authentication service. If \"use_user_token\" is not in " -"effect and using keystone auth, then region name can be specified." -msgstr "" -"A região para o serviço de autenticação. 
Se \"use_user_token\" não estiver " -"em vigor e utilizando a autorização do keystone, então o nome da região pode " -"ser especificado." - -msgid "The request returned 500 Internal Server Error." -msgstr "A solicitação retornou 500 Erro Interno do Servidor." - -msgid "" -"The request returned 503 Service Unavailable. This generally occurs on " -"service overload or other transient outage." -msgstr "" -"A solicitação retornou 503 Serviço Indisponível. Isso geralmente ocorre em " -"sobrecarga de serviço ou outra interrupção temporária." - -#, python-format -msgid "" -"The request returned a 302 Multiple Choices. This generally means that you " -"have not included a version indicator in a request URI.\n" -"\n" -"The body of response returned:\n" -"%(body)s" -msgstr "" -"A solicitação retornou 302 Várias Opções. Isso geralmente significa que você " -"não incluiu um indicador de versão em um URI de solicitação.\n" -"\n" -"O corpo da resposta retornou:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned a 413 Request Entity Too Large. This generally means " -"that rate limiting or a quota threshold was breached.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"A solicitação retornou 413 Entidade de Solicitação Muito Grande. Isso " -"geralmente significa que a taxa de limitação ou um limite de cota foi " -"violado.\n" -"\n" -"O corpo de resposta:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned an unexpected status: %(status)s.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"A solicitação retornou um status inesperado: %(status)s.\n" -"\n" -"O corpo de resposta:\n" -"%(body)s" - -msgid "" -"The requested image has been deactivated. Image data download is forbidden." -msgstr "" -"A imagem solicitada foi desativada. O download de dados da imagem é proibido." 
- -msgid "The result of current task, JSON blob" -msgstr "O resultado da tarefa atual, blob JSON" - -#, python-format -msgid "" -"The size of the data %(image_size)s will exceed the limit. %(remaining)s " -"bytes remaining." -msgstr "" -"O tamanho dos dados que %(image_size)s irá exceder do limite. %(remaining)s " -"bytes restantes." - -#, python-format -msgid "The specified member %s could not be found" -msgstr "O membro especificado %s não pôde ser localizado" - -#, python-format -msgid "The specified metadata object %s could not be found" -msgstr "O objeto de metadados especificado %s não pôde ser localizado" - -#, python-format -msgid "The specified metadata tag %s could not be found" -msgstr "A identificação de metadados especificada %s não pôde ser localizada" - -#, python-format -msgid "The specified namespace %s could not be found" -msgstr "O namespace especificado %s não pôde ser localizado" - -#, python-format -msgid "The specified property %s could not be found" -msgstr "A propriedade especificada %s não pôde ser localizada" - -#, python-format -msgid "The specified resource type %s could not be found " -msgstr "O tipo de recurso especificado %s não pôde ser localizado " - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'" -msgstr "" -"O status de local da imagem excluída só pode ser definido como " -"'pending_delete' ou 'deleted'" - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'." -msgstr "" -"O status de local da imagem excluída só pode ser definido como " -"'pending_delete' ou 'deleted'." - -msgid "The status of this image member" -msgstr "O status desse membro da imagem" - -msgid "" -"The strategy to use for authentication. If \"use_user_token\" is not in " -"effect, then auth strategy can be specified." -msgstr "" -"A estratégia a ser utilizada para autenticação. 
Se \"use_user_token\" não " -"estiver em vigor, então a estratégia de autorização pode ser especificada." - -#, python-format -msgid "" -"The target member %(member_id)s is already associated with image " -"%(image_id)s." -msgstr "" -"O membro de destino %(member_id)s já está associado à imagem %(image_id)s." - -msgid "" -"The tenant name of the administrative user. If \"use_user_token\" is not in " -"effect, then admin tenant name can be specified." -msgstr "" -"O nome de locatário do usuário administrativo. Se \"use_user_token\" não " -"estiver em vigor, então o nome de locatário do administrador pode ser " -"especificado." - -msgid "The type of task represented by this content" -msgstr "O tipo de tarefa representada por este conteúdo" - -msgid "The unique namespace text." -msgstr "O texto do namespace exclusivo." - -msgid "The user friendly name for the namespace. Used by UI if available." -msgstr "" -"O nome fácil do namespace. Usando pela interface com o usuário, se " -"disponível." - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. Error: %(ioe)s" -msgstr "" -"Há um problema com o %(error_key_name)s %(error_filename)s. Verifique-o. " -"Erro: %(ioe)s" - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. OpenSSL error: %(ce)s" -msgstr "" -"Há um problema com o %(error_key_name)s %(error_filename)s. Verifique-o. " -"Erro de OpenSSL: %(ce)s" - -#, python-format -msgid "" -"There is a problem with your key pair. Please verify that cert " -"%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" -msgstr "" -"Há um problema com seu par de chaves. Verifique se o certificado " -"%(cert_file)s e a chave %(key_file)s estão juntos. Erro de OpenSSL %(ce)s" - -msgid "There was an error configuring the client." -msgstr "Houve um erro ao configurar o cliente." 
- -msgid "There was an error connecting to a server" -msgstr "Houve um erro ao conectar a um servidor" - -msgid "" -"This operation is currently not permitted on Glance Tasks. They are auto " -"deleted after reaching the time based on their expires_at property." -msgstr "" -"Esta operação não é atualmente permitida em Tarefas do Glance. Elas são " -"automaticamente excluídas após atingir o tempo com base em sua propriedade " -"expires_at." - -msgid "This operation is currently not permitted on Glance images details." -msgstr "" -"Esta operação não é atualmente permitida em detalhes de imagens do Glance." - -msgid "" -"Time in hours for which a task lives after, either succeeding or failing" -msgstr "Tempo em horas durante o qual uma tarefa é mantida, com êxito ou falha" - -msgid "Too few arguments." -msgstr "Muito poucos argumentos." - -msgid "" -"URI cannot contain more than one occurrence of a scheme.If you have " -"specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " -"you need to change it to use the swift+http:// scheme, like so: swift+http://" -"user:pass@authurl.com/v1/container/obj" -msgstr "" -"URI não pode conter mais de uma ocorrência de um esquema. Se você tiver " -"especificado um URI como swift://user:pass@http://authurl.com/v1/container/" -"obj, precisará alterá-lo para usar o esquema swift+http://, desta forma: " -"swift+http://user:pass@authurl.com/v1/container/obj" - -msgid "URL to access the image file kept in external store" -msgstr "URL para acessar o arquivo de imagem mantido no armazenamento externo " - -#, python-format -msgid "" -"Unable to create pid file %(pid)s. Running as non-root?\n" -"Falling back to a temp file, you can stop %(service)s service using:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" -msgstr "" -"Impossível criar arquivo pid %(pid)s. 
Executando como não raiz?\n" -"Voltando para um arquivo temporário, é possível parar o serviço %(service)s " -"usando:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" - -#, python-format -msgid "Unable to filter by unknown operator '%s'." -msgstr "Não é possível filtrar por operador desconhecido '%s'." - -msgid "Unable to filter on a range with a non-numeric value." -msgstr "Não é possível filtrar um intervalo com um valor não numérico." - -msgid "Unable to filter on a unknown operator." -msgstr "Não é possível filtrar em um operador desconhecido." - -msgid "Unable to filter using the specified operator." -msgstr "Não é possível filtrar usando o operador especificado." - -msgid "Unable to filter using the specified range." -msgstr "Não é possível filtrar usando o intervalo especificado." - -#, python-format -msgid "Unable to find '%s' in JSON Schema change" -msgstr "Não é possível localizar '%s' na mudança de Esquema JSON" - -#, python-format -msgid "" -"Unable to find `op` in JSON Schema change. It must be one of the following: " -"%(available)s." -msgstr "" -"Não é possível localizar `op` na mudança de Esquema JSON. Deve ser um dos " -"seguintes: %(available)s." - -msgid "Unable to increase file descriptor limit. Running as non-root?" -msgstr "" -"Não é possível aumentar o limite do descritor de arquivo. Executando como " -"não-raiz?" - -#, python-format -msgid "" -"Unable to load %(app_name)s from configuration file %(conf_file)s.\n" -"Got: %(e)r" -msgstr "" -"Não é possível carregar %(app_name)s do arquivo de configuração " -"%(conf_file)s.\n" -"Obtido: %(e)r" - -#, python-format -msgid "Unable to load schema: %(reason)s" -msgstr "Não é possível carregar o esquema: %(reason)s" - -#, python-format -msgid "Unable to locate paste config file for %s." -msgstr "Impossível localizar o arquivo de configuração de colagem para %s." 
- -#, python-format -msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" -msgstr "" -"Não é possível fazer upload de dados de imagem duplicados para a imagem " -"%(image_id)s: %(error)s" - -msgid "Unauthorized image access" -msgstr "Acesso à imagem desautorizado" - -msgid "Unexpected body type. Expected list/dict." -msgstr "Tipo de corpo inesperado. Lista/dicionário esperados." - -#, python-format -msgid "Unexpected response: %s" -msgstr "Resposta inesperada: %s" - -#, python-format -msgid "Unknown auth strategy '%s'" -msgstr "Estratégia de autenticação desconhecida %s'" - -#, python-format -msgid "Unknown command: %s" -msgstr "Comando desconhecido: %s" - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "Direção de classificação desconhecida; deve ser 'desc' ou 'asc'" - -msgid "Unrecognized JSON Schema draft version" -msgstr "Versão rascunho do Esquema JSON não reconhecida" - -msgid "Unrecognized changes-since value" -msgstr "Valor desde as alterações não reconhecido" - -#, python-format -msgid "Unsupported sort_dir. Acceptable values: %s" -msgstr "sort_dir não suportado. Valores aceitáveis: %s" - -#, python-format -msgid "Unsupported sort_key. Acceptable values: %s" -msgstr "sort_key não suportado. Valores aceitáveis: %s" - -msgid "Virtual size of image in bytes" -msgstr "Tamanho virtual de imagem em bytes " - -#, python-format -msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" -msgstr "" -"Esperou 15 segundos para pid %(pid)s (%(file)s) ser eliminado; desistindo" - -msgid "" -"When running server in SSL mode, you must specify both a cert_file and " -"key_file option value in your configuration file" -msgstr "" -"Ao executar o servidor no modo SSL, você deve especificar um valor de opção " -"cert_file e key_file no seu arquivo de configuração" - -msgid "" -"Whether to pass through the user token when making requests to the registry. 
" -"To prevent failures with token expiration during big files upload, it is " -"recommended to set this parameter to False.If \"use_user_token\" is not in " -"effect, then admin credentials can be specified." -msgstr "" -"Se passar pelo token do usuário ao fazer solicitações ao registro. Para " -"evitar falhas com expiração de token durante o upload de arquivos grandes, é " -"recomendável configurar esse parâmetro como False. Se \"use_user_token\" não " -"estiver em vigor, as credenciais do administrador poderão ser especificadas." - -#, python-format -msgid "Wrong command structure: %s" -msgstr "Estrutura de comandos incorreta: %s" - -msgid "You are not authenticated." -msgstr "Você não está autenticado." - -msgid "You are not authorized to complete this action." -msgstr "Você não está autorizado a concluir esta ação." - -#, python-format -msgid "You are not authorized to lookup image %s." -msgstr "Você não está autorizado a consultar a imagem %s." - -#, python-format -msgid "You are not authorized to lookup the members of the image %s." -msgstr "Você não está autorizado a consultar os membros da imagem %s." - -#, python-format -msgid "You are not permitted to create a tag in the namespace owned by '%s'" -msgstr "" -"Você não tem permissão para criar uma identificação no namespace de " -"propriedade de '%s'" - -msgid "You are not permitted to create image members for the image." -msgstr "Você não tem permissão para criar membros da imagem." - -#, python-format -msgid "You are not permitted to create images owned by '%s'." -msgstr "Você não tem permissão para criar imagens de propriedade de '%s'." 
- -#, python-format -msgid "You are not permitted to create namespace owned by '%s'" -msgstr "Você não tem permissão para criar namespace de propriedade de '%s'" - -#, python-format -msgid "You are not permitted to create object owned by '%s'" -msgstr "Você não tem permissão para criar objeto de propriedade de '%s'" - -#, python-format -msgid "You are not permitted to create property owned by '%s'" -msgstr "" -"Você não tem permissão para criar essa propriedade de propriedade de '%s'" - -#, python-format -msgid "You are not permitted to create resource_type owned by '%s'" -msgstr "Você não tem permissão para criar resource_type de propriedade de '%s'" - -#, python-format -msgid "You are not permitted to create this task with owner as: %s" -msgstr "" -"Você não tem permissão para criar essa tarefa com proprietário como: %s" - -msgid "You are not permitted to deactivate this image." -msgstr "Você não tem permissão para desativar esta imagem." - -msgid "You are not permitted to delete this image." -msgstr "Você não tem permissão para excluir esta imagem." - -msgid "You are not permitted to delete this meta_resource_type." -msgstr "Você não tem permissão para excluir esse meta_resource_type." - -msgid "You are not permitted to delete this namespace." -msgstr "Você não tem permissão para excluir esse namespace." - -msgid "You are not permitted to delete this object." -msgstr "Você não tem permissão para excluir esse objeto." - -msgid "You are not permitted to delete this property." -msgstr "Você não tem permissão para excluir essa propriedade." - -msgid "You are not permitted to delete this tag." -msgstr "Você não tem permissão para excluir esta identificação." - -#, python-format -msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." -msgstr "Você não ter permissão para modificar '%(attr)s' nesse %(resource)s." - -#, python-format -msgid "You are not permitted to modify '%s' on this image." 
-msgstr "Você não tem permissão para modificar '%s' nesta imagem." - -msgid "You are not permitted to modify locations for this image." -msgstr "Você não tem permissão para modificar locais para esta imagem." - -msgid "You are not permitted to modify tags on this image." -msgstr "Você não tem permissão para modificar tags nesta imagem." - -msgid "You are not permitted to modify this image." -msgstr "Você não tem permissão para modificar esta imagem." - -msgid "You are not permitted to reactivate this image." -msgstr "Você não tem permissão para reativar essa imagem." - -msgid "You are not permitted to set status on this task." -msgstr "Você não tem permissão para definir o status dessa tarefa." - -msgid "You are not permitted to update this namespace." -msgstr "Você não tem permissão para editar esse namespace." - -msgid "You are not permitted to update this object." -msgstr "Você não tem permissão para atualizar esse objeto." - -msgid "You are not permitted to update this property." -msgstr "Você não tem permissão para atualizar essa propriedade." - -msgid "You are not permitted to update this tag." -msgstr "Você não tem permissão para atualizar esta identificação." - -msgid "You are not permitted to upload data for this image." -msgstr "Você não tem permissão para fazer upload de dados para esta imagem." 
- -#, python-format -msgid "You cannot add image member for %s" -msgstr "Não é possível incluir o membro da imagem para %s" - -#, python-format -msgid "You cannot delete image member for %s" -msgstr "Não é possível excluir o membro da imagem para %s" - -#, python-format -msgid "You cannot get image member for %s" -msgstr "Não é possível obter o membro da imagem para %s" - -#, python-format -msgid "You cannot update image member %s" -msgstr "Não é possível atualizar o membro da imagem %s" - -msgid "You do not own this image" -msgstr "Você não possui essa imagem" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a cert, " -"however you have failed to supply either a key_file parameter or set the " -"GLANCE_CLIENT_KEY_FILE environ variable" -msgstr "" -"Você optou por usar SSL na conexão e forneceu um certificado, mas falhou em " -"fornecer um parâmetro key_file ou configurar a variável de ambiente " -"GLANCE_CLIENT_KEY_FILE" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a key, " -"however you have failed to supply either a cert_file parameter or set the " -"GLANCE_CLIENT_CERT_FILE environ variable" -msgstr "" -"Você optou por usar SSL na conexão e forneceu uma chave, mas falhou em " -"fornecer um parâmetro cert_file ou configurar a variável de ambiente " -"GLANCE_CLIENT_CERT_FILE" - -msgid "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" -msgstr "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" - -#, python-format -msgid "__init__() got unexpected keyword argument '%s'" -msgstr "__init__() obteve argumento de palavra-chave inesperado '%s'" - -#, python-format -msgid "" -"cannot transition from %(current)s to %(next)s in update (wanted from_state=" -"%(from)s)" -msgstr "" -"Não é possível a transição de %(current)s para %(next)s na atualização " -"(desejado from_state=%(from)s)" - -#, python-format -msgid 
"custom properties (%(props)s) conflict with base properties" -msgstr "" -"conflito de propriedades customizadas (%(props)s) com propriedades de base" - -msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" -msgstr "" -"nem o hub 'poll' nem o 'selects' do eventlet estão disponíveis nesta " -"plataforma" - -msgid "is_public must be None, True, or False" -msgstr "is_public deve ser Nenhum, True ou False" - -msgid "limit param must be an integer" -msgstr "o parâmetro limit deve ser um número inteiro" - -msgid "limit param must be positive" -msgstr "o parâmetro limit deve ser positivo" - -msgid "md5 hash of image contents." -msgstr "Hash md5 do conteúdo da imagem." - -#, python-format -msgid "new_image() got unexpected keywords %s" -msgstr "new_image() obteve palavras-chave inesperadas %s" - -msgid "protected must be True, or False" -msgstr "protegido deve ser True, ou False" - -#, python-format -msgid "unable to launch %(serv)s. Got error: %(e)s" -msgstr "Não é possível ativar %(serv)s. Obteve erro: %(e)s" - -#, python-format -msgid "x-openstack-request-id is too long, max size %s" -msgstr "x-openstack-request-id é muito longo; tamanho máximo %s" diff --git a/glance/locale/ru/LC_MESSAGES/glance.po b/glance/locale/ru/LC_MESSAGES/glance.po deleted file mode 100644 index f9a37c3f..00000000 --- a/glance/locale/ru/LC_MESSAGES/glance.po +++ /dev/null @@ -1,2091 +0,0 @@ -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: glance 15.0.0.0b3.dev29\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-06-23 20:54+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-12 05:21+0000\n" -"Last-Translator: Copied by Zanata \n" -"Language: ru\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" -"%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2)\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" -"Language-Team: Russian\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "%(cls)s exception was raised in the last rpc call: %(val)s" -msgstr "" -"В последнем вызове rpc возникла исключительная ситуация %(cls)s: %(val)s" - -#, python-format -msgid "%(m_id)s not found in the member list of the image %(i_id)s." -msgstr "%(m_id)s не найден в списке элементов образа %(i_id)s." - -#, python-format -msgid "%(serv)s (pid %(pid)s) is running..." -msgstr "%(serv)s (pid %(pid)s) работает..." - -#, python-format -msgid "%(serv)s appears to already be running: %(pid)s" -msgstr "%(serv)s уже запущен: %(pid)s" - -#, python-format -msgid "" -"%(strategy)s is registered as a module twice. %(module)s is not being used." -msgstr "" -"%(strategy)s зарегистрирована как модуль дважды. %(module)s не используется." - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Could not load the " -"filesystem store" -msgstr "" -"Служба %(task_id)s типа %(task_type)s настроена неправильно. Не удалось " -"загрузить хранилище в файловой системе" - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Missing work dir: " -"%(work_dir)s" -msgstr "" -"Служба %(task_id)s типа %(task_type)s настроена неправильно. Отсутствует " -"рабочий каталог: %(work_dir)s" - -#, python-format -msgid "%(verb)sing %(serv)s" -msgstr "%(verb)s на %(serv)s" - -#, python-format -msgid "%(verb)sing %(serv)s with %(conf)s" -msgstr "%(verb)s %(serv)s с %(conf)s" - -#, python-format -msgid "" -"%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " -"address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " -"separately from the port (i.e., \"[fe80::a:b:c]:9876\")." -msgstr "" -"%s Укажите пару host:port, где host - это адрес IPv4, адрес IPv6, имя хоста " -"или FQDN. 
При указании адреса IPv6 заключите его в квадратные скобки " -"отдельно от порта (например, \"[fe80::a:b:c]:9876\")." - -#, python-format -msgid "%s can't contain 4 byte unicode characters." -msgstr "%s не может содержать символы в кодировке 4-байтового unicode." - -#, python-format -msgid "%s is already stopped" -msgstr "%s уже остановлен" - -#, python-format -msgid "%s is stopped" -msgstr "%s остановлен" - -msgid "" -"--os_auth_url option or OS_AUTH_URL environment variable required when " -"keystone authentication strategy is enabled\n" -msgstr "" -"Опция --os_auth_url или переменная среды OS_AUTH_URL требуется, если " -"включена стратегия идентификации Keystone\n" - -msgid "A body is not expected with this request." -msgstr "В этом запросе не должно быть тела." - -#, python-format -msgid "" -"A metadata definition object with name=%(object_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"Объект определения метаданных с именем %(object_name)s уже существует в " -"пространстве имен %(namespace_name)s." - -#, python-format -msgid "" -"A metadata definition property with name=%(property_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"Свойство определения метаданных с именем %(property_name)s уже существует в " -"пространстве имен %(namespace_name)s." - -#, python-format -msgid "" -"A metadata definition resource-type with name=%(resource_type_name)s already " -"exists." -msgstr "" -"Тип ресурса определения метаданных с именем %(resource_type_name)s уже " -"существует." - -msgid "A set of URLs to access the image file kept in external store" -msgstr "" -"Набор URL для доступа к файлу образа, находящемуся во внешнем хранилище" - -msgid "Amount of disk space (in GB) required to boot image." -msgstr "Объем дисковой памяти (в ГБ), необходимой для загрузки образа." - -msgid "Amount of ram (in MB) required to boot image." -msgstr "Объем оперативной памяти (в МБ), необходимой для загрузки образа." 
- -msgid "An identifier for the image" -msgstr "Идентификатор образа" - -msgid "An identifier for the image member (tenantId)" -msgstr "Идентификатор участника образа (tenantId)" - -msgid "An identifier for the owner of this task" -msgstr "Идентификатор владельца задачи" - -msgid "An identifier for the task" -msgstr "Идентификатор задачи" - -msgid "An image file url" -msgstr "url файла образа" - -msgid "An image schema url" -msgstr "url схемы образа" - -msgid "An image self url" -msgstr "Собственный url образа" - -#, python-format -msgid "An image with identifier %s already exists" -msgstr "Образ с идентификатором %s уже существует" - -msgid "An import task exception occurred" -msgstr "Исключительная ситуация в задаче импорта" - -msgid "An object with the same identifier already exists." -msgstr "Объект с таким идентификатором уже существует." - -msgid "An object with the same identifier is currently being operated on." -msgstr "Объект с таким идентификатором занят в текущей операции." - -msgid "An object with the specified identifier was not found." -msgstr "Объект с указанным идентификатором не найден." - -msgid "An unknown exception occurred" -msgstr "Возникла неизвестная исключительная ситуация" - -msgid "An unknown task exception occurred" -msgstr "Непредвиденная исключительная ситуация" - -#, python-format -msgid "Attempt to upload duplicate image: %s" -msgstr "Попытка загрузить дубликат образа: %s" - -msgid "Attempted to update Location field for an image not in queued status." -msgstr "" -"Предпринята попытка обновить поле Расположение для образа, не находящегося в " -"очереди." - -#, python-format -msgid "Attribute '%(property)s' is read-only." -msgstr "Атрибут '%(property)s' предназначен только для чтения." - -#, python-format -msgid "Attribute '%(property)s' is reserved." -msgstr "Атрибут '%(property)s' зарезервирован." - -#, python-format -msgid "Attribute '%s' is read-only." -msgstr "Атрибут '%s' предназначен только для чтения." 
- -#, python-format -msgid "Attribute '%s' is reserved." -msgstr "Атрибут '%s' зарезервирован." - -msgid "Attribute container_format can be only replaced for a queued image." -msgstr "" -"container_format атрибута может быть заменен только для образа, находящегося " -"в очереди." - -msgid "Attribute disk_format can be only replaced for a queued image." -msgstr "" -"disk_format атрибута может быть заменен только для образа, находящегося в " -"очереди." - -#, python-format -msgid "Auth service at URL %(url)s not found." -msgstr "Служба идентификации с URL %(url)s не найдена." - -#, python-format -msgid "" -"Authentication error - the token may have expired during file upload. " -"Deleting image data for %s." -msgstr "" -"Ошибка идентификации. Возможно, время действия маркера истекло во время " -"загрузки файла. Данные образа для %s будут удалены." - -msgid "Authorization failed." -msgstr "Доступ не предоставлен." - -msgid "Available categories:" -msgstr "Доступные категории:" - -#, python-format -msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." -msgstr "" -"Недопустимый формат фильтра запроса \"%s\". Используйте нотацию DateTime ISO " -"8601." - -#, python-format -msgid "Bad Command: %s" -msgstr "Неправильная команда: %s" - -#, python-format -msgid "Bad header: %(header_name)s" -msgstr "Неправильный заголовок: %(header_name)s" - -#, python-format -msgid "Bad value passed to filter %(filter)s got %(val)s" -msgstr "Фильтру %(filter)s передано неверное значение, получено %(val)s" - -#, python-format -msgid "Badly formed S3 URI: %(uri)s" -msgstr "Неправильно сформированный URI S3: %(uri)s" - -#, python-format -msgid "Badly formed credentials '%(creds)s' in Swift URI" -msgstr "" -"Неправильно сформированные идентификационные данные '%(creds)s' в URI Swift" - -msgid "Badly formed credentials in Swift URI." -msgstr "Неправильно сформированные идентификационные данные в URI Swift." - -msgid "Body expected in request." 
-msgstr "В запросе ожидалось тело." - -msgid "Cannot be a negative value" -msgstr "Значение не может быть отрицательным" - -msgid "Cannot be a negative value." -msgstr "Не может быть отрицательным значением." - -#, python-format -msgid "Cannot convert image %(key)s '%(value)s' to an integer." -msgstr "Не удается преобразовать %(key)s '%(value)s' в целое число." - -msgid "Cannot remove last location in the image." -msgstr "Нельзя удалять последнее расположение из образа." - -#, python-format -msgid "Cannot save data for image %(image_id)s: %(error)s" -msgstr "Не удается сохранить данные для образа %(image_id)s: %(error)s" - -msgid "Cannot set locations to empty list." -msgstr "Список расположений не может быть пустым." - -msgid "Cannot upload to an unqueued image" -msgstr "Невозможно загрузить в образ, не находящийся в очереди" - -#, python-format -msgid "Checksum verification failed. Aborted caching of image '%s'." -msgstr "" -"Проверка контрольной суммой не выполнена. Кэширование образа '%s' прервано." - -msgid "Client disconnected before sending all data to backend" -msgstr "Клиент отключился, отправив не все данные в базовую систему" - -msgid "Command not found" -msgstr "Команда не найдена" - -msgid "Configuration option was not valid" -msgstr "Недопустимая опция конфигурации" - -#, python-format -msgid "Connect error/bad request to Auth service at URL %(url)s." -msgstr "" -"Ошибка соединения или неправильный запрос к службе идентификации с URL " -"%(url)s." - -#, python-format -msgid "Constructed URL: %s" -msgstr "Сформированный URL: %s" - -msgid "Container format is not specified." -msgstr "Не указан формат контейнера." 
- -msgid "Content-Type must be application/octet-stream" -msgstr "Content-Type должен быть задан в формате приложение/октет-поток" - -#, python-format -msgid "Corrupt image download for image %(image_id)s" -msgstr "Образ %(image_id)s скачан поврежденным" - -#, python-format -msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" -msgstr "" -"Не удалось выполнить связывание с %(host)s:%(port)s в течение 30 секунд" - -msgid "Could not find OVF file in OVA archive file." -msgstr "Не найден файл OVF в файле архива OVA." - -#, python-format -msgid "Could not find metadata object %s" -msgstr "Не найден объект метаданных %s" - -#, python-format -msgid "Could not find metadata tag %s" -msgstr "Не удалось найти тег метаданных %s" - -#, python-format -msgid "Could not find namespace %s" -msgstr "Не найдено пространство имен %s" - -#, python-format -msgid "Could not find property %s" -msgstr "Не найдено свойство %s" - -msgid "Could not find required configuration option" -msgstr "Обязательная опция конфигурации не найдена" - -#, python-format -msgid "Could not find task %s" -msgstr "Задача %s не найдена" - -#, python-format -msgid "Could not update image: %s" -msgstr "Не удалось изменить образ: %s" - -msgid "Currently, OVA packages containing multiple disk are not supported." -msgstr "В настоящее время пакеты OVA с несколькими дисками не поддерживаются." - -#, python-format -msgid "Data for image_id not found: %s" -msgstr "Не найдены данные для image_id: %s" - -msgid "Data supplied was not valid." -msgstr "Предоставленные данные недопустимы." 
- -msgid "Date and time of image member creation" -msgstr "Дата и время создания участника образа" - -msgid "Date and time of image registration" -msgstr "Дата и время регистрации образа" - -msgid "Date and time of last modification of image member" -msgstr "Дата и время последней модификации участника образа" - -msgid "Date and time of namespace creation" -msgstr "Дата и время создания пространства имен" - -msgid "Date and time of object creation" -msgstr "Дата и время создания объекта" - -msgid "Date and time of resource type association" -msgstr "Дата и время связывания типа ресурса" - -msgid "Date and time of tag creation" -msgstr "Дата и время создания тега" - -msgid "Date and time of the last image modification" -msgstr "Дата и время последнего изменения образа" - -msgid "Date and time of the last namespace modification" -msgstr "Дата и время последнего изменения пространства имен" - -msgid "Date and time of the last object modification" -msgstr "Дата и время последнего изменения объекта" - -msgid "Date and time of the last resource type association modification" -msgstr "Дата и время последнего изменения связи типа ресурса" - -msgid "Date and time of the last tag modification" -msgstr "Дата и время последнего изменения тега" - -msgid "Datetime when this resource was created" -msgstr "Дата и время создания ресурса" - -msgid "Datetime when this resource was updated" -msgstr "Дата и время обновления ресурса" - -msgid "Datetime when this resource would be subject to removal" -msgstr "Дата и время планового удаления ресурса" - -#, python-format -msgid "Denying attempt to upload image because it exceeds the quota: %s" -msgstr "Попытка загрузить образ с превышением квоты отклонена: %s" - -#, python-format -msgid "Denying attempt to upload image larger than %d bytes." -msgstr "Попытка загрузить образ размером более %d байт отклонена." - -msgid "Descriptive name for the image" -msgstr "Описательное имя образа" - -msgid "Disk format is not specified." 
-msgstr "Не указан формат диска." - -#, python-format -msgid "" -"Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" -msgstr "" -"Драйвер %(driver_name)s не удалось правильно настроить. Причина: %(reason)s" - -msgid "" -"Error decoding your request. Either the URL or the request body contained " -"characters that could not be decoded by Glance" -msgstr "" -"Ошибка при декодировании запроса. URL или тело запроса содержат символы, " -"которые Glance не способен декодировать" - -#, python-format -msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" -msgstr "Ошибка при выборке элементов образа %(image_id)s: %(inner_msg)s" - -msgid "Error in store configuration. Adding images to store is disabled." -msgstr "" -"Ошибка в конфигурации хранилища. Добавление образов в хранилище отключено." - -msgid "Expected a member in the form: {\"member\": \"image_id\"}" -msgstr "Элемент должен быть задан в формате: {\"member\": \"image_id\"}" - -msgid "Expected a status in the form: {\"status\": \"status\"}" -msgstr "Состояние должно быть указано в формате: {\"status\": \"status\"}" - -msgid "External source should not be empty" -msgstr "Внешний источник не должен быть пустым" - -#, python-format -msgid "External sources are not supported: '%s'" -msgstr "Внешние ресурсы не поддерживаются: %s" - -#, python-format -msgid "Failed to activate image. Got error: %s" -msgstr "Активировать образ не удалось. Ошибка: %s" - -#, python-format -msgid "Failed to add image metadata. Got error: %s" -msgstr "Добавить метаданные образа не удалось. 
Ошибка: %s" - -#, python-format -msgid "Failed to find image %(image_id)s to delete" -msgstr "Найти образ для удаления %(image_id)s не удалось" - -#, python-format -msgid "Failed to find image to delete: %s" -msgstr "Найти образ для удаления не удалось: %s" - -#, python-format -msgid "Failed to find image to update: %s" -msgstr "Найти образ для обновления не удалось: %s" - -#, python-format -msgid "Failed to find resource type %(resourcetype)s to delete" -msgstr "Не удалось найти тип ресурса %(resourcetype)s для удаления" - -#, python-format -msgid "Failed to initialize the image cache database. Got error: %s" -msgstr "Инициализировать базу данных кэша образов не удалось. Ошибка: %s" - -#, python-format -msgid "Failed to read %s from config" -msgstr "Прочесть %s из конфигурации не удалось" - -#, python-format -msgid "Failed to reserve image. Got error: %s" -msgstr "Зарезервировать образ не удалось. Ошибка: %s" - -#, python-format -msgid "Failed to update image metadata. Got error: %s" -msgstr "Обновить метаданные образа не удалось. Ошибка: %s" - -#, python-format -msgid "Failed to upload image %s" -msgstr "Загрузить образ %s не удалось" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to HTTP error: " -"%(error)s" -msgstr "" -"Загрузить данные образа %(image_id)s не удалось из-за ошибки HTTP: %(error)s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to internal error: " -"%(error)s" -msgstr "" -"Загрузить данные образа %(image_id)s не удалось из-за внутренней ошибки: " -"%(error)s" - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"Файл %(path)s содержит недопустимый базовый файл %(bfile)s, принудительное " -"завершение." - -msgid "" -"File based imports are not allowed. Please use a non-local source of image " -"data." -msgstr "" -"Импорты на основе файлов не разрешены. Используйте нелокальный источник " -"данных образа." 
- -msgid "Forbidden image access" -msgstr "Доступ к образу запрещен" - -#, python-format -msgid "Forbidden to delete a %s image." -msgstr "Удалять образ %s запрещено." - -#, python-format -msgid "Forbidden to delete image: %s" -msgstr "Удалять образ запрещено: %s" - -#, python-format -msgid "Forbidden to modify '%(key)s' of %(status)s image." -msgstr "Запрещено изменять '%(key)s' образа %(status)s." - -#, python-format -msgid "Forbidden to modify '%s' of image." -msgstr "Изменять '%s' образа запрещено." - -msgid "Forbidden to reserve image." -msgstr "Резервировать образ запрещено." - -msgid "Forbidden to update deleted image." -msgstr "Обновлять удаленный образ запрещено." - -#, python-format -msgid "Forbidden to update image: %s" -msgstr "Обновлять образ запрещено: %s" - -#, python-format -msgid "Forbidden upload attempt: %s" -msgstr "Запрещенная попытка загрузки: %s" - -#, python-format -msgid "Forbidding request, metadata definition namespace=%s is not visible." -msgstr "" -"Запрещенный запрос: пространство имен %s определения метаданных невидимое." - -#, python-format -msgid "Forbidding request, task %s is not visible" -msgstr "Запрос запрещается, задача %s невидима" - -msgid "Format of the container" -msgstr "Формат контейнера" - -msgid "Format of the disk" -msgstr "Формат диска" - -#, python-format -msgid "Host \"%s\" is not valid." -msgstr "Хост \"%s\" недопустим." - -#, python-format -msgid "Host and port \"%s\" is not valid." -msgstr "Хост и порт \"%s\" недопустимы." - -msgid "" -"Human-readable informative message only included when appropriate (usually " -"on failure)" -msgstr "" -"Информационное сообщение для пользователя добавляется только в " -"соответствующих случаях (обычно в случае ошибки)" - -msgid "If true, image will not be deletable." -msgstr "Если значение равно true, то образ нельзя будет удалить." - -msgid "If true, namespace will not be deletable." -msgstr "Если true, пространство имен будет неудаляемым." 
- -#, python-format -msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" -msgstr "Не удается удалить образ %(id)s, так как он используется: %(exc)s" - -#, python-format -msgid "Image %(id)s not found" -msgstr "Образ %(id)s не найден" - -#, python-format -msgid "" -"Image %(image_id)s could not be found after upload. The image may have been " -"deleted during the upload: %(error)s" -msgstr "" -"Образ %(image_id)s не найден после загрузки. Возможно, он удален во время " -"загрузки: %(error)s" - -#, python-format -msgid "Image %(image_id)s is protected and cannot be deleted." -msgstr "Образ %(image_id)s защищен и не может быть удален." - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload, cleaning up the chunks uploaded." -msgstr "" -"Образ %s не найден после загрузки. Возможно, он был удален во время " -"передачи, выполняется очистка переданных фрагментов." - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload." -msgstr "" -"Образ %s не найден после загрузки. Возможно, он удален во время загрузки." - -#, python-format -msgid "Image %s is deactivated" -msgstr "Образ %s деактивирован" - -#, python-format -msgid "Image %s is not active" -msgstr "Образ %s неактивен" - -#, python-format -msgid "Image %s not found." -msgstr "Образ %s не найден." - -#, python-format -msgid "Image exceeds the storage quota: %s" -msgstr "Размер образа превышает квоту хранилища: %s" - -msgid "Image id is required." -msgstr "Требуется ИД образа." 
- -msgid "Image is protected" -msgstr "Образ защищен" - -#, python-format -msgid "Image member limit exceeded for image %(id)s: %(e)s:" -msgstr "" -"Превышено предельно допустимое число участников для образа %(id)s: %(e)s:" - -#, python-format -msgid "Image name too long: %d" -msgstr "Имя образа слишком длинное: %d" - -msgid "Image operation conflicts" -msgstr "Конфликт операций с образом" - -#, python-format -msgid "" -"Image status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "" -"Изменять состояние %(cur_status)s образа на %(new_status)s не разрешается" - -#, python-format -msgid "Image storage media is full: %s" -msgstr "Носитель образов переполнен: %s" - -#, python-format -msgid "Image tag limit exceeded for image %(id)s: %(e)s:" -msgstr "Превышено предельно допустимое число тегов для образа %(id)s: %(e)s:" - -#, python-format -msgid "Image upload problem: %s" -msgstr "Неполадка при передаче образа: %s" - -#, python-format -msgid "Image with identifier %s already exists!" -msgstr "Образ с идентификатором %s уже существует!" - -#, python-format -msgid "Image with identifier %s has been deleted." -msgstr "Образ с идентификатором %s удален." 
- -#, python-format -msgid "Image with identifier %s not found" -msgstr "Образ с идентификатором %s не найден" - -#, python-format -msgid "Image with the given id %(image_id)s was not found" -msgstr "Не найден образ с заданным ИД %(image_id)s" - -#, python-format -msgid "" -"Incorrect auth strategy, expected \"%(expected)s\" but received " -"\"%(received)s\"" -msgstr "" -"Неправильная стратегия идентификации, ожидалось \"%(expected)s\", но " -"получено \"%(received)s\"" - -#, python-format -msgid "Incorrect request: %s" -msgstr "Неправильный запрос: %s" - -#, python-format -msgid "Input does not contain '%(key)s' field" -msgstr "Ввод не содержит поле %(key)s" - -#, python-format -msgid "Insufficient permissions on image storage media: %s" -msgstr "Недостаточные права для доступа к носителю образов: %s" - -#, python-format -msgid "Invalid JSON pointer for this resource: '/%s'" -msgstr "Недопустимый указатель JSON для этого ресурса: '%s'" - -#, python-format -msgid "Invalid checksum '%s': can't exceed 32 characters" -msgstr "" -"Недопустимая контрольная сумма '%s': длина не может превышать 32 символа" - -msgid "Invalid configuration in glance-swift conf file." -msgstr "Недопустимая конфигурация в файле конфигурации glance-swift." - -msgid "Invalid configuration in property protection file." -msgstr "Недопустимая конфигурация в файле защиты свойств." - -#, python-format -msgid "Invalid container format '%s' for image." -msgstr "Неверный формат контейнера '%s' для образа." - -#, python-format -msgid "Invalid content type %(content_type)s" -msgstr "Недопустимый тип содержимого: %(content_type)s" - -#, python-format -msgid "Invalid disk format '%s' for image." -msgstr "Неверный формат диска '%s' для образа." - -#, python-format -msgid "Invalid filter value %s. The quote is not closed." -msgstr "Недопустимое значение фильтра %s. Нет закрывающей кавычки." - -#, python-format -msgid "" -"Invalid filter value %s. There is no comma after closing quotation mark." 
-msgstr "" -"Недопустимое значение фильтра %s. Нет запятой после закрывающей кавычки." - -#, python-format -msgid "" -"Invalid filter value %s. There is no comma before opening quotation mark." -msgstr "" -"Недопустимое значение фильтра %s. Нет запятой перед открывающей кавычкой." - -msgid "Invalid image id format" -msgstr "Недопустимый формат ИД образа" - -msgid "Invalid location" -msgstr "Недопустимое расположение" - -#, python-format -msgid "Invalid location %s" -msgstr "Неверное расположение %s" - -#, python-format -msgid "Invalid location: %s" -msgstr "Недопустимое расположение: %s" - -#, python-format -msgid "" -"Invalid location_strategy option: %(name)s. The valid strategy option(s) " -"is(are): %(strategies)s" -msgstr "" -"Неверный параметр location_strategy: %(name)s. Верные параметры стратегии: " -"%(strategies)s" - -msgid "Invalid locations" -msgstr "Недопустимые расположения" - -#, python-format -msgid "Invalid locations: %s" -msgstr "Недопустимые расположения: %s" - -msgid "Invalid marker format" -msgstr "Недопустимый формат маркера" - -msgid "Invalid marker. Image could not be found." -msgstr "Недопустимый маркер. Образ не найден." - -#, python-format -msgid "Invalid membership association: %s" -msgstr "Недопустимая ассоциация членства: %s" - -msgid "" -"Invalid mix of disk and container formats. When setting a disk or container " -"format to one of 'aki', 'ari', or 'ami', the container and disk formats must " -"match." -msgstr "" -"Недопустимое сочетание форматов диска и контейнера. При задании формата " -"диска или контейнера равным 'aki', 'ari' или 'ami' форматы контейнера и " -"диска должны совпадать." - -#, python-format -msgid "" -"Invalid operation: `%(op)s`. It must be one of the following: %(available)s." -msgstr "" -"Недопустимая операция: `%(op)s`. Допускается одна из следующих операций: " -"%(available)s." - -msgid "Invalid position for adding a location." -msgstr "Недопустимая позиция для добавления расположения." 
- -msgid "Invalid position for removing a location." -msgstr "Недопустимая позиция для удаления расположения." - -msgid "Invalid service catalog json." -msgstr "Недопустимый json каталога службы." - -#, python-format -msgid "Invalid sort direction: %s" -msgstr "Недопустимое направление сортировки: %s" - -#, python-format -msgid "" -"Invalid sort key: %(sort_key)s. It must be one of the following: " -"%(available)s." -msgstr "" -"Недопустимый ключ сортировки %(sort_key)s. Допускается один из следующих " -"ключей: %(available)s." - -#, python-format -msgid "Invalid status value: %s" -msgstr "Недопустимое значение состояния: %s" - -#, python-format -msgid "Invalid status: %s" -msgstr "Недопустимое состояние: %s" - -#, python-format -msgid "Invalid time format for %s." -msgstr "Недопустимый формат времени для %s." - -#, python-format -msgid "Invalid type value: %s" -msgstr "Недопустимое значение типа: %s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition namespace " -"with the same name of %s" -msgstr "" -"Недопустимое обновление. Оно создает пространство имен определения " -"метаданных с таким же именем, как у пространства имен %s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Недопустимое обновление. Оно создает объект определения метаданных с таким " -"же именем, как у объекта %(name)s в пространстве имен %(namespace_name)s." - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Недопустимое обновление. Оно создает объект определения метаданных с таким " -"же именем, как у объекта %(name)s в пространстве имен %(namespace_name)s." - -#, python-format -msgid "" -"Invalid update. 
It would result in a duplicate metadata definition property " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Недопустимое обновление. Оно создает пространство имен определения " -"метаданных с таким же именем, как у свойства %(name)s в пространстве имен " -"%(namespace_name)s." - -#, python-format -msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" -msgstr "Неверное значение '%(value)s' параметра '%(param)s': %(extra_msg)s" - -#, python-format -msgid "Invalid value for option %(option)s: %(value)s" -msgstr "Недопустимое значение для опции %(option)s: %(value)s" - -#, python-format -msgid "Invalid visibility value: %s" -msgstr "Недопустимое значение видимости: %s" - -msgid "It's invalid to provide multiple image sources." -msgstr "Указывать несколько источников образов нельзя." - -msgid "It's not allowed to add locations if locations are invisible." -msgstr "Не разрешено добавлять расположения, если они невидимы." - -msgid "It's not allowed to remove locations if locations are invisible." -msgstr "Не разрешено удалять расположения, если они невидимы." - -msgid "It's not allowed to update locations if locations are invisible." -msgstr "Не разрешено обновлять расположения, если они невидимы." - -msgid "List of strings related to the image" -msgstr "Список строк, относящихся к образу" - -msgid "Malformed JSON in request body." -msgstr "Неправильно сформированный JSON в теле запроса." - -msgid "Maximal age is count of days since epoch." -msgstr "Максимальный возраст - число дней с начала эпохи." - -#, python-format -msgid "Maximum redirects (%(redirects)s) was exceeded." -msgstr "Превышено максимальное количество перенаправлений (%(redirects)s)." 
- -#, python-format -msgid "Member %(member_id)s is duplicated for image %(image_id)s" -msgstr "Обнаружена копия участника %(member_id)s для образа %(image_id)s" - -msgid "Member can't be empty" -msgstr "Участник не может быть пустым" - -msgid "Member to be added not specified" -msgstr "Добавляемый участник не указан" - -msgid "Membership could not be found." -msgstr "Членство не найдено." - -#, python-format -msgid "" -"Metadata definition namespace %(namespace)s is protected and cannot be " -"deleted." -msgstr "" -"Пространство имен %(namespace)s определения метаданных защищено и не может " -"быть удален." - -#, python-format -msgid "Metadata definition namespace not found for id=%s" -msgstr "Не найдено пространство имен определения метаданных для ИД %s" - -#, python-format -msgid "" -"Metadata definition object %(object_name)s is protected and cannot be " -"deleted." -msgstr "" -"Объект %(object_name)s определения метаданных защищен и не может быть удален." - -#, python-format -msgid "Metadata definition object not found for id=%s" -msgstr "Не найден объект определения метаданных для ИД %s" - -#, python-format -msgid "" -"Metadata definition property %(property_name)s is protected and cannot be " -"deleted." -msgstr "" -"Свойство %(property_name)s определения метаданных защищено и не может быть " -"удалено." - -#, python-format -msgid "Metadata definition property not found for id=%s" -msgstr "Не найдено свойство определения метаданных для ИД %s" - -#, python-format -msgid "" -"Metadata definition resource-type %(resource_type_name)s is a seeded-system " -"type and cannot be deleted." -msgstr "" -"Тип ресурса %(resource_type_name)s определения метаданных являетсясистемным " -"типом и не может быть удален." - -#, python-format -msgid "" -"Metadata definition resource-type-association %(resource_type)s is protected " -"and cannot be deleted." -msgstr "" -"Связь типа ресурса %(resource_type)s определения метаданных защищена и не " -"может быть удалена." 
- -#, python-format -msgid "" -"Metadata definition tag %(tag_name)s is protected and cannot be deleted." -msgstr "" -"Тег %(tag_name)s определения метаданных защищен и не может быть удален." - -#, python-format -msgid "Metadata definition tag not found for id=%s" -msgstr "Не найден тег определения метаданных для ИД %s" - -msgid "Minimal rows limit is 1." -msgstr "Минимальное число строк равно 1." - -#, python-format -msgid "Missing required credential: %(required)s" -msgstr "Отсутствуют обязательные идентификационные данные: %(required)s" - -#, python-format -msgid "" -"Multiple 'image' service matches for region %(region)s. This generally means " -"that a region is required and you have not supplied one." -msgstr "" -"Несколько соответствий службы 'image' для региона %(region)s. Обычно это " -"означает, что регион обязателен, но вы его не указали." - -msgid "No authenticated user" -msgstr "Нет идентифицированного пользователя" - -#, python-format -msgid "No image found with ID %s" -msgstr "Образ с ИД %s не найден" - -#, python-format -msgid "No location found with ID %(loc)s from image %(img)s" -msgstr "Расположение с ИД %(loc)s из образа %(img)s не найдено" - -msgid "No permission to share that image" -msgstr "Нет прав на совместное использование этого образа" - -#, python-format -msgid "Not allowed to create members for image %s." -msgstr "Не разрешено создавать участников для образа %s." - -#, python-format -msgid "Not allowed to deactivate image in status '%s'" -msgstr "Запрещено деактивировать образ в состоянии %s" - -#, python-format -msgid "Not allowed to delete members for image %s." -msgstr "Не разрешено удалять участников для образа %s." - -#, python-format -msgid "Not allowed to delete tags for image %s." -msgstr "Не разрешено удалять теги для образа %s." - -#, python-format -msgid "Not allowed to list members for image %s." -msgstr "Не разрешено выводить список участников для образа %s." 
- -#, python-format -msgid "Not allowed to reactivate image in status '%s'" -msgstr "Запрещено повторно активировать образ в состоянии %s" - -#, python-format -msgid "Not allowed to update members for image %s." -msgstr "Не разрешено изменять участников для образа %s." - -#, python-format -msgid "Not allowed to update tags for image %s." -msgstr "Не разрешено изменять теги для образа %s." - -#, python-format -msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" -msgstr "Загружать данные для образа %(image_id)s не разрешено: %(error)s" - -msgid "Number of sort dirs does not match the number of sort keys" -msgstr "Число направлений сортировки не совпадает с числом ключей сортировки" - -msgid "OVA extract is limited to admin" -msgstr "Распаковку OVA может выполнить только администратор" - -msgid "Old and new sorting syntax cannot be combined" -msgstr "Прежний и новый синтаксисы сортировки нельзя смешивать" - -#, python-format -msgid "Operation \"%s\" requires a member named \"value\"." -msgstr "Операции \"%s\" требуется участник с именем \"value\"." - -msgid "" -"Operation objects must contain exactly one member named \"add\", \"remove\", " -"or \"replace\"." -msgstr "" -"Объекты операции должны содержать в точности один участник с именем \"add\", " -"\"remove\" или \"replace\"." - -msgid "" -"Operation objects must contain only one member named \"add\", \"remove\", or " -"\"replace\"." -msgstr "" -"Объекты операции должны содержать только один участник с именем \"add\", " -"\"remove\" или \"replace\"." - -msgid "Operations must be JSON objects." -msgstr "Операции должны быть объектами JSON." - -#, python-format -msgid "Original locations is not empty: %s" -msgstr "Исходные расположения не пусты: %s" - -msgid "Owner can't be updated by non admin." -msgstr "Обычный пользователь не может изменить владельца." - -msgid "Owner must be specified to create a tag." -msgstr "Для создания тега необходимо указать владельца." 
- -msgid "Owner of the image" -msgstr "Владелец образа" - -msgid "Owner of the namespace." -msgstr "Владелец пространства имен." - -msgid "Param values can't contain 4 byte unicode." -msgstr "" -"Значения параметров не могут содержать символы в кодировке 4-байтового " -"unicode." - -#, python-format -msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." -msgstr "" -"Указатель `%s` содержит символ \"~\", не входящий в распознаваемую Esc-" -"последовательность." - -#, python-format -msgid "Pointer `%s` contains adjacent \"/\"." -msgstr "Указатель `%s` содержит смежный \"/\"." - -#, python-format -msgid "Pointer `%s` does not contains valid token." -msgstr "Указатель `%s` не содержит допустимого маркера." - -#, python-format -msgid "Pointer `%s` does not start with \"/\"." -msgstr "Указатель `%s` не начинается с \"/\"." - -#, python-format -msgid "Pointer `%s` end with \"/\"." -msgstr "Указатель `%s` оканчивается на \"/\"." - -#, python-format -msgid "Port \"%s\" is not valid." -msgstr "Порт \"%s\" недопустим." - -#, python-format -msgid "Process %d not running" -msgstr "Процесс %d не выполняется" - -#, python-format -msgid "Properties %s must be set prior to saving data." -msgstr "Свойства %s должны быть заданы до сохранения данных." - -#, python-format -msgid "" -"Property %(property_name)s does not start with the expected resource type " -"association prefix of '%(prefix)s'." -msgstr "" -"Свойство %(property_name)s не начинается с ожидаемого префикса связи типа " -"ресурса '%(prefix)s'." - -#, python-format -msgid "Property %s already present." -msgstr "Свойство %s уже существует." - -#, python-format -msgid "Property %s does not exist." -msgstr "Свойство %s не существует." - -#, python-format -msgid "Property %s may not be removed." -msgstr "Свойство %s нельзя удалить." - -#, python-format -msgid "Property %s must be set prior to saving data." -msgstr "Свойство %s должно быть задано до сохранения данных." 
- -#, python-format -msgid "Property '%s' is protected" -msgstr "Свойство '%s' защищено" - -msgid "Property names can't contain 4 byte unicode." -msgstr "" -"Имена свойств не могут содержать символы в кодировке 4-байтового unicode." - -#, python-format -msgid "" -"Provided image size must match the stored image size. (provided size: " -"%(ps)d, stored size: %(ss)d)" -msgstr "" -"Указанный размер образа должен быть равен сохраненному размеру образа. " -"(Указанный размер: %(ps)d, сохраненный размер: %(ss)d)" - -#, python-format -msgid "Provided object does not match schema '%(schema)s': %(reason)s" -msgstr "Предоставленный объект не соответствует схеме '%(schema)s': %(reason)s" - -#, python-format -msgid "Provided status of task is unsupported: %(status)s" -msgstr "Указано неподдерживаемое состояние задачи: %(status)s" - -#, python-format -msgid "Provided type of task is unsupported: %(type)s" -msgstr "Указан неподдерживаемый тип задачи: %(type)s" - -msgid "Provides a user friendly description of the namespace." -msgstr "Описание пространства имен для пользователя." - -msgid "Received invalid HTTP redirect." -msgstr "Получено недопустимое перенаправление HTTP." - -#, python-format -msgid "Redirecting to %(uri)s for authorization." -msgstr "Перенаправляется на %(uri)s для предоставления доступа." - -#, python-format -msgid "Registry service can't use %s" -msgstr "Служба реестра не может использовать %s" - -#, python-format -msgid "Registry was not configured correctly on API server. Reason: %(reason)s" -msgstr "Реестр настроен неправильно на сервере API. 
Причина: %(reason)s" - -#, python-format -msgid "Reload of %(serv)s not supported" -msgstr "Перезагрузка %(serv)s не поддерживается" - -#, python-format -msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "Перезагрузка %(serv)s (pid %(pid)s) с сигналом (%(sig)s)" - -#, python-format -msgid "Removing stale pid file %s" -msgstr "Удаление устаревшего файла pid %s" - -msgid "Request body must be a JSON array of operation objects." -msgstr "Тело запроса должно быть массивом JSON объектов операции." - -msgid "Request must be a list of commands" -msgstr "Запрос должен быть списком команд" - -#, python-format -msgid "Required store %s is invalid" -msgstr "Необходимое хранилище %s недопустимо" - -msgid "" -"Resource type names should be aligned with Heat resource types whenever " -"possible: http://docs.openstack.org/developer/heat/template_guide/openstack." -"html" -msgstr "" -"Имена типов ресурсов должны быть согласованы с типами ресурсов Heat, когда " -"это возможно: http://docs.openstack.org/developer/heat/template_guide/" -"openstack.html" - -msgid "Response from Keystone does not contain a Glance endpoint." -msgstr "Ответ от Keystone не содержит конечной точки Glance." - -msgid "Scope of image accessibility" -msgstr "Область доступности образа" - -msgid "Scope of namespace accessibility." -msgstr "Область доступности пространства имен." - -#, python-format -msgid "Server %(serv)s is stopped" -msgstr "Сервер %(serv)s остановлен" - -#, python-format -msgid "Server worker creation failed: %(reason)s." -msgstr "Создать исполнитель сервера не удалось: %(reason)s." - -msgid "Signature verification failed" -msgstr "Проверка подписи не выполнена." - -msgid "Size of image file in bytes" -msgstr "Размер файла образа в байтах" - -msgid "" -"Some resource types allow more than one key / value pair per instance. For " -"example, Cinder allows user and image metadata on volumes. Only the image " -"properties metadata is evaluated by Nova (scheduling or drivers). 
This " -"property allows a namespace target to remove the ambiguity." -msgstr "" -"Некоторые типы ресурсов допускают более одной пары ключ-значение на " -"экземпляр. Например, в Cinder разрешены метаданные пользователей и образов " -"для томов. Только метаданные свойств образа обрабатываются Nova " -"(планирование или драйверы). Это свойство позволяет целевому объекту " -"пространства имен устранить неоднозначность." - -msgid "Sort direction supplied was not valid." -msgstr "Указано недопустимое направление сортировки." - -msgid "Sort key supplied was not valid." -msgstr "Задан недопустимый ключ сортировки." - -msgid "" -"Specifies the prefix to use for the given resource type. Any properties in " -"the namespace should be prefixed with this prefix when being applied to the " -"specified resource type. Must include prefix separator (e.g. a colon :)." -msgstr "" -"Задает префикс для данного типа ресурсов. Все свойства в пространстве имен " -"должны иметь этот префикс при применении к указанному типу ресурсов. Должен " -"использоваться разделитель префикса (например, двоеточие :)." - -msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." -msgstr "Состояние должно быть \"pending\", \"accepted\" или \"rejected\"." 
- -msgid "Status not specified" -msgstr "Состояние не указано" - -msgid "Status of the image" -msgstr "Состояние образа" - -#, python-format -msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "Изменять состояние %(cur_status)s на %(new_status)s не разрешается" - -#, python-format -msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "Остановка %(serv)s (pid %(pid)s) с сигналом (%(sig)s)" - -#, python-format -msgid "Store for image_id not found: %s" -msgstr "Хранилище для image_id не найдено: %s" - -#, python-format -msgid "Store for scheme %s not found" -msgstr "Хранилище для схемы %s не найдено" - -#, python-format -msgid "" -"Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " -"(%(actual)s) did not match. Setting image status to 'killed'." -msgstr "" -"Предоставленный %(attr)s (%(supplied)s) и %(attr)s, сгенерированный из " -"загруженного образа (%(actual)s), не совпадают. Образ переводится в " -"состояние 'killed'." - -msgid "Supported values for the 'container_format' image attribute" -msgstr "Поддерживаемые значения атрибута образа 'container_format'" - -msgid "Supported values for the 'disk_format' image attribute" -msgstr "Поддерживаемые значения атрибута образа 'disk_format'" - -#, python-format -msgid "Suppressed respawn as %(serv)s was %(rsn)s." -msgstr "Повторное порождение подавлено, поскольку %(serv)s был %(rsn)s." - -msgid "System SIGHUP signal received." -msgstr "Получен системный сигнал SIGHUP." 
- -#, python-format -msgid "Task '%s' is required" -msgstr "Требуется задача '%s'" - -msgid "Task does not exist" -msgstr "Задача не существует" - -msgid "Task failed due to Internal Error" -msgstr "Задача не выполнена из-за внутренней ошибки" - -msgid "Task was not configured properly" -msgstr "Задача неправильно настроена" - -#, python-format -msgid "Task with the given id %(task_id)s was not found" -msgstr "Задача с указанным ИД %(task_id)s не найдена" - -msgid "The \"changes-since\" filter is no longer available on v2." -msgstr "Фильтр \"changes-since\" больше недоступен в v2." - -#, python-format -msgid "The CA file you specified %s does not exist" -msgstr "Указанный файл CA %s не существует" - -#, python-format -msgid "" -"The Image %(image_id)s object being created by this task %(task_id)s, is no " -"longer in valid status for further processing." -msgstr "" -"Объект образа %(image_id)s, создаваемый с помощью задачи %(task_id)s, больше " -"не находится в допустимом состоянии для дальнейшей обработки." - -msgid "The Store URI was malformed." -msgstr "URI хранилища неправильно сформирован." - -msgid "" -"The URL to the keystone service. If \"use_user_token\" is not in effect and " -"using keystone auth, then URL of keystone can be specified." -msgstr "" -"URL службы Keystone. Если \"use_user_token\" не действует и используется " -"идентификация Keystone, можно указать URL Keystone." - -msgid "" -"The administrators password. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"Пароль администратора. Если \"use_user_token\" не действует, могут быть " -"указаны идентификационные данные администратора." - -msgid "" -"The administrators user name. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"Имя администратора. Если \"use_user_token\" не действует, могут быть указаны " -"идентификационные данные администратора." 
- -#, python-format -msgid "The cert file you specified %s does not exist" -msgstr "Указанный файл сертификата %s не существует" - -msgid "The current status of this task" -msgstr "Текущее состояние задачи" - -#, python-format -msgid "" -"The device housing the image cache directory %(image_cache_dir)s does not " -"support xattr. It is likely you need to edit your fstab and add the " -"user_xattr option to the appropriate line for the device housing the cache " -"directory." -msgstr "" -"Устройство, на котором размещен каталог %(image_cache_dir)s кэша образов, не " -"поддерживает xattr. По-видимому, вам нужно отредактировать fstab, добавив " -"опцию user_xattr в соответствующую строку для устройства, на котором " -"размещен каталог кэша." - -#, python-format -msgid "" -"The given uri is not valid. Please specify a valid uri from the following " -"list of supported uri %(supported)s" -msgstr "" -"Заданный uri недопустим. Укажите допустимый uri из следующего списка " -"поддерживаемых uri %(supported)s" - -#, python-format -msgid "The incoming image is too large: %s" -msgstr "Чересчур большой размер входящего образа: %s" - -#, python-format -msgid "The key file you specified %s does not exist" -msgstr "Указанный файл ключа %s не существует" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image locations. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Превышено ограничение по числу разрешенных расположений образа. Указанное " -"число: %(attempted)s, максимальное число: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image members for this " -"image. Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Превышено ограничение по числу разрешенных участников данного образа. " -"Указанное число: %(attempted)s, максимальное число: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. 
" -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Превышено ограничение по числу разрешенных свойств образа. Указанное число: " -"%(attempted)s, максимальное число: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(num)s, Maximum: %(quota)s" -msgstr "" -"Превышено ограничение по числу разрешенных свойств образа. Указанное число: " -"%(num)s, максимальное число: %(quota)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image tags. Attempted: " -"%(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Превышено ограничение по числу разрешенных тегов образа. Указанное число: " -"%(attempted)s, максимальное число: %(maximum)s" - -#, python-format -msgid "The location %(location)s already exists" -msgstr "Расположение %(location)s уже существует" - -#, python-format -msgid "The location data has an invalid ID: %d" -msgstr "Данные о расположении содержат недопустимый ИД: %d" - -#, python-format -msgid "" -"The metadata definition %(record_type)s with name=%(record_name)s not " -"deleted. Other records still refer to it." -msgstr "" -"Определение метаданных %(record_type)s с именем %(record_name)s не удалено. " -"Другие записи все еще ссылаются на него." - -#, python-format -msgid "The metadata definition namespace=%(namespace_name)s already exists." -msgstr "" -"Пространство имен %(namespace_name)s определения метаданных уже существует." - -#, python-format -msgid "" -"The metadata definition object with name=%(object_name)s was not found in " -"namespace=%(namespace_name)s." -msgstr "" -"Объект определения метаданных с именем %(object_name)s не найден в " -"пространстве имен %(namespace_name)s." - -#, python-format -msgid "" -"The metadata definition property with name=%(property_name)s was not found " -"in namespace=%(namespace_name)s." 
-msgstr "" -"Свойство определения метаданных с именем %(property_name)s не найдено в " -"пространстве имен %(namespace_name)s." - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s already exists." -msgstr "" -"Связь типа ресурса определения метаданных для типа ресурса" -"%(resource_type_name)s и пространства имен %(namespace_name)s уже существует." - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s, was not found." -msgstr "" -"Связь типа ресурса определения метаданных для типа ресурса" -"%(resource_type_name)s и пространства имен %(namespace_name)s не найдена." - -#, python-format -msgid "" -"The metadata definition resource-type with name=%(resource_type_name)s, was " -"not found." -msgstr "" -"Тип ресурса определения метаданных с именем %(resource_type_name)s не найден." - -#, python-format -msgid "" -"The metadata definition tag with name=%(name)s was not found in namespace=" -"%(namespace_name)s." -msgstr "" -"Тег определения метаданных с именем %(name)s не найден в пространстве имен " -"%(namespace_name)s." - -msgid "The parameters required by task, JSON blob" -msgstr "Параметры, обязательные для задачи JSON blob" - -msgid "The provided image is too large." -msgstr "Предоставленный образ слишком велик." - -msgid "" -"The region for the authentication service. If \"use_user_token\" is not in " -"effect and using keystone auth, then region name can be specified." -msgstr "" -"Регион службы идентификации. Если \"use_user_token\" не действует и " -"используется идентификация Keystone, можно указать имя региона." - -msgid "The request returned 500 Internal Server Error." -msgstr "Запрос возвратил ошибку 500 - Внутренняя ошибка сервера." - -msgid "" -"The request returned 503 Service Unavailable. 
This generally occurs on " -"service overload or other transient outage." -msgstr "" -"Запрос возвратил ошибку 503 - Служба недоступна. Как правило, это происходит " -"при перегруженности службы или другом временном сбое." - -#, python-format -msgid "" -"The request returned a 302 Multiple Choices. This generally means that you " -"have not included a version indicator in a request URI.\n" -"\n" -"The body of response returned:\n" -"%(body)s" -msgstr "" -"Запрос возвратил ошибку 302 - Множественный выбор. Как правило, это " -"означает, что вы не включили индикатор версии в URI запроса.\n" -"\n" -"Возвращенное тело запроса:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned a 413 Request Entity Too Large. This generally means " -"that rate limiting or a quota threshold was breached.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"Запрос возвратил ошибку 413 - Сущность запроса слишком велика. Как правило, " -"это означает, что нарушено ограничение на скорость или порог квоты.\n" -"\n" -"Тело ответа:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned an unexpected status: %(status)s.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"Запрос возвратил непредвиденное состояние: %(status)s.\n" -"\n" -"Тело ответа:\n" -"%(body)s" - -msgid "" -"The requested image has been deactivated. Image data download is forbidden." -msgstr "Запрошенный образ деактивирован. Загрузка данных образа запрещена." - -msgid "The result of current task, JSON blob" -msgstr "Результат текущей задачи JSON blob" - -#, python-format -msgid "" -"The size of the data %(image_size)s will exceed the limit. %(remaining)s " -"bytes remaining." -msgstr "" -"Объем данных %(image_size)s превышает допустимый максимум. Остаток: " -"%(remaining)s байт." 
- -#, python-format -msgid "The specified member %s could not be found" -msgstr "Указанный участник %s не найден" - -#, python-format -msgid "The specified metadata object %s could not be found" -msgstr "Указанный объект метаданных %s не найден" - -#, python-format -msgid "The specified metadata tag %s could not be found" -msgstr "Не удалось найти указанный тег метаданных %s" - -#, python-format -msgid "The specified namespace %s could not be found" -msgstr "Указанное пространство имен %s не найдено" - -#, python-format -msgid "The specified property %s could not be found" -msgstr "Указанное свойство %s не найдено" - -#, python-format -msgid "The specified resource type %s could not be found " -msgstr "Указанный тип ресурса %s не найден " - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'" -msgstr "" -"Состояние расположения удаленного образа может быть равно только " -"'pending_delete' или 'deleted'" - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'." -msgstr "" -"Состояние расположения удаленного образа может быть равно только " -"'pending_delete' или 'deleted'." - -msgid "The status of this image member" -msgstr "Состояние этого участника образа" - -msgid "" -"The strategy to use for authentication. If \"use_user_token\" is not in " -"effect, then auth strategy can be specified." -msgstr "" -"Стратегия идентификации. Если \"use_user_token\" не действует, можно указать " -"стратегию идентификации." - -#, python-format -msgid "" -"The target member %(member_id)s is already associated with image " -"%(image_id)s." -msgstr "Целевой участник %(member_id)s уже связан с образом %(image_id)s." - -msgid "" -"The tenant name of the administrative user. If \"use_user_token\" is not in " -"effect, then admin tenant name can be specified." -msgstr "" -"Имя арендатора администратора. Если \"use_user_token\" не действует, можно " -"указать имя арендатора администратора." 
- -msgid "The type of task represented by this content" -msgstr "Тип задачи, представленной этим содержимым" - -msgid "The unique namespace text." -msgstr "Уникальный текст пространства имен." - -msgid "The user friendly name for the namespace. Used by UI if available." -msgstr "" -"Имя пространства имен для пользователя. Используется в пользовательском " -"интерфейсе." - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. Error: %(ioe)s" -msgstr "" -"Ошибка в %(error_key_name)s %(error_filename)s. Проверьте. Ошибка: %(ioe)s" - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. OpenSSL error: %(ce)s" -msgstr "" -"Ошибка в %(error_key_name)s %(error_filename)s. Проверьте. Ошибка OpenSSL: " -"%(ce)s" - -#, python-format -msgid "" -"There is a problem with your key pair. Please verify that cert " -"%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" -msgstr "" -"Неправильная пара ключей. Убедитесь, что сертификат %(cert_file)s и ключ " -"%(key_file)sсоответствуют друг другу. Ошибка OpenSSL: %(ce)s" - -msgid "There was an error configuring the client." -msgstr "При настройке клиента произошла ошибка." - -msgid "There was an error connecting to a server" -msgstr "При подключении к серверу произошла ошибка" - -msgid "" -"This operation is currently not permitted on Glance Tasks. They are auto " -"deleted after reaching the time based on their expires_at property." -msgstr "" -"Эта операция в настоящее время не разрешена для задач Glance. Они " -"автоматически удаляются после достижения срока, указанного в их свойстве " -"expires_at." - -msgid "This operation is currently not permitted on Glance images details." -msgstr "" -"Эта операция в настоящее время не разрешена для сведений об образах Glance." 
- -msgid "" -"Time in hours for which a task lives after, either succeeding or failing" -msgstr "" -"Время (ч) существования задачи после успешного выполнения или завершения с " -"ошибкой" - -msgid "Too few arguments." -msgstr "Недостаточно аргументов." - -msgid "" -"URI cannot contain more than one occurrence of a scheme.If you have " -"specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " -"you need to change it to use the swift+http:// scheme, like so: swift+http://" -"user:pass@authurl.com/v1/container/obj" -msgstr "" -"URI не может содержать больше одного вхождения схемы. Если вы указали URI " -"вида swift://user:pass@http://authurl.com/v1/container/obj, то вам нужно " -"изменить его так, чтобы использовалась схема swift+http://, например: swift" -"+http://user:pass@authurl.com/v1/container/obj" - -msgid "URL to access the image file kept in external store" -msgstr "URL для доступа к файлу образа, находящемуся во внешнем хранилище" - -#, python-format -msgid "" -"Unable to create pid file %(pid)s. Running as non-root?\n" -"Falling back to a temp file, you can stop %(service)s service using:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" -msgstr "" -"Не удается создать файл pid %(pid)s. Запущен без прав доступа root?\n" -"Возврат к файлу temp, для завершения работы службы %(service)s:\n" -" остановить %(file)s %(server)s - pid-файл %(fb)s" - -#, python-format -msgid "Unable to filter by unknown operator '%s'." -msgstr "Не удается отфильтровать с использованием неизвестного оператора: '%s'" - -msgid "Unable to filter on a range with a non-numeric value." -msgstr "Отфильтровать по диапазону с нечисловым значением невозможно." - -msgid "Unable to filter on a unknown operator." -msgstr "Не удается отфильтровать с использованием неизвестного оператора." - -msgid "Unable to filter using the specified operator." -msgstr "Не удается отфильтровать с использованием указанного оператора." - -msgid "Unable to filter using the specified range." 
-msgstr "Отфильтровать согласно указанному диапазону невозможно." - -#, python-format -msgid "Unable to find '%s' in JSON Schema change" -msgstr "'%s' не найден в изменении схемы JSON" - -#, python-format -msgid "" -"Unable to find `op` in JSON Schema change. It must be one of the following: " -"%(available)s." -msgstr "" -"Не удалось найти `op` в изменении схемы JSON. Допускается одно из следующих " -"значений: %(available)s." - -msgid "Unable to increase file descriptor limit. Running as non-root?" -msgstr "" -"Не удается увеличить предельное значение для дескриптора файлов. Запущен без " -"прав доступа root?" - -#, python-format -msgid "" -"Unable to load %(app_name)s from configuration file %(conf_file)s.\n" -"Got: %(e)r" -msgstr "" -"Невозможно загрузить %(app_name)s из файла конфигурации %(conf_file)s.\n" -"Ошибка: %(e)r" - -#, python-format -msgid "Unable to load schema: %(reason)s" -msgstr "Не удалось загрузить схему: %(reason)s" - -#, python-format -msgid "Unable to locate paste config file for %s." -msgstr "Не удается найти/вставить файл конфигурации для %s." - -#, python-format -msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" -msgstr "" -"Не удается загрузить данные для дубликата образа %(image_id)s: %(error)s" - -msgid "Unauthorized image access" -msgstr "Нет прав на доступ к образу" - -msgid "Unexpected body type. Expected list/dict." -msgstr "Непредвиденный тип тела. Ожидался список или словарь." 
- -#, python-format -msgid "Unexpected response: %s" -msgstr "Непредвиденный ответ: %s" - -#, python-format -msgid "Unknown auth strategy '%s'" -msgstr "Неизвестная стратегия идентификации: '%s'" - -#, python-format -msgid "Unknown command: %s" -msgstr "Неизвестная команда: %s" - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "Неизвестное направление сортировки, должно быть 'desc' или 'asc'" - -msgid "Unrecognized JSON Schema draft version" -msgstr "Нераспознанная версия черновика схемы JSON" - -msgid "Unrecognized changes-since value" -msgstr "Нераспознанное значение изменений за период" - -#, python-format -msgid "Unsupported sort_dir. Acceptable values: %s" -msgstr "Неподдерживаемый sort_dir. Допустимые значения: %s" - -#, python-format -msgid "Unsupported sort_key. Acceptable values: %s" -msgstr "Неподдерживаемый sort_key. Допустимые значения: %s" - -msgid "Virtual size of image in bytes" -msgstr "Виртуальный размер образа в байтах" - -#, python-format -msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" -msgstr "" -"Система ожидала завершения pid %(pid)s (%(file)s) в течение 15 секунд; " -"освобождение" - -msgid "" -"When running server in SSL mode, you must specify both a cert_file and " -"key_file option value in your configuration file" -msgstr "" -"При работе сервера в режиме SSL необходимо указать cert_file и key_file в " -"файле конфигурации" - -msgid "" -"Whether to pass through the user token when making requests to the registry. " -"To prevent failures with token expiration during big files upload, it is " -"recommended to set this parameter to False.If \"use_user_token\" is not in " -"effect, then admin credentials can be specified." -msgstr "" -"Осуществлять ли сквозную передачу пользовательского маркера при создании " -"запросов в реестр Для предотвращения сбоев, связанных с истечением срока " -"действия маркера во время передачи больших данных, рекомендуется присваивать " -"этому параметру значение False. 
Если \"use_user_token\" не используется, " -"можно указать идентификационные данные администратора." - -#, python-format -msgid "Wrong command structure: %s" -msgstr "Неверная структура команды: %s" - -msgid "You are not authenticated." -msgstr "Вы не прошли идентификацию." - -msgid "You are not authorized to complete this action." -msgstr "У вас нет прав на выполнение этого действия." - -#, python-format -msgid "You are not authorized to lookup image %s." -msgstr "У вас нет прав доступа для поиска образа %s." - -#, python-format -msgid "You are not authorized to lookup the members of the image %s." -msgstr "У вас нет прав доступа для поиска элементов образа %s." - -#, python-format -msgid "You are not permitted to create a tag in the namespace owned by '%s'" -msgstr "" -"У вас нет прав доступа для создания тега в пространстве имен, владельцем " -"которого является '%s'" - -msgid "You are not permitted to create image members for the image." -msgstr "Вам не разрешено создавать участники образов для данного образа." - -#, python-format -msgid "You are not permitted to create images owned by '%s'." -msgstr "Вам не разрешено создавать образы, принадлежащие '%s'." - -#, python-format -msgid "You are not permitted to create namespace owned by '%s'" -msgstr "Нет прав доступа на создание пространства имен, принадлежащего %s." - -#, python-format -msgid "You are not permitted to create object owned by '%s'" -msgstr "Нет прав доступа на создание объекта, принадлежащего %s." - -#, python-format -msgid "You are not permitted to create property owned by '%s'" -msgstr "Нет прав доступа на создание свойства, принадлежащего %s." - -#, python-format -msgid "You are not permitted to create resource_type owned by '%s'" -msgstr "Нет прав доступа на создание resource_type, принадлежащего %s." 
- -#, python-format -msgid "You are not permitted to create this task with owner as: %s" -msgstr "Вам не разрешено создавать эту задачу с владельцем: %s" - -msgid "You are not permitted to deactivate this image." -msgstr "Вам не разрешено деактивировать этот образ." - -msgid "You are not permitted to delete this image." -msgstr "Вам не разрешено удалять этот образ." - -msgid "You are not permitted to delete this meta_resource_type." -msgstr "Нет прав доступа на удаление этого meta_resource_type." - -msgid "You are not permitted to delete this namespace." -msgstr "Нет прав доступа на удаление этого пространства имен." - -msgid "You are not permitted to delete this object." -msgstr "Нет прав доступа на удаление этого объекта." - -msgid "You are not permitted to delete this property." -msgstr "Нет прав доступа на удаление этого свойства." - -msgid "You are not permitted to delete this tag." -msgstr "У вас нет прав доступа для удаления этого тега." - -#, python-format -msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." -msgstr "Вам не разрешено изменять '%(attr)s' в этом %(resource)s." - -#, python-format -msgid "You are not permitted to modify '%s' on this image." -msgstr "Вам не разрешено изменять '%s' в этом образе." - -msgid "You are not permitted to modify locations for this image." -msgstr "Вам не разрешено изменять расположения этого образа." - -msgid "You are not permitted to modify tags on this image." -msgstr "Вам не разрешено изменять теги этого образа." - -msgid "You are not permitted to modify this image." -msgstr "Вам не разрешено изменять этот образ." - -msgid "You are not permitted to reactivate this image." -msgstr "Вам не разрешено повторно активировать этот образ." - -msgid "You are not permitted to set status on this task." -msgstr "Вам не разрешено указывать состояние этой задачи." - -msgid "You are not permitted to update this namespace." -msgstr "Нет прав доступа на обновление этого пространства имен." 
- -msgid "You are not permitted to update this object." -msgstr "Нет прав доступа на обновление этого объекта." - -msgid "You are not permitted to update this property." -msgstr "Нет прав доступа на обновление этого свойства." - -msgid "You are not permitted to update this tag." -msgstr "У вас нет прав доступа для изменения этого тега." - -msgid "You are not permitted to upload data for this image." -msgstr "Вам не разрешено загружать данные для этого образа." - -#, python-format -msgid "You cannot add image member for %s" -msgstr "Невозможно добавить участник образа для %s" - -#, python-format -msgid "You cannot delete image member for %s" -msgstr "Невозможно удалить участник образа для %s" - -#, python-format -msgid "You cannot get image member for %s" -msgstr "Невозможно получить участник образа для %s" - -#, python-format -msgid "You cannot update image member %s" -msgstr "Невозможно обновить участник образа %s" - -msgid "You do not own this image" -msgstr "Этот образ вам не принадлежит" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a cert, " -"however you have failed to supply either a key_file parameter or set the " -"GLANCE_CLIENT_KEY_FILE environ variable" -msgstr "" -"Вы выбрали применение SSL в соединении и предоставили сертификат, однако вам " -"не удалось ни предоставить параметр key_file, ни задать переменную среды " -"GLANCE_CLIENT_KEY_FILE" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a key, " -"however you have failed to supply either a cert_file parameter or set the " -"GLANCE_CLIENT_CERT_FILE environ variable" -msgstr "" -"Вы выбрали применение SSL в соединении и предоставили ключ, однако вам не " -"удалось ни предоставить параметр cert_file, ни задать переменную среды " -"GLANCE_CLIENT_CERT_FILE" - -msgid "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" -msgstr "" 
-"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" - -#, python-format -msgid "__init__() got unexpected keyword argument '%s'" -msgstr "В __init__() получен непредвиденный именованный аргумент '%s'" - -#, python-format -msgid "" -"cannot transition from %(current)s to %(next)s in update (wanted from_state=" -"%(from)s)" -msgstr "" -"не удается выполнить переход от %(current)s к %(next)s при обновлении " -"(требуется from_state=%(from)s)" - -#, python-format -msgid "custom properties (%(props)s) conflict with base properties" -msgstr "настраиваемые свойства (%(props)s) конфликтуют с базовыми свойствами" - -msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" -msgstr "" -"Для этой платформы отсутствуют центры обработки событий poll и selects " -"библиотеки eventlet" - -msgid "is_public must be None, True, or False" -msgstr "Параметр is_public должен быть равен None, True или False" - -msgid "limit param must be an integer" -msgstr "Параметр limit должен быть целым числом" - -msgid "limit param must be positive" -msgstr "Параметр limit должен быть положительным" - -msgid "md5 hash of image contents." -msgstr "Хэш md5 содержимого образа." - -#, python-format -msgid "new_image() got unexpected keywords %s" -msgstr "В new_image() получены непредвиденные ключевые слова %s" - -msgid "protected must be True, or False" -msgstr "Параметр protected должен быть равен True или False" - -#, python-format -msgid "unable to launch %(serv)s. Got error: %(e)s" -msgstr "не удается запустить %(serv)s. 
Ошибка: %(e)s" - -#, python-format -msgid "x-openstack-request-id is too long, max size %s" -msgstr "Слишком большая длина x-openstack-request-id, максимальная длина: %s" diff --git a/glance/locale/tr_TR/LC_MESSAGES/glance.po b/glance/locale/tr_TR/LC_MESSAGES/glance.po deleted file mode 100644 index 65b52a83..00000000 --- a/glance/locale/tr_TR/LC_MESSAGES/glance.po +++ /dev/null @@ -1,1867 +0,0 @@ -# Translations template for glance. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the glance project. -# -# Translators: -# Andreas Jaeger , 2015 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: glance 15.0.0.0b3.dev29\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-06-23 20:54+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-12 05:22+0000\n" -"Last-Translator: Copied by Zanata \n" -"Language: tr-TR\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" -"Language-Team: Turkish (Turkey)\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "%(cls)s exception was raised in the last rpc call: %(val)s" -msgstr "Son rpc çağrısında %(cls)s istisnası oluştu: %(val)s" - -#, python-format -msgid "%(serv)s (pid %(pid)s) is running..." -msgstr "%(serv)s (pid %(pid)s) çalıştırılıyor..." - -#, python-format -msgid "%(serv)s appears to already be running: %(pid)s" -msgstr "%(serv)s çalışıyor görünüyor: %(pid)s" - -#, python-format -msgid "" -"%(strategy)s is registered as a module twice. %(module)s is not being used." -msgstr "" -"%(strategy)s bir birim olarak iki kez kaydedildi. %(module)s kullanılmıyor." - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. 
Could not load the " -"filesystem store" -msgstr "" -"%(task_type)s görev türündeki %(task_id)s düzgün bir şekilde " -"yapılandırılamadı. Dosya sistem deposuna yüklenemedi" - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Missing work dir: " -"%(work_dir)s" -msgstr "" -"%(task_type)s görev türündeki %(task_id)s düzgün bir şekilde " -"yapılandırılamadı. Eksik çalışma dizini: %(work_dir)s" - -#, python-format -msgid "%(verb)sing %(serv)s" -msgstr "%(verb)sing %(serv)s" - -#, python-format -msgid "%(verb)sing %(serv)s with %(conf)s" -msgstr "%(conf)s ile %(verb)sing %(serv)s" - -#, python-format -msgid "" -"%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " -"address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " -"separately from the port (i.e., \"[fe80::a:b:c]:9876\")." -msgstr "" -"%s Lütfen istemcinin bir IPv4, IPv6 adresi, makine adı ya da FQDN olduğu bir " -"istemci:bağlantı noktası çifti belirtin. Eğer IPv6 kullanılırsa, bağlantı " -"noktasından ayrı parantez içine alın (örneğin, \"[fe80::a:b:c]:9876\")." - -#, python-format -msgid "%s can't contain 4 byte unicode characters." -msgstr "%s 4 bayt unicode karakterler içeremez." - -#, python-format -msgid "%s is already stopped" -msgstr "%s zaten durdurulmuş" - -#, python-format -msgid "%s is stopped" -msgstr "%s durduruldu" - -msgid "" -"--os_auth_url option or OS_AUTH_URL environment variable required when " -"keystone authentication strategy is enabled\n" -msgstr "" -"--os_auth_url seçeneği ya da OS_AUTH_URL ortam değişkeni, keystone kimlik " -"doğrulama stratejisi etkinken gereklidir\n" - -#, python-format -msgid "" -"A metadata definition object with name=%(object_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"Ad=%(object_name)s ile bir metadata tanım nesnesi ad alanında=" -"%(namespace_name)s zaten var." 
- -#, python-format -msgid "" -"A metadata definition property with name=%(property_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"Ad=%(property_name)s ile bir metadata tanım özelliği ad alanında=" -"%(namespace_name)s zaten mevcut." - -#, python-format -msgid "" -"A metadata definition resource-type with name=%(resource_type_name)s already " -"exists." -msgstr "" -"Ad=%(resource_type_name)s ile bir metadata tanım kaynak-türü zaten mevcut." - -msgid "A set of URLs to access the image file kept in external store" -msgstr "Harici depoda tutulan imaj dosyasına erişilecek URL kümesi" - -msgid "Amount of disk space (in GB) required to boot image." -msgstr "İmajı ön yüklemek için gereken disk alanı miktarı (GB olarak)." - -msgid "Amount of ram (in MB) required to boot image." -msgstr "İmaj ön yüklemesi için gereken (MB olarak) bellek miktarı." - -msgid "An identifier for the image" -msgstr "İmaj için bir tanımlayıcı" - -msgid "An identifier for the image member (tenantId)" -msgstr "İmaj üyesi için bir tanımlayıcı (tenantId)" - -msgid "An identifier for the owner of this task" -msgstr "Görevin sahibi için bir tanımlayıcı" - -msgid "An identifier for the task" -msgstr "Görev için bir tanımlayıcı" - -#, python-format -msgid "An image with identifier %s already exists" -msgstr "%s belirteçli imaj zaten var" - -msgid "An object with the same identifier already exists." -msgstr "Aynı tanımlayıcı ile bir nesne zaten mevcut." - -msgid "An object with the same identifier is currently being operated on." -msgstr "Aynı tanımlayıcıya sahip bir nesne şu anda işleniyor." - -msgid "An object with the specified identifier was not found." -msgstr "Belirtilen tanımlayıcı ile bir nesne bulunamadı." 
- -msgid "An unknown exception occurred" -msgstr "Bilinmeyen olağandışı bir durum oluştu" - -msgid "An unknown task exception occurred" -msgstr "Bilinmeyen bir görev olağandışı durumu oluştu" - -#, python-format -msgid "Attempt to upload duplicate image: %s" -msgstr "Çift imaj yüklemeyi dene: %s" - -msgid "Attempted to update Location field for an image not in queued status." -msgstr "" -"Durumu kuyruğa alınmış olmayan bir imaj için Konum alanı güncellemesi " -"denendi." - -#, python-format -msgid "Attribute '%(property)s' is read-only." -msgstr "'%(property)s' özniteliği salt okunurdur." - -#, python-format -msgid "Attribute '%(property)s' is reserved." -msgstr "'%(property)s' özniteliği ayrılmıştır." - -#, python-format -msgid "Attribute '%s' is read-only." -msgstr "'%s' özniteliği salt okunurdur." - -#, python-format -msgid "Attribute '%s' is reserved." -msgstr "'%s' özniteliği ayrılmıştır." - -msgid "Attribute container_format can be only replaced for a queued image." -msgstr "" -"container_format özniteliği sadece kuyruğa alınmış bir imaj için " -"değiştirilebilir." - -msgid "Attribute disk_format can be only replaced for a queued image." -msgstr "" -"disk_format özniteliği sadece kuyruğa alınmış bir imaj için değiştirilebilir." - -#, python-format -msgid "Auth service at URL %(url)s not found." -msgstr "%(url)s URL'inde kimlik doğrulama servisi bulunamadı." - -msgid "Authorization failed." -msgstr "Yetkilendirme başarısız oldu." 
- -msgid "Available categories:" -msgstr "Kullanılabilir kategoriler:" - -#, python-format -msgid "Bad Command: %s" -msgstr "Hatalı Komut: %s" - -#, python-format -msgid "Bad header: %(header_name)s" -msgstr "Kötü başlık: %(header_name)s" - -#, python-format -msgid "Bad value passed to filter %(filter)s got %(val)s" -msgstr "%(filter)s süzgecine geçirilen hatalı değer %(val)s var" - -#, python-format -msgid "Badly formed S3 URI: %(uri)s" -msgstr "Hatalı oluşturulmuş S3 URI: %(uri)s" - -#, python-format -msgid "Badly formed credentials '%(creds)s' in Swift URI" -msgstr "Swift URI içinde hatalı oluşturulmuş kimlik bilgileri '%(creds)s'" - -msgid "Badly formed credentials in Swift URI." -msgstr "Swift URI içinde hatalı oluşturulmuş kimlik bilgileri." - -msgid "Body expected in request." -msgstr "İstekte beklenen vücut." - -msgid "Cannot be a negative value" -msgstr "Negatif bir değer olamaz" - -msgid "Cannot be a negative value." -msgstr "Negatif bir değer olamaz." - -#, python-format -msgid "Cannot convert image %(key)s '%(value)s' to an integer." -msgstr "%(key)s '%(value)s' imaj değeri bir tam sayıya dönüştürülemez." - -#, python-format -msgid "Cannot save data for image %(image_id)s: %(error)s" -msgstr "%(image_id)s imajı için veri kaydedilemiyor: %(error)s" - -msgid "Cannot upload to an unqueued image" -msgstr "Kuyruğa alınmamış imaj yüklenemez" - -#, python-format -msgid "Checksum verification failed. Aborted caching of image '%s'." -msgstr "" -"Sağlama doğrulama başarısız oldu. '%s' imajını önbelleğe alma işlemi " -"durduruldu." - -msgid "Client disconnected before sending all data to backend" -msgstr "" -"İstemci tüm verileri art alanda çalışan uygulamaya göndermeden önce " -"bağlantıyı kesti" - -msgid "Command not found" -msgstr "Komut bulunamadı" - -msgid "Configuration option was not valid" -msgstr "Yapılandırma seçeneği geçerli değildi." - -#, python-format -msgid "Connect error/bad request to Auth service at URL %(url)s." 
-msgstr "" -"%(url)s URL'indeki kimlik doğrulama servisine bağlantı hatası/hatalı istek." - -#, python-format -msgid "Constructed URL: %s" -msgstr "URL inşa edildi: %s" - -msgid "Container format is not specified." -msgstr "Kap biçimi belirtilmemiş." - -msgid "Content-Type must be application/octet-stream" -msgstr "İçerik-Türü uygulama/sekiz bitli bayt akışı olmalıdır" - -#, python-format -msgid "Corrupt image download for image %(image_id)s" -msgstr "%(image_id)s imajı için bozuk imaj indir" - -#, python-format -msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" -msgstr "30 saniyelik denemeden sonra %(host)s:%(port)s bağlanamadı" - -#, python-format -msgid "Could not find metadata object %s" -msgstr "Metadata nesnesi %s bulunamadı" - -#, python-format -msgid "Could not find metadata tag %s" -msgstr "%s metadata etiketi bulunamadı" - -#, python-format -msgid "Could not find namespace %s" -msgstr "%s ad alanı bulunamadı" - -#, python-format -msgid "Could not find property %s" -msgstr "%s özelliği bulunamadı" - -msgid "Could not find required configuration option" -msgstr "Gerekli yapılandırma seçeneği bulunamadı" - -#, python-format -msgid "Could not find task %s" -msgstr "%s görevi bulunamadı" - -#, python-format -msgid "Could not update image: %s" -msgstr "İmaj güncellenemiyor: %s" - -msgid "Data supplied was not valid." -msgstr "Sağlanan veri geçersizdir." 
- -msgid "Date and time of image member creation" -msgstr "İmaj üyesi oluşturma tarih ve saati" - -msgid "Date and time of last modification of image member" -msgstr "İmaj üyesi son değişiklik tarih ve saati" - -msgid "Datetime when this resource was created" -msgstr "Bu kaynak oluşturulduğundaki tarih saat" - -msgid "Datetime when this resource was updated" -msgstr "Bu kaynak güncellendiğindeki tarih saat" - -msgid "Datetime when this resource would be subject to removal" -msgstr "Bu kaynağın kaldırılacağı tarih zaman" - -#, python-format -msgid "Denying attempt to upload image because it exceeds the quota: %s" -msgstr "İmaj yükleme girişimi kotayı aştığından dolayı reddediliyor: %s" - -#, python-format -msgid "Denying attempt to upload image larger than %d bytes." -msgstr "%d bayttan büyük bir imajın yükleme girişimi reddediliyor." - -msgid "Descriptive name for the image" -msgstr "İmaj için açıklayıcı ad" - -msgid "Disk format is not specified." -msgstr "Disk biçimi belirtilmemiş." - -#, python-format -msgid "" -"Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" -msgstr "" -"%(driver_name)s sürücüsü düzgün bir şekilde yapılandırılamadı. Nedeni: " -"%(reason)s" - -msgid "Error in store configuration. Adding images to store is disabled." -msgstr "" -"Depolama yapılandırmasında hata. Depolamak için imaj ekleme devre dışıdır." - -#, python-format -msgid "External sources are not supported: '%s'" -msgstr "Harici kaynaklar desteklenmiyor: '%s'" - -#, python-format -msgid "Failed to activate image. Got error: %s" -msgstr "İmaj etkinleştirme işlemi başarısız oldu. Alınan hata: %s" - -#, python-format -msgid "Failed to add image metadata. Got error: %s" -msgstr "İmaj metadata ekleme işlemi başarısız oldu. 
Alınan hata: %s" - -#, python-format -msgid "Failed to find image %(image_id)s to delete" -msgstr "Silinecek %(image_id)s imajını bulma işlemi başarısız oldu" - -#, python-format -msgid "Failed to find image to delete: %s" -msgstr "Silinecek imaj bulunamadı: %s" - -#, python-format -msgid "Failed to find image to update: %s" -msgstr "Güncellenecek imaj bulma işlemi başarısız oldu: %s" - -#, python-format -msgid "Failed to find resource type %(resourcetype)s to delete" -msgstr "Silinecek %(resourcetype)s kaynak türü bulma işlemi başarısız oldu" - -#, python-format -msgid "Failed to initialize the image cache database. Got error: %s" -msgstr "İmaj önbellek veritabanı başlatılamadı. Alınan hata: %s" - -#, python-format -msgid "Failed to read %s from config" -msgstr "Yapılandırmadan %s okunamadı" - -#, python-format -msgid "Failed to reserve image. Got error: %s" -msgstr "İmaj ayırma işlemi başarısız oldu. Alınan hata: %s" - -#, python-format -msgid "Failed to update image metadata. Got error: %s" -msgstr "İmaj metadata güncelleme işlemi başarısız oldu: Alınan hata: %s" - -#, python-format -msgid "Failed to upload image %s" -msgstr "%s imajı yükleme işlemi başarısız oldu" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to HTTP error: " -"%(error)s" -msgstr "" -"HTTP hatası nedeniyle %(image_id)s imajı için imaj verisi yüklenemedi: " -"%(error)s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to internal error: " -"%(error)s" -msgstr "" -"Dahili hata nedeniyle %(image_id)s imajı için imaj verisi yüklenemedi: " -"%(error)s" - -msgid "" -"File based imports are not allowed. Please use a non-local source of image " -"data." -msgstr "" -"Dosya tabanlı içeri aktarmlara izin verilmez. Lütfen imaj verilerinin yerel " -"olmayan bir kaynağını kullanın." - -#, python-format -msgid "Forbidden to delete a %s image." -msgstr "%s imajını silmek yasak." 
- -#, python-format -msgid "Forbidden to delete image: %s" -msgstr "İmaj silmek yasak: %s" - -msgid "Forbidden to reserve image." -msgstr "İmaj ayırmak yasak." - -msgid "Forbidden to update deleted image." -msgstr "Silinen imajın güncellenmesi yasak." - -#, python-format -msgid "Forbidden to update image: %s" -msgstr "İmaj güncellemek yasak: %s" - -#, python-format -msgid "Forbidden upload attempt: %s" -msgstr "Yükleme girişimi yasak: %s" - -#, python-format -msgid "Forbidding request, metadata definition namespace=%s is not visible." -msgstr "Yasak istek, üstveri tanım ad alanı=%s görünür değil." - -#, python-format -msgid "Forbidding request, task %s is not visible" -msgstr "Yasak istek, %s görevi görünür değil" - -msgid "Format of the container" -msgstr "Kabın biçimi" - -msgid "Format of the disk" -msgstr "Diskin biçimi" - -#, python-format -msgid "Host \"%s\" is not valid." -msgstr "İstemci \"%s\" geçersizdir." - -#, python-format -msgid "Host and port \"%s\" is not valid." -msgstr "İstemci ve bağlantı noktası \"%s\" geçersizdir." - -msgid "" -"Human-readable informative message only included when appropriate (usually " -"on failure)" -msgstr "" -"Okunabilir bilgilendirme iletisi sadece uygun olduğunda (genellikle " -"başarısızlıkta) dahildir" - -msgid "If true, image will not be deletable." -msgstr "Eğer seçiliyse, imaj silinemeyecektir." - -msgid "If true, namespace will not be deletable." -msgstr "Eğer seçiliyse, ad alanı silinemeyecektir." - -#, python-format -msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" -msgstr "%(id)s imajı kullanımda olduğundan dolayı silinemedi: %(exc)s" - -#, python-format -msgid "Image %(id)s not found" -msgstr "%(id)s imajı bulunamadı" - -#, python-format -msgid "" -"Image %(image_id)s could not be found after upload. The image may have been " -"deleted during the upload: %(error)s" -msgstr "" -"%(image_id)s imajı yüklemeden sonra bulunamadı. 
İmaj yükleme sırasında " -"silinmiş olabilir: %(error)s" - -#, python-format -msgid "Image %(image_id)s is protected and cannot be deleted." -msgstr "%(image_id)s imajı korumalıdır ve silinemez." - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload, cleaning up the chunks uploaded." -msgstr "" -"%s imajı yüklendikten sonra bulunamadı. İmaj yükleme sırasında silinmiş, " -"yüklenen parçalar temizlenmiş olabilir." - -#, python-format -msgid "Image %s is deactivated" -msgstr "%s imajı devrede değil" - -#, python-format -msgid "Image %s is not active" -msgstr "%s imajı etkin değil" - -#, python-format -msgid "Image %s not found." -msgstr "%s imajı bulunamadı." - -#, python-format -msgid "Image exceeds the storage quota: %s" -msgstr "İmaj depolama kotasını aşar: %s" - -msgid "Image id is required." -msgstr "İmaj kimliği gereklidir." - -msgid "Image is protected" -msgstr "İmaj korumalıdır" - -#, python-format -msgid "Image member limit exceeded for image %(id)s: %(e)s:" -msgstr "%(id)s imajı için üye sınırı aşıldı: %(e)s:" - -#, python-format -msgid "Image name too long: %d" -msgstr "İmaj adı çok uzun: %d" - -msgid "Image operation conflicts" -msgstr "İmaj işlem çatışmaları" - -#, python-format -msgid "" -"Image status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "" -"%(cur_status)s durumundan %(new_status)s durumuna imaj durum geçişine izin " -"verilmez" - -#, python-format -msgid "Image storage media is full: %s" -msgstr "İmaj depolama ortamı dolu: %s" - -#, python-format -msgid "Image tag limit exceeded for image %(id)s: %(e)s:" -msgstr "%(id)s imajı için etiket sınırı aşıldı: %(e)s:" - -#, python-format -msgid "Image upload problem: %s" -msgstr "İmaj yükleme sorunu: %s" - -#, python-format -msgid "Image with identifier %s already exists!" -msgstr "%s tanımlayıcısı ile imaj zaten mevcut!" - -#, python-format -msgid "Image with identifier %s has been deleted." 
-msgstr "%s tanımlayıcılı imaj silindi." - -#, python-format -msgid "Image with identifier %s not found" -msgstr "%s tanımlayıcısı ile imaj bulunamadı" - -#, python-format -msgid "Image with the given id %(image_id)s was not found" -msgstr "Verilen %(image_id)s ile imaj bulunamadı" - -#, python-format -msgid "" -"Incorrect auth strategy, expected \"%(expected)s\" but received " -"\"%(received)s\"" -msgstr "" -"Hatalı yetki stratejisi, beklenen değer, \"%(expected)s\" ancak alınan " -"değer, \"%(received)s\"" - -#, python-format -msgid "Incorrect request: %s" -msgstr "Hatalı istek: %s" - -#, python-format -msgid "Input does not contain '%(key)s' field" -msgstr "Girdi '%(key)s' alanı içermez" - -#, python-format -msgid "Insufficient permissions on image storage media: %s" -msgstr "İmaj depolama ortamında yetersiz izinler: %s" - -#, python-format -msgid "Invalid JSON pointer for this resource: '/%s'" -msgstr "Bu kaynak için geçersiz JSON işaretçisi: '/%s'" - -#, python-format -msgid "Invalid checksum '%s': can't exceed 32 characters" -msgstr "Geçersiz sağlama '%s': 32 karakterden uzun olamaz" - -msgid "Invalid configuration in glance-swift conf file." -msgstr "glance-swift yapılandır dosyasında geçersiz yapılandırma." - -msgid "Invalid configuration in property protection file." -msgstr "Özellik koruma dosyasında geçersiz yapılandırma." - -#, python-format -msgid "Invalid container format '%s' for image." -msgstr "İmaj için geçersiz kap biçimi '%s'." - -#, python-format -msgid "Invalid content type %(content_type)s" -msgstr "Geçersiz içerik türü %(content_type)s" - -#, python-format -msgid "Invalid disk format '%s' for image." -msgstr "İmaj için geçersiz disk biçimi '%s'." 
- -msgid "Invalid image id format" -msgstr "Geçersiz imaj id biçimi" - -msgid "Invalid location" -msgstr "Geçersiz konum" - -#, python-format -msgid "Invalid location %s" -msgstr "Geçersiz konum %s" - -#, python-format -msgid "Invalid location: %s" -msgstr "Geçersiz konum: %s" - -#, python-format -msgid "" -"Invalid location_strategy option: %(name)s. The valid strategy option(s) " -"is(are): %(strategies)s" -msgstr "" -"Geçersiz location_strategy seçeneği: %(name)s. Geçerli strateji " -"seçenek(leri): %(strategies)s" - -msgid "Invalid locations" -msgstr "Geçersiz konumlar" - -#, python-format -msgid "Invalid locations: %s" -msgstr "Geçersiz konumlar: %s" - -msgid "Invalid marker format" -msgstr "Geçersiz işaretçi biçimi" - -msgid "Invalid marker. Image could not be found." -msgstr "Geçersiz işaretçi. İmaj bulunamadı." - -#, python-format -msgid "Invalid membership association: %s" -msgstr "Geçersiz üyelik ilişkisi: %s" - -msgid "" -"Invalid mix of disk and container formats. When setting a disk or container " -"format to one of 'aki', 'ari', or 'ami', the container and disk formats must " -"match." -msgstr "" -"Geçersiz disk ve kap biçimleri karışımı. Bir disk ya da kap biçimi 'aki', " -"'ari' ya da 'ami' biçimlerinden biri olarak ayarlanırsa, kap ve disk biçimi " -"eşleşmelidir." - -#, python-format -msgid "" -"Invalid operation: `%(op)s`. It must be one of the following: %(available)s." -msgstr "" -"Geçersiz işlem: `%(op)s`. Şu seçeneklerden biri olmalıdır: %(available)s." - -msgid "Invalid position for adding a location." -msgstr "Yer eklemek için geçersiz konum." - -msgid "Invalid position for removing a location." -msgstr "Yer kaldırmak için geçersiz konum." - -msgid "Invalid service catalog json." -msgstr "Geçersiz json servis katalogu." - -#, python-format -msgid "Invalid sort direction: %s" -msgstr "Geçersiz sıralama yönü: %s" - -#, python-format -msgid "" -"Invalid sort key: %(sort_key)s. It must be one of the following: " -"%(available)s." 
-msgstr "" -"Geçersiz sıralama anahtarı: %(sort_key)s. Şu seçeneklerden biri olmalıdır: " -"%(available)s." - -#, python-format -msgid "Invalid status value: %s" -msgstr "Geçersiz durum değeri: %s" - -#, python-format -msgid "Invalid status: %s" -msgstr "Geçersiz durum: %s" - -#, python-format -msgid "Invalid type value: %s" -msgstr "Geçersiz tür değeri: %s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition namespace " -"with the same name of %s" -msgstr "" -"Geçersiz güncelleme. Aynı %s adıyla çift metadata tanım ad alanı ile " -"sonuçlanır" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Geçersiz güncelleme. Ad alanı=%(namespace_name)s içinde aynı ad=%(name)s " -"ile çift metadata tanım nesnesi olmasına neden olacaktır." - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Geçersiz güncelleme. Ad alanında=%(namespace_name)s aynı ad=%(name)s ile " -"çift metadata tanım nesnesi ile sonuçlanır." - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition property " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"Geçersiz güncelleme. Ad alanı=%(namespace_name)s içinde aynı ad=%(name)s ile " -"çift metadata tanım özelliği olmasına neden olacaktır." 
- -#, python-format -msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" -msgstr "" -"'%(param)s' parametresi için '%(value)s' geçersiz değeri: %(extra_msg)s" - -#, python-format -msgid "Invalid value for option %(option)s: %(value)s" -msgstr "%(option)s seçeneği için geçersiz değer: %(value)s" - -#, python-format -msgid "Invalid visibility value: %s" -msgstr "Geçersiz görünürlük değeri: %s" - -msgid "It's invalid to provide multiple image sources." -msgstr "Birden fazla imaj kaynağı sağlamak için geçersizdir." - -msgid "List of strings related to the image" -msgstr "İmaj ile ilgili karakter dizilerinin listesi" - -msgid "Malformed JSON in request body." -msgstr "İstek gövdesinde bozuk JSON." - -#, python-format -msgid "Maximum redirects (%(redirects)s) was exceeded." -msgstr "Yeniden yönlendirmelerin sınırı (%(redirects)s) aşıldı." - -#, python-format -msgid "Member %(member_id)s is duplicated for image %(image_id)s" -msgstr "Üye %(member_id)s %(image_id)s imajı için çoğaltıldı" - -msgid "Member can't be empty" -msgstr "Üye boş olamaz" - -msgid "Member to be added not specified" -msgstr "Eklenecek üye belirtilmemiş" - -msgid "Membership could not be found." -msgstr "Üyelik bulunamadı." - -#, python-format -msgid "" -"Metadata definition namespace %(namespace)s is protected and cannot be " -"deleted." -msgstr "Metadata tanım ad alanı %(namespace)s korumalıdır ve silinemez." - -#, python-format -msgid "Metadata definition namespace not found for id=%s" -msgstr "id=%s için metadata tanım ad alanı bulunamadı" - -#, python-format -msgid "" -"Metadata definition object %(object_name)s is protected and cannot be " -"deleted." -msgstr "Metadata tanım nesnesi %(object_name)s korumalıdır ve silinemez." - -#, python-format -msgid "Metadata definition object not found for id=%s" -msgstr "id=%s için metadata tanım nesnesi bulunamadı" - -#, python-format -msgid "" -"Metadata definition property %(property_name)s is protected and cannot be " -"deleted." 
-msgstr "Metadata tanım özelliği %(property_name)s korumalıdır ve silinemez." - -#, python-format -msgid "Metadata definition property not found for id=%s" -msgstr "id=%s için metadata tanım özelliği bulunamadı" - -#, python-format -msgid "" -"Metadata definition resource-type %(resource_type_name)s is a seeded-system " -"type and cannot be deleted." -msgstr "" -" %(resource_type_name)s metadata tanım kaynak-türü sınıflanmış bir sistem " -"türüdür ve silinemez." - -#, python-format -msgid "" -"Metadata definition resource-type-association %(resource_type)s is protected " -"and cannot be deleted." -msgstr "" -"Metadata tanım kaynak-tür-ilişkisi %(resource_type)s korumalıdır ve " -"silinemez." - -#, python-format -msgid "" -"Metadata definition tag %(tag_name)s is protected and cannot be deleted." -msgstr "Metadata tanım etiketi %(tag_name)s korumalıdır ve silinemez." - -#, python-format -msgid "Metadata definition tag not found for id=%s" -msgstr "id=%s için metadata tanım etiketi bulunamadı" - -#, python-format -msgid "Missing required credential: %(required)s" -msgstr "Gerekli olan kimlik eksik: %(required)s" - -#, python-format -msgid "" -"Multiple 'image' service matches for region %(region)s. This generally means " -"that a region is required and you have not supplied one." -msgstr "" -"%(region)s bölgesi için birden fazla 'image' servisi eşleşir. Bu genellikle, " -"bir bölgenin gerekli olduğu ve sağlamadığınız anlamına gelir." - -msgid "No authenticated user" -msgstr "Kimlik denetimi yapılmamış kullanıcı" - -#, python-format -msgid "No image found with ID %s" -msgstr "%s bilgileri ile hiçbir imaj bulunamadı" - -#, python-format -msgid "No location found with ID %(loc)s from image %(img)s" -msgstr "%(img)s imajından %(loc)s bilgisi ile hiçbir konum bulunamadı" - -msgid "No permission to share that image" -msgstr "Bu imajı paylaşma izni yok" - -#, python-format -msgid "Not allowed to create members for image %s." 
-msgstr "%s imajı için üye oluşturulmasına izin verilmedi." - -#, python-format -msgid "Not allowed to deactivate image in status '%s'" -msgstr "'%s' durumundaki imajın etkinliğini kaldırmaya izin verilmez" - -#, python-format -msgid "Not allowed to delete members for image %s." -msgstr "%s imajı için üyelerin silinmesine izin verilmedi." - -#, python-format -msgid "Not allowed to delete tags for image %s." -msgstr "%s imajı için etiketlerin silinmesine izin verilmedi." - -#, python-format -msgid "Not allowed to list members for image %s." -msgstr "%s imajı için üyelerin listelenmesine izin verilmedi." - -#, python-format -msgid "Not allowed to reactivate image in status '%s'" -msgstr "'%s' durumundaki imajı yeniden etkinleştirmeye izin verilmez" - -#, python-format -msgid "Not allowed to update members for image %s." -msgstr "%s imajı için üyelerin güncellenmesine izin verilmedi." - -#, python-format -msgid "Not allowed to update tags for image %s." -msgstr "%s imajı için etiketlerin güncellenmesine izin verilmez." - -#, python-format -msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" -msgstr "" -"%(image_id)s imajı için imaj verisi yüklenmesine izin verilmedi: %(error)s" - -msgid "Number of sort dirs does not match the number of sort keys" -msgstr "" -"Sıralama dizinlerinin sayısı, sıralama anahtarlarının sayısıyla eşleşmez" - -msgid "Old and new sorting syntax cannot be combined" -msgstr "Eski ve yeni sıralama sözdizimi birleştirilemez" - -#, python-format -msgid "Operation \"%s\" requires a member named \"value\"." -msgstr "\"%s\" işlemi \"değer\" olarak adlandırılan bir üye ister." - -msgid "" -"Operation objects must contain exactly one member named \"add\", \"remove\", " -"or \"replace\"." -msgstr "" -"İşlem nesneleri \"ekle\", \"kaldır\" ya da \"değiştir\" olarak adlandırılan " -"tam olarak bir üye içermelidir." - -msgid "" -"Operation objects must contain only one member named \"add\", \"remove\", or " -"\"replace\"." 
-msgstr "" -"İşlem nesneleri, \"ekle\", \"kaldır\" ya da \"değiştir\" olarak adlandırılan " -"sadece bir üye içermelidir." - -msgid "Operations must be JSON objects." -msgstr "İşlemler JSON nesnesi olmalıdır." - -#, python-format -msgid "Original locations is not empty: %s" -msgstr "Özgün konumlar boş değil: %s" - -msgid "Owner must be specified to create a tag." -msgstr "Etiket oluşturmak için sahibi belirtilmelidir." - -msgid "Owner of the image" -msgstr "İmajın sahibi" - -msgid "Owner of the namespace." -msgstr "Ad alanı sahibi." - -msgid "Param values can't contain 4 byte unicode." -msgstr "Param değerleri 4 bayt unikod içermez." - -#, python-format -msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." -msgstr "" -"`%s` işaretçisi tanınmayan bir vazgeçme dizisinin parçası olmayan \"~\" " -"içerir." - -#, python-format -msgid "Pointer `%s` contains adjacent \"/\"." -msgstr "`%s` işaretçisi bitişik \"/\" içerir." - -#, python-format -msgid "Pointer `%s` does not contains valid token." -msgstr "`%s`işaretçisi geçerli jeton içermez." - -#, python-format -msgid "Pointer `%s` does not start with \"/\"." -msgstr "`%s` işaretçisi \"/\" ile başlamaz." - -#, python-format -msgid "Pointer `%s` end with \"/\"." -msgstr "`%s` işaretçisi \"/\" ile sonlanır." - -#, python-format -msgid "Port \"%s\" is not valid." -msgstr "Bağlantı noktası \"%s\" geçersizdir." - -#, python-format -msgid "Process %d not running" -msgstr "%d süreci çalışmıyor" - -#, python-format -msgid "Properties %s must be set prior to saving data." -msgstr "%s özellikleri veri kaydetmeden önce ayarlanmış olmalıdır." - -#, python-format -msgid "" -"Property %(property_name)s does not start with the expected resource type " -"association prefix of '%(prefix)s'." -msgstr "" -"%(property_name)s özelliği beklenen kaynak tür ilişkilendirme ön eki " -"'%(prefix)s' ile başlamaz." - -#, python-format -msgid "Property %s already present." -msgstr "Özellik %s zaten mevcut." 
- -#, python-format -msgid "Property %s does not exist." -msgstr "Özellik %s mevcut değil." - -#, python-format -msgid "Property %s may not be removed." -msgstr "Özellik %s kaldırılamayabilir." - -#, python-format -msgid "Property %s must be set prior to saving data." -msgstr "%s özelliği veri kaydetmeden önce ayarlanmış olmalıdır." - -#, python-format -msgid "Property '%s' is protected" -msgstr "'%s' özelliği korumalıdır" - -msgid "Property names can't contain 4 byte unicode." -msgstr "Özellik adları 4 bayt unicode içeremez." - -#, python-format -msgid "" -"Provided image size must match the stored image size. (provided size: " -"%(ps)d, stored size: %(ss)d)" -msgstr "" -"Sağlanan imaj boyutu depolanan imaj boyutu ile eşleşmelidir. (sağlanan " -"boyut: %(ps)d, depolanan boyut: %(ss)d)" - -#, python-format -msgid "Provided object does not match schema '%(schema)s': %(reason)s" -msgstr "Sağlanan nesne '%(schema)s' şeması ile eşleşmez: %(reason)s" - -#, python-format -msgid "Provided status of task is unsupported: %(status)s" -msgstr "Sağlanan görev durumu desteklenmiyor: %(status)s" - -#, python-format -msgid "Provided type of task is unsupported: %(type)s" -msgstr "Sağlanan görev türü desteklenmiyor: %(type)s" - -msgid "Provides a user friendly description of the namespace." -msgstr "Ad alanı için kullanıcı dostu bir açıklama sağlar." - -msgid "Received invalid HTTP redirect." -msgstr "Geçersiz HTTP yeniden yönlendirme isteği alındı." - -#, python-format -msgid "Redirecting to %(uri)s for authorization." -msgstr "Yetkilendirme için %(uri)s adresine yeniden yönlendiriliyor." - -#, python-format -msgid "Registry service can't use %s" -msgstr "Kayıt defteri servisi %s kullanamaz" - -#, python-format -msgid "Registry was not configured correctly on API server. Reason: %(reason)s" -msgstr "" -"Kayıt defteri API sunucusunda doğru bir şekilde yapılandırılamadı. 
Nedeni: " -"%(reason)s" - -#, python-format -msgid "Reload of %(serv)s not supported" -msgstr "%(serv)s yeniden yükleme desteklenmiyor" - -#, python-format -msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "(%(sig)s) sinyali ile %(serv)s (pid %(pid)s) yeniden yükleniyor" - -#, python-format -msgid "Removing stale pid file %s" -msgstr "Bozuk pid dosyası %s kaldırılıyor" - -msgid "Request body must be a JSON array of operation objects." -msgstr "İstek vücudu işlem nesnelerinin bir JSON dizisi olmalıdır." - -msgid "Request must be a list of commands" -msgstr "İstek komutların bir listesi olmalıdır" - -#, python-format -msgid "Required store %s is invalid" -msgstr "İstenen depo %s geçersizdir" - -msgid "" -"Resource type names should be aligned with Heat resource types whenever " -"possible: http://docs.openstack.org/developer/heat/template_guide/openstack." -"html" -msgstr "" -"Kaynak tür adları her fırsatta, Heat kaynak türleri ile hizalanmalıdır: " -"http://docs.openstack.org/developer/heat/template_guide/openstack.html" - -msgid "Response from Keystone does not contain a Glance endpoint." -msgstr "Keystone yanıtı bir Glance uç noktası içermiyor." - -msgid "Scope of image accessibility" -msgstr "İmaj erişilebilirlik kapsamı" - -msgid "Scope of namespace accessibility." -msgstr "Ad alanı erişebilirlik kapsamı." - -#, python-format -msgid "Server %(serv)s is stopped" -msgstr "Sunucu %(serv)s durdurulur" - -#, python-format -msgid "Server worker creation failed: %(reason)s." -msgstr "Sunucu işçisi oluşturma işlemi başarısız oldu: %(reason)s." - -msgid "" -"Some resource types allow more than one key / value pair per instance. For " -"example, Cinder allows user and image metadata on volumes. Only the image " -"properties metadata is evaluated by Nova (scheduling or drivers). This " -"property allows a namespace target to remove the ambiguity." -msgstr "" -"Bazı kaynak türleri her sunucu başına birden fazla anahtar / değer çiftine " -"izin verir. 
Örneğin, Cinder mantıksal sürücü üzerinde kullanıcı ve imaj " -"metadatalarına izin verir. Sadece imaj özellikleri metadataları Nova ile " -"değerlendirilir (zamanlama ya da sürücüler). Bu özellik belirsizliği " -"kaldırmak için bir ad alanı hedefine olanak sağlar." - -msgid "Sort direction supplied was not valid." -msgstr "Sağlanan sıralama yönü geçersizdir." - -msgid "Sort key supplied was not valid." -msgstr "Sağlanan sıralama anahtarı geçersizdir." - -msgid "" -"Specifies the prefix to use for the given resource type. Any properties in " -"the namespace should be prefixed with this prefix when being applied to the " -"specified resource type. Must include prefix separator (e.g. a colon :)." -msgstr "" -"Verilen kaynak türü için kullanılacak öneki belirtir. Ad alanındaki her " -"özellik belirtilen kaynak türüne uygulanırken önek eklenmelidir. Önek " -"ayıracı içermelidir (örneğin; :)." - -msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." -msgstr "Durum \"bekliyor\", \"kabul edildi\" ya da \"reddedildi\" olmalıdır." - -msgid "Status not specified" -msgstr "Durum belirtilmemiş" - -#, python-format -msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "" -"%(cur_status)s mevcut durumundan %(new_status)s yeni duruma geçişe izin " -"verilmez" - -#, python-format -msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "(%(sig)s) sinyali ile %(serv)s (pid %(pid)s) durduruluyor" - -#, python-format -msgid "Store for image_id not found: %s" -msgstr "image_id için depo bulunamadı: %s" - -#, python-format -msgid "Store for scheme %s not found" -msgstr "%s şeması için depo bulunamadı" - -#, python-format -msgid "" -"Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " -"(%(actual)s) did not match. Setting image status to 'killed'." -msgstr "" -"Verilen %(attr)s (%(supplied)s) ve yüklenen imajdan (%(actual)s) oluşturulan " -"%(attr)s uyuşmadı. Görüntü durumu ayarlama 'killed'." 
- -msgid "Supported values for the 'container_format' image attribute" -msgstr "'container_format' imaj özniteliği için desteklenen değerler" - -msgid "Supported values for the 'disk_format' image attribute" -msgstr "'disk_format' imaj özniteliği için desteklenen değerler" - -#, python-format -msgid "Suppressed respawn as %(serv)s was %(rsn)s." -msgstr "%(serv)s olarak yeniden oluşturulması durdurulan, %(rsn)s idi." - -msgid "System SIGHUP signal received." -msgstr "Sistem SIGHUP sinyali aldı." - -#, python-format -msgid "Task '%s' is required" -msgstr "'%s' görevi gereklidir" - -msgid "Task does not exist" -msgstr "Görev mevcut değil" - -msgid "Task failed due to Internal Error" -msgstr "Görev Dahili Hata nedeniyle başarısız oldu" - -msgid "Task was not configured properly" -msgstr "Görev düzgün bir şekilde yapılandırılmadı." - -#, python-format -msgid "Task with the given id %(task_id)s was not found" -msgstr "Verilen %(task_id)s ile görev bulunamadı" - -msgid "The \"changes-since\" filter is no longer available on v2." -msgstr "" -"\"belli bir zamandan sonraki değişiklikler\" süzgeci v2 sürümünde artık " -"mevcut değil." - -#, python-format -msgid "The CA file you specified %s does not exist" -msgstr "Belirtilen %s CA dosyası mevcut değil" - -#, python-format -msgid "" -"The Image %(image_id)s object being created by this task %(task_id)s, is no " -"longer in valid status for further processing." -msgstr "" -"%(task_id)s görevi ile oluşturulan %(image_id)s imaj nesnesi, artık ileri " -"işlem için geçerli durumda değildir." - -msgid "The Store URI was malformed." -msgstr "Depo URI'si bozulmuş." - -msgid "" -"The URL to the keystone service. If \"use_user_token\" is not in effect and " -"using keystone auth, then URL of keystone can be specified." -msgstr "" -"Keystone hizmeti için URL. Eğer \"use_user_token\" yürürlükte değilse ve " -"keystone kimlik doğrulaması kullanılıyorsa, o zaman keystone URL'i " -"belirtilebilir." - -msgid "" -"The administrators password. 
If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"Yönetici parolası. Eğer \"use_user_token\" yürürlükte değilse, o zaman " -"yönetici kimlik bilgileri belirtilebilir." - -msgid "" -"The administrators user name. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "" -"Yönetici kullanıcı adı. Eğer \"use_user_token\" yürürlükte değilse, o zaman " -"yönetici kimlik bilgileri belirtilebilir." - -#, python-format -msgid "The cert file you specified %s does not exist" -msgstr "Belirtilen %s sertifika dosyası mevcut değil" - -msgid "The current status of this task" -msgstr "Görevin şu anki durumu" - -#, python-format -msgid "" -"The device housing the image cache directory %(image_cache_dir)s does not " -"support xattr. It is likely you need to edit your fstab and add the " -"user_xattr option to the appropriate line for the device housing the cache " -"directory." -msgstr "" -"İmaj önbellek dizininin %(image_cache_dir)s yer aldığı aygıt xattr " -"desteklemiyor. Önbellek dizini içeren aygıt için fstab düzenlemeniz ve uygun " -"satıra user_xattr seçeneği eklemeniz gerekebilir." - -#, python-format -msgid "" -"The given uri is not valid. Please specify a valid uri from the following " -"list of supported uri %(supported)s" -msgstr "" -"Verilen uri geçersizdir. Lütfen, desteklenen uri listesinden %(supported)s " -"geçerli bir uri belirtin" - -#, python-format -msgid "The incoming image is too large: %s" -msgstr "Gelen imaj çok büyük: %s" - -#, python-format -msgid "The key file you specified %s does not exist" -msgstr "Belirttiğiniz %s anahtar dosyası mevcut değil" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image locations. 
" -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"İzin verilen imaj konumlarının sayı sınırı aşıldı.Denenen: %(attempted)s, " -"Azami: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image members for this " -"image. Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"Bu imaj için izin verilen imaj üye sınırı aşıldı.Denenen: %(attempted)s, En " -"fazla: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"İzin verilen imaj özelliklerinin sayı sınırı aşıldı.Denenen: %(attempted)s, " -"Azami: %(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(num)s, Maximum: %(quota)s" -msgstr "" -"İmaj özelliklerinde izin verilen sınır aşıldı.Denenen: %(num)s, En fazla: " -"%(quota)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image tags. Attempted: " -"%(attempted)s, Maximum: %(maximum)s" -msgstr "" -"İzin verilen imaj etiketlerinin sayı sınırı aşıldı.Denenen: %(attempted)s, " -"Azami: %(maximum)s" - -#, python-format -msgid "The location %(location)s already exists" -msgstr "%(location)s konumu zaten mevcut" - -#, python-format -msgid "The location data has an invalid ID: %d" -msgstr "Konum verisi geçersiz bir kimliğe sahip: %d" - -#, python-format -msgid "" -"The metadata definition %(record_type)s with name=%(record_name)s not " -"deleted. Other records still refer to it." -msgstr "" -"Ad=%(record_name)s ile metadata tanımı %(record_type)s silinebilir değil. " -"Diğer kayıtlar hala onu gösteriyor." - -#, python-format -msgid "The metadata definition namespace=%(namespace_name)s already exists." -msgstr "Metadata tanım ad alanı=%(namespace_name)s zaten mevcut." 
- -#, python-format -msgid "" -"The metadata definition object with name=%(object_name)s was not found in " -"namespace=%(namespace_name)s." -msgstr "" -"Ad=%(object_name)s ile metadata tanım nesnesi ad alanında=%(namespace_name)s " -"bulunamadı." - -#, python-format -msgid "" -"The metadata definition property with name=%(property_name)s was not found " -"in namespace=%(namespace_name)s." -msgstr "" -"Ad=%(property_name)s ile metadata tanım özelliği ad alanında=" -"%(namespace_name)s bulunamadı." - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s already exists." -msgstr "" -"Ad alanına=%(namespace_name)s kaynak türünün=%(resource_type_name)s metadata " -"tanım kaynak tür ilişkisi zaten mevcut." - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s, was not found." -msgstr "" -"Kaynak türünün=%(resource_type_name)s ad alanında=%(namespace_name)s, " -"metadata tanım kaynak-tür ilişkisi bulunamadı." - -#, python-format -msgid "" -"The metadata definition resource-type with name=%(resource_type_name)s, was " -"not found." -msgstr "Ad=%(resource_type_name)s ile metadata tanım kaynak-türü bulunamadı." - -#, python-format -msgid "" -"The metadata definition tag with name=%(name)s was not found in namespace=" -"%(namespace_name)s." -msgstr "" -"Ad=%(name)s ile metadata tanım etiketi ad alanında=%(namespace_name)s " -"bulunamadı." - -msgid "The parameters required by task, JSON blob" -msgstr "JSON blob, görev tarafından istenen parameteler" - -msgid "The provided image is too large." -msgstr "Getirilen imaj çok büyük." - -msgid "" -"The region for the authentication service. If \"use_user_token\" is not in " -"effect and using keystone auth, then region name can be specified." -msgstr "" -"Kimlik doğrulama servisi için bölge. 
Eğer \"use_user_token\" yürürlükte " -"değilse ve keystone kimlik doğrulaması kullanılıyorsa, bölge adı " -"belirtilebilir." - -msgid "The request returned 500 Internal Server Error." -msgstr "İstek geri 500 İç Sunucu Hatası döndürdü." - -msgid "" -"The request returned 503 Service Unavailable. This generally occurs on " -"service overload or other transient outage." -msgstr "" -"İstek 503 Hizmet Kullanılamıyor kodu döndürdü. Bu genellikle, hizmetin aşırı " -"yük altında olduğu ya da geçici kesintiler oluştuğu anlamına gelir." - -#, python-format -msgid "" -"The request returned a 302 Multiple Choices. This generally means that you " -"have not included a version indicator in a request URI.\n" -"\n" -"The body of response returned:\n" -"%(body)s" -msgstr "" -"İstek 302 Çok Seçenek kodu döndürdü. Bu genellikle, istek URI'sinin bir " -"sürüm göstergesi içermediği anlamına gelir.\n" -"\n" -"Dönen yanıtın gövdesi:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned a 413 Request Entity Too Large. This generally means " -"that rate limiting or a quota threshold was breached.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"İstek 413 Girilen Veri Çok Büyük kodu döndürdü. Bu genellikle, hız " -"sınırlayıcı ya da kota eşiği ihlali anlamına gelir.\n" -"\n" -"Yanıt gövdesi:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned an unexpected status: %(status)s.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"İstek beklenmeyen bir durum döndürdü: %(status)s.\n" -"\n" -"Yanıt:\n" -"%(body)s" - -msgid "" -"The requested image has been deactivated. Image data download is forbidden." -msgstr "İstenen imaj devrede değil. İmaj verisi indirmek yasak." - -msgid "The result of current task, JSON blob" -msgstr "Şu anki görevin sonucu, JSON blob" - -#, python-format -msgid "" -"The size of the data %(image_size)s will exceed the limit. %(remaining)s " -"bytes remaining." -msgstr "%(image_size)s veri boyutu sınırı aşacak. 
Kalan bayt %(remaining)s " - -#, python-format -msgid "The specified member %s could not be found" -msgstr "Belirtilen üye %s bulunamadı" - -#, python-format -msgid "The specified metadata object %s could not be found" -msgstr "Belirtilen metadata nesnesi %s bulunamadı" - -#, python-format -msgid "The specified metadata tag %s could not be found" -msgstr "Belirtilen metadata etiketi %s bulunamadı" - -#, python-format -msgid "The specified namespace %s could not be found" -msgstr "Belirtilen ad alanı %s bulunamadı" - -#, python-format -msgid "The specified property %s could not be found" -msgstr "Belirtilen özellik %s bulunamadı" - -#, python-format -msgid "The specified resource type %s could not be found " -msgstr "Belirtilen kaynak türü %s bulunamadı " - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'" -msgstr "" -"Silinen imaj konumunun durumu sadece 'pending_delete' ya da 'deleted' olarak " -"ayarlanabilir" - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'." -msgstr "" -"Silinen imaj konum durumu sadece 'pending_delete' ya da 'deleted' olarak " -"ayarlanabilir." - -msgid "The status of this image member" -msgstr "Bu imaj üyesinin durumu" - -msgid "" -"The strategy to use for authentication. If \"use_user_token\" is not in " -"effect, then auth strategy can be specified." -msgstr "" -"Kimlik doğrulama için kullanılacak strateji. Eğer \"use_user_token\" " -"yürürlükte değilse, o zaman kimlik doğrulama stratejisi belirtilebilir." - -#, python-format -msgid "" -"The target member %(member_id)s is already associated with image " -"%(image_id)s." -msgstr "" -"Hedef üye %(member_id)s, %(image_id)s imajı ile zaten ilişkilendirilmiştir." - -msgid "" -"The tenant name of the administrative user. If \"use_user_token\" is not in " -"effect, then admin tenant name can be specified." -msgstr "" -"İdari kullaıcının kiracı adı. 
Eğer \"use_user_token\" yürürlükte değilse, o " -"zaman yönetici kiracı adı belirtilebilir." - -msgid "The type of task represented by this content" -msgstr "Bu içerik ile sunulan görev türü" - -msgid "The unique namespace text." -msgstr "Eşsiz ad alanı metni." - -msgid "The user friendly name for the namespace. Used by UI if available." -msgstr "" -"Kullanıcı dostu ad alanı adı. Eğer mevcut ise, kullanıcı arayüzü tarafından " -"kullanılır." - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. Error: %(ioe)s" -msgstr "" -"%(error_key_name)s %(error_filename)s ile ilgili bir sorun var. Lütfen " -"doğrulayın. Hata: %(ioe)s" - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. OpenSSL error: %(ce)s" -msgstr "" -"%(error_key_name)s %(error_filename)s ile ilgili bir sorun var. Lütfen " -"doğrulayın. OpenSSL hatası: %(ce)s" - -#, python-format -msgid "" -"There is a problem with your key pair. Please verify that cert " -"%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" -msgstr "" -"Anahtar çiftiniz ile ilgili bir sorun var. Lütfen sertifika %(cert_file)s " -"ve anahtarın %(key_file)s birbirine ait olduğunu doğrulayın. OpenSSL hatası " -"%(ce)s" - -msgid "There was an error configuring the client." -msgstr "İstemci yapılandırılırken bir hata meydana geldi." - -msgid "There was an error connecting to a server" -msgstr "Sunucuya bağlanırken bir hata meydana geldi" - -msgid "" -"This operation is currently not permitted on Glance Tasks. They are auto " -"deleted after reaching the time based on their expires_at property." -msgstr "" -"Şu anda Glance Görevleri üzerinde bu işleme izin verilmiyor. Onlar " -"expires_at özellikliğine göre süreleri dolduktan sonra otomatik silinirler." - -msgid "This operation is currently not permitted on Glance images details." 
-msgstr "Bu işleme şu anda Glance imaj ayrıntılarında izin verilmez." - -msgid "" -"Time in hours for which a task lives after, either succeeding or failing" -msgstr "" -"Bir görevin başarılı ya da başarısız olarak sonuçlanmasından sonra saat " -"olarak yaşayacağı süre" - -msgid "Too few arguments." -msgstr "Çok fazla değişken." - -msgid "" -"URI cannot contain more than one occurrence of a scheme.If you have " -"specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " -"you need to change it to use the swift+http:// scheme, like so: swift+http://" -"user:pass@authurl.com/v1/container/obj" -msgstr "" -"URI bir şemanın birden fazla olayını içeremez. Eğer URI'yi swift://user:" -"pass@http://authurl.com/v1/container/obj gibi belirttiyseniz, swift+http:// " -"şemasını kullanmak için onu değiştirmeniz gerekir, şu şekilde: swift+http://" -"user:pass@authurl.com/v1/container/obj" - -#, python-format -msgid "" -"Unable to create pid file %(pid)s. Running as non-root?\n" -"Falling back to a temp file, you can stop %(service)s service using:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" -msgstr "" -"Pid dosyası %(pid)s oluşturulamadı. Root olmadan çalıştırılsın mı?\n" -"Geçici bir dosyaya geri düşüyor, şu komutları kullanarak %(service)s " -"servisini durdurabilirsiniz:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" - -msgid "Unable to filter on a range with a non-numeric value." -msgstr "Sayısal olmayan değer ile bir aralıkta süzme yapılamadı." - -msgid "Unable to filter using the specified range." -msgstr "Belirtilen aralık kullanılarak süzme yapılamadı." - -#, python-format -msgid "Unable to find '%s' in JSON Schema change" -msgstr "JSON Şema değişikliğinde '%s' bulunamadı" - -#, python-format -msgid "" -"Unable to find `op` in JSON Schema change. It must be one of the following: " -"%(available)s." -msgstr "" -"JSON Şema değişikliğinde `op` bulunamadı. Şu seçeneklerden biri olmalıdır: " -"%(available)s." 
- -msgid "Unable to increase file descriptor limit. Running as non-root?" -msgstr "Dosya tanıtıcı sınır arttırılamadı. Root olmadan çalıştırılsın mı?" - -#, python-format -msgid "" -"Unable to load %(app_name)s from configuration file %(conf_file)s.\n" -"Got: %(e)r" -msgstr "" -"%(conf_file)s yapılandırma dosyasından %(app_name)s uygulaması yüklenemedi.\n" -"Alınan: %(e)r" - -#, python-format -msgid "Unable to load schema: %(reason)s" -msgstr "Şema yüklenemedi: %(reason)s" - -#, python-format -msgid "Unable to locate paste config file for %s." -msgstr "%s için yapıştırma yapılandırma dosyası yerleştirilemedi." - -#, python-format -msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" -msgstr "%(image_id)s imajı için çift imaj verisi yüklenemedi: %(error)s" - -msgid "Unauthorized image access" -msgstr "Yetkisiz imaj erişimi" - -#, python-format -msgid "Unexpected response: %s" -msgstr "Beklenmeyen yanıt: %s" - -#, python-format -msgid "Unknown auth strategy '%s'" -msgstr "Bilinmeyen kimlik doğrulama stratejisi '%s'" - -#, python-format -msgid "Unknown command: %s" -msgstr "Bilinmeyen komut: %s" - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "Bilinmeyen sıralama yönü, 'desc' or 'asc' olmalıdır" - -msgid "Unrecognized JSON Schema draft version" -msgstr "Tanınmayan JSON Şeması taslak sürümü" - -msgid "Unrecognized changes-since value" -msgstr "Belli bir zamandan sonraki tanınmayan değişiklik değeri" - -#, python-format -msgid "Unsupported sort_dir. Acceptable values: %s" -msgstr "Desteklenmeyen sort_dir. Kabul edilen değerler: %s" - -#, python-format -msgid "Unsupported sort_key. Acceptable values: %s" -msgstr "Desteklenmeyen sort_key. 
Kabul edilen değerler: %s" - -#, python-format -msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" -msgstr "" -"%(pid)s (%(file)s) pid'i öldürmek için 15 saniye beklendi; vazgeçiliyor" - -msgid "" -"When running server in SSL mode, you must specify both a cert_file and " -"key_file option value in your configuration file" -msgstr "" -"Sunucu SSL kipte çalışırken, cert_file ve key_file değerlerinin ikisinide " -"yapılandırma dosyanızda belirtmelisiniz" - -msgid "" -"Whether to pass through the user token when making requests to the registry. " -"To prevent failures with token expiration during big files upload, it is " -"recommended to set this parameter to False.If \"use_user_token\" is not in " -"effect, then admin credentials can be specified." -msgstr "" -"Kayıt defteri sunucusuna istek yaparken kullanıcı jetonunun geçirilip " -"geçirilmemesi. Büyük dosyaların yüklenmesi sırasında jetonun süresinin sona " -"ermesi ile oluşacak hataları engellemek için, bu parametrenin seçilmemiş " -"olarak ayarlanması önerilir. Eğer \"use_user_token\" yürürlükte değilse, o " -"zaman yönetici kimlik bilgileri belirtilebilir." - -#, python-format -msgid "Wrong command structure: %s" -msgstr "Hatalı komut yapısı: %s" - -msgid "You are not authenticated." -msgstr "Kimliğiniz doğrulanamadı." - -msgid "You are not authorized to complete this action." -msgstr "Bu eylemi tamamlamak için yetkili değilsiniz." - -#, python-format -msgid "You are not permitted to create a tag in the namespace owned by '%s'" -msgstr "'%s''ye ait ad alanında bir etiket oluşturma izniniz yok" - -msgid "You are not permitted to create image members for the image." -msgstr "İmaj için üye oluşturma izniniz yok." - -#, python-format -msgid "You are not permitted to create images owned by '%s'." -msgstr "'%s''ye ait imaj oluşturma izniniz yok." 
- -#, python-format -msgid "You are not permitted to create namespace owned by '%s'" -msgstr "'%s''ye ait ad alanı oluşturma izniniz yok" - -#, python-format -msgid "You are not permitted to create object owned by '%s'" -msgstr "'%s''ye ait nesne oluşturma izniniz yok" - -#, python-format -msgid "You are not permitted to create property owned by '%s'" -msgstr "'%s''ye ait özellik oluşturma izniniz yok." - -#, python-format -msgid "You are not permitted to create resource_type owned by '%s'" -msgstr "'%s''ye ait resource_type oluşturma izniniz yok." - -#, python-format -msgid "You are not permitted to create this task with owner as: %s" -msgstr "Sahibi olarak bu görevi oluşturma izniniz yok: %s" - -msgid "You are not permitted to delete this image." -msgstr "Bu imajı silme izniniz yok." - -msgid "You are not permitted to delete this meta_resource_type." -msgstr "meta_resource_type silme izniniz yok." - -msgid "You are not permitted to delete this namespace." -msgstr "Bu ad alanını silme izniniz yok." - -msgid "You are not permitted to delete this object." -msgstr "Bu nesneyi silme izniniz yok." - -msgid "You are not permitted to delete this property." -msgstr "Bu özelliği silme izniniz yok." - -msgid "You are not permitted to delete this tag." -msgstr "Bu etiketi silme izniniz yok." - -#, python-format -msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." -msgstr "Bu %(resource)s üzerinde '%(attr)s' değiştirme izniniz yok." - -#, python-format -msgid "You are not permitted to modify '%s' on this image." -msgstr "Bu imajda '%s' değiştirme izniniz yok." - -msgid "You are not permitted to modify locations for this image." -msgstr "Bu imajın konumunu değiştirme izniniz yok." - -msgid "You are not permitted to modify tags on this image." -msgstr "Bu imaj üzerindeki etiketleri değiştirme izniniz yok." - -msgid "You are not permitted to modify this image." -msgstr "Bu imajı değiştirme izniniz yok." 
- -msgid "You are not permitted to set status on this task." -msgstr "Bu görev üzerinde durum ayarlama izniniz yok." - -msgid "You are not permitted to update this namespace." -msgstr "Bu ad alanını güncelleme izniniz yok." - -msgid "You are not permitted to update this object." -msgstr "Bu nesneyi güncelleme izniniz yok." - -msgid "You are not permitted to update this property." -msgstr "Bu özelliği güncelleme izniniz yok." - -msgid "You are not permitted to update this tag." -msgstr "Bu etiketi güncelleme izniniz yok." - -msgid "You are not permitted to upload data for this image." -msgstr "Bu imaj için veri yükleme izniniz yok." - -#, python-format -msgid "You cannot add image member for %s" -msgstr "%s için imaj üyesi ekleyemiyorsunuz" - -#, python-format -msgid "You cannot delete image member for %s" -msgstr "%s için imaj üyesini silemiyorsunuz" - -#, python-format -msgid "You cannot get image member for %s" -msgstr "%s için imaj üyesini alamıyorsunuz" - -#, python-format -msgid "You cannot update image member %s" -msgstr "%s imaj üyesini güncelleyemiyorsunuz" - -msgid "You do not own this image" -msgstr "Bu imajın sahibi değilsiniz" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a cert, " -"however you have failed to supply either a key_file parameter or set the " -"GLANCE_CLIENT_KEY_FILE environ variable" -msgstr "" -"Bağlanırken SSL kullanmayı seçtiniz ve bir sertifika sağladınız, ancak ya " -"key_file parametresi sağlamayı ya da GLANCE_CLIENT_KEY_FILE değişkeni " -"ayarlama işlemini başaramadınız." - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a key, " -"however you have failed to supply either a cert_file parameter or set the " -"GLANCE_CLIENT_CERT_FILE environ variable" -msgstr "" -"Bağlanırken SSL kullanmayı seçtiniz ve bir anahtar sağladınız, ancak ya " -"cert_file parametresi sağlamayı ya da GLANCE_CLIENT_CERT_FILE değişkeni " -"ayarlama işlemini başaramadınız." 
- -msgid "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" -msgstr "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" - -#, python-format -msgid "__init__() got unexpected keyword argument '%s'" -msgstr "__init__() beklenmeyen anahtar sözcük değişkeni '%s' aldı" - -#, python-format -msgid "" -"cannot transition from %(current)s to %(next)s in update (wanted from_state=" -"%(from)s)" -msgstr "" -"güncellemede (istenen from_state=%(from)s), %(current)s mevcut durumundan " -"%(next)s sonrakine geçiş olamaz " - -#, python-format -msgid "custom properties (%(props)s) conflict with base properties" -msgstr "özel özellikler (%(props)s) temel özellikler ile çatışır" - -msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" -msgstr "" -"bu platformda eventlet 'poll' ya da 'selects' havuzları kullanılabilirdir" - -msgid "is_public must be None, True, or False" -msgstr "is_public Hiçbiri, Doğru ya da Yanlış olmalıdır" - -msgid "limit param must be an integer" -msgstr "Sınır parametresi tam sayı olmak zorunda" - -msgid "limit param must be positive" -msgstr "Sınır parametresi pozitif olmak zorunda" - -#, python-format -msgid "new_image() got unexpected keywords %s" -msgstr "new_image() beklenmeyen anahtar sözcük %s aldı" - -msgid "protected must be True, or False" -msgstr "korumalı Doğru ya da Yanlış olmalıdır" - -#, python-format -msgid "unable to launch %(serv)s. Got error: %(e)s" -msgstr "%(serv)s başlatılamadı. Alınan hata: %(e)s" diff --git a/glance/locale/zh_CN/LC_MESSAGES/glance.po b/glance/locale/zh_CN/LC_MESSAGES/glance.po deleted file mode 100644 index 49508c4a..00000000 --- a/glance/locale/zh_CN/LC_MESSAGES/glance.po +++ /dev/null @@ -1,2026 +0,0 @@ -# Translations template for glance. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the glance project. 
-# -# Translators: -# blkart , 2015 -# Dongliang Yu , 2013 -# Kecheng Bi , 2014 -# Tom Fifield , 2013 -# 颜海峰 , 2014 -# Andreas Jaeger , 2016. #zanata -# howard lee , 2016. #zanata -# blkart , 2017. #zanata -msgid "" -msgstr "" -"Project-Id-Version: glance 15.0.0.0b3.dev29\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-06-23 20:54+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2017-06-24 04:45+0000\n" -"Last-Translator: blkart \n" -"Language: zh-CN\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" -"Language-Team: Chinese (China)\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "%(cls)s exception was raised in the last rpc call: %(val)s" -msgstr "最后一个 RPC 调用中发生 %(cls)s 异常:%(val)s" - -#, python-format -msgid "%(m_id)s not found in the member list of the image %(i_id)s." -msgstr "在映像 %(i_id)s 的成员列表中找不到 %(m_id)s。" - -#, python-format -msgid "%(serv)s (pid %(pid)s) is running..." -msgstr "%(serv)s (pid %(pid)s) 正在运行..." - -#, python-format -msgid "%(serv)s appears to already be running: %(pid)s" -msgstr "%(serv)s 似乎已在运行:%(pid)s" - -#, python-format -msgid "" -"%(strategy)s is registered as a module twice. %(module)s is not being used." -msgstr "已两次将 %(strategy)s 注册为模块。未在使用 %(module)s。" - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Could not load the " -"filesystem store" -msgstr "%(task_id)s(类型为 %(task_type)s)未正确配置。未能装入文件系统存储器" - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. 
Missing work dir: " -"%(work_dir)s" -msgstr "" -"%(task_id)s(类型为 %(task_type)s)未正确配置。缺少工作目录:%(work_dir)s" - -#, python-format -msgid "%(verb)sing %(serv)s" -msgstr "正在%(verb)s %(serv)s" - -#, python-format -msgid "%(verb)sing %(serv)s with %(conf)s" -msgstr "正在%(verb)s %(serv)s(借助 %(conf)s)" - -#, python-format -msgid "" -"%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " -"address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " -"separately from the port (i.e., \"[fe80::a:b:c]:9876\")." -msgstr "" -"%s 请指定 host:port 对,其中 host 是 IPv4 地址、IPv6 地址、主机名或 FQDN。如" -"果使用 IPv6 地址,请将其括在方括号中并与端口隔开(即,“[fe80::a:b:" -"c]:9876”)。" - -#, python-format -msgid "%s can't contain 4 byte unicode characters." -msgstr "%s 不能包含 4 字节 Unicode 字符。" - -#, python-format -msgid "%s is already stopped" -msgstr "%s 已停止" - -#, python-format -msgid "%s is stopped" -msgstr "%s 已停止" - -msgid "" -"--os_auth_url option or OS_AUTH_URL environment variable required when " -"keystone authentication strategy is enabled\n" -msgstr "" -"当启用了 keystone 认证策略时,需要 --os_auth_url 选项或 OS_AUTH_URL 环境变" -"量\n" - -msgid "A body is not expected with this request." -msgstr "此请求不应有主体。" - -#, python-format -msgid "" -"A metadata definition object with name=%(object_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"在名称空间 %(namespace_name)s 中,已存在名称为 %(object_name)s 的元数据定义对" -"象。" - -#, python-format -msgid "" -"A metadata definition property with name=%(property_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"在名称空间 %(namespace_name)s 中,已存在名称为 %(property_name)s 的元数据定义" -"属性。" - -#, python-format -msgid "" -"A metadata definition resource-type with name=%(resource_type_name)s already " -"exists." -msgstr "已存在名称为 %(resource_type_name)s 的元数据定义资源类型。" - -msgid "A set of URLs to access the image file kept in external store" -msgstr "用于访问外部存储器中保留的映像文件的 URL集合" - -msgid "Amount of disk space (in GB) required to boot image." 
-msgstr "引导映像所需的磁盘空间量(以 GB 计)。" - -msgid "Amount of ram (in MB) required to boot image." -msgstr "引导映像所需的 ram 量(以 MB 计)。" - -msgid "An identifier for the image" -msgstr "映像的标识" - -msgid "An identifier for the image member (tenantId)" -msgstr "映像成员的标识 (tenantId)" - -msgid "An identifier for the owner of this task" -msgstr "此任务的所有者的标识" - -msgid "An identifier for the task" -msgstr "任务的标识" - -msgid "An image file url" -msgstr "映像文件的 URL" - -msgid "An image schema url" -msgstr "映像模式的 URL" - -msgid "An image self url" -msgstr "映像本身的 URL" - -#, python-format -msgid "An image with identifier %s already exists" -msgstr "具有标识 %s 的映像已存在" - -msgid "An import task exception occurred" -msgstr "发生了导入任务异常。" - -msgid "An object with the same identifier already exists." -msgstr "具有同一标识的对象已存在。" - -msgid "An object with the same identifier is currently being operated on." -msgstr "当前正在对具有同一标识的对象进行操作。" - -msgid "An object with the specified identifier was not found." -msgstr "找不到具有指定标识的对象。" - -msgid "An unknown exception occurred" -msgstr "发生未知异常" - -msgid "An unknown task exception occurred" -msgstr "发生未知任务异常" - -#, python-format -msgid "Attempt to upload duplicate image: %s" -msgstr "请尝试上载重复映像:%s" - -msgid "Attempted to update Location field for an image not in queued status." -msgstr "已尝试更新处于未排队状态的映像的“位置”字段。" - -#, python-format -msgid "Attribute '%(property)s' is read-only." -msgstr "属性“%(property)s”是只读的。" - -#, python-format -msgid "Attribute '%(property)s' is reserved." -msgstr "属性“%(property)s”已保留。" - -#, python-format -msgid "Attribute '%s' is read-only." -msgstr "属性“%s”是只读的。" - -#, python-format -msgid "Attribute '%s' is reserved." -msgstr "属性“%s”已保留。" - -msgid "Attribute container_format can be only replaced for a queued image." -msgstr "只能为已排队的映像替换属性 container_format。" - -msgid "Attribute disk_format can be only replaced for a queued image." -msgstr "只能为已排队的映像替换属性 disk_format。" - -#, python-format -msgid "Auth service at URL %(url)s not found." 
-msgstr "找不到 URL %(url)s 处的授权服务。" - -#, python-format -msgid "" -"Authentication error - the token may have expired during file upload. " -"Deleting image data for %s." -msgstr "认证错误 - 文件上传期间此令牌可能已到期。正在删除 %s 的映像数据。" - -msgid "Authorization failed." -msgstr "授权失败。" - -msgid "Available categories:" -msgstr "可用的类别:" - -#, python-format -msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." -msgstr "无效“%s”查询过滤器格式。请使用 ISO 8601 日期时间注释。" - -#, python-format -msgid "Bad Command: %s" -msgstr "命令 %s 不正确" - -#, python-format -msgid "Bad header: %(header_name)s" -msgstr "头 %(header_name)s 不正确" - -#, python-format -msgid "Bad value passed to filter %(filter)s got %(val)s" -msgstr "传递至过滤器 %(filter)s 的值不正确,已获取 %(val)s" - -#, python-format -msgid "Badly formed S3 URI: %(uri)s" -msgstr "S3 URI %(uri)s 的格式不正确" - -#, python-format -msgid "Badly formed credentials '%(creds)s' in Swift URI" -msgstr "Swift URI 中凭证“%(creds)s”的格式不正确" - -msgid "Badly formed credentials in Swift URI." -msgstr "Swift URI 中凭证的格式不正确。" - -msgid "Body expected in request." -msgstr "请求中需要主体。" - -msgid "Cannot be a negative value" -msgstr "不能为负值" - -msgid "Cannot be a negative value." -msgstr "不得为负值。" - -#, python-format -msgid "Cannot convert image %(key)s '%(value)s' to an integer." -msgstr "无法将映像 %(key)s“%(value)s”转换为整数。" - -msgid "Cannot remove last location in the image." -msgstr "不能移除映像中的最后一个位置。" - -#, python-format -msgid "Cannot save data for image %(image_id)s: %(error)s" -msgstr "无法为镜像%(image_id)s保存数据: %(error)s" - -msgid "Cannot set locations to empty list." -msgstr "不能将位置设置为空列表。" - -msgid "Cannot upload to an unqueued image" -msgstr "无法上载至未排队的映像" - -#, python-format -msgid "Checksum verification failed. Aborted caching of image '%s'." 
-msgstr "校验和验证失败。已异常中止映像“%s”的高速缓存。" - -msgid "Client disconnected before sending all data to backend" -msgstr "客户端在发送所有数据到后端时断开了连接" - -msgid "Command not found" -msgstr "找不到命令" - -msgid "Configuration option was not valid" -msgstr "配置选项无效" - -#, python-format -msgid "Connect error/bad request to Auth service at URL %(url)s." -msgstr "发生连接错误,或者对 URL %(url)s 处的授权服务的请求不正确。" - -#, python-format -msgid "Constructed URL: %s" -msgstr "已构造 URL:%s" - -msgid "Container format is not specified." -msgstr "未指定容器格式。" - -msgid "Content-Type must be application/octet-stream" -msgstr "Content-Type 必须是 application/octet-stream" - -#, python-format -msgid "Corrupt image download for image %(image_id)s" -msgstr "对于映像 %(image_id)s,映像下载已损坏" - -#, python-format -msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" -msgstr "在尝试时间达到 30 秒之后未能绑定至 %(host)s:%(port)s" - -msgid "Could not find OVF file in OVA archive file." -msgstr "在 OVA 归档文件中找不到 OVF 文件。" - -#, python-format -msgid "Could not find metadata object %s" -msgstr "找不到元数据对象 %s" - -#, python-format -msgid "Could not find metadata tag %s" -msgstr "找不到元数据标记 %s" - -#, python-format -msgid "Could not find namespace %s" -msgstr "找不到名称空间 %s" - -#, python-format -msgid "Could not find property %s" -msgstr "找不到属性 %s" - -msgid "Could not find required configuration option" -msgstr "找不到必需的配置选项" - -#, python-format -msgid "Could not find task %s" -msgstr "找不到任务 %s" - -#, python-format -msgid "Could not update image: %s" -msgstr "未能更新映像:%s" - -#, python-format -msgid "Couldn't create metadata namespace: %s" -msgstr "无法创建元数据命名空间:%s" - -#, python-format -msgid "Couldn't create metadata object: %s" -msgstr "无法创建元数据对象:%s" - -#, python-format -msgid "Couldn't create metadata property: %s" -msgstr "无法创建元数据属性:%s" - -#, python-format -msgid "Couldn't create metadata tag: %s" -msgstr "无法创建元数据标签:%s" - -#, python-format -msgid "Couldn't update metadata namespace: %s" -msgstr "无法更新元数据命名空间:%s" - -#, python-format -msgid "Couldn't update 
metadata object: %s" -msgstr "无法更新元数据对象:%s" - -#, python-format -msgid "Couldn't update metadata property: %s" -msgstr "无法更新元数据属性:%s" - -#, python-format -msgid "Couldn't update metadata tag: %s" -msgstr "无法更新元数据标签:%s" - -msgid "Currently, OVA packages containing multiple disk are not supported." -msgstr "当前包含多个磁盘的 OVA 包不受支持。" - -#, python-format -msgid "Data for image_id not found: %s" -msgstr "找不到 image_id 的数据:%s" - -msgid "Data supplied was not valid." -msgstr "提供的数据无效。" - -msgid "Date and time of image member creation" -msgstr "创建映像成员的日期和时间" - -msgid "Date and time of image registration" -msgstr "注册映像的日期和时间" - -msgid "Date and time of last modification of image member" -msgstr "最近一次修改映像成员的日期和时间" - -msgid "Date and time of namespace creation" -msgstr "创建名称空间的日期和时间" - -msgid "Date and time of object creation" -msgstr "创建对象的日期和时间" - -msgid "Date and time of resource type association" -msgstr "关联资源类型的日期和时间" - -msgid "Date and time of tag creation" -msgstr "创建标记的日期和时间" - -msgid "Date and time of the last image modification" -msgstr "最近一次修改映像的日期和时间" - -msgid "Date and time of the last namespace modification" -msgstr "最近一次修改名称空间的日期和时间" - -msgid "Date and time of the last object modification" -msgstr "最近一次修改对象的日期和时间" - -msgid "Date and time of the last resource type association modification" -msgstr "最近一次修改资源类型关联的日期和时间" - -msgid "Date and time of the last tag modification" -msgstr "最近一次修改标记的日期和时间" - -msgid "Datetime when this resource was created" -msgstr "此资源的创建日期时间" - -msgid "Datetime when this resource was updated" -msgstr "此资源的更新日期时间" - -msgid "Datetime when this resource would be subject to removal" -msgstr "将会移除此资源的日期时间" - -#, python-format -msgid "Denying attempt to upload image because it exceeds the quota: %s" -msgstr "正在拒绝尝试上载映像,因为它超过配额:%s" - -#, python-format -msgid "Denying attempt to upload image larger than %d bytes." -msgstr "正在拒绝尝试上载大小超过 %d 字节的映像。" - -msgid "Descriptive name for the image" -msgstr "映像的描述性名称" - -msgid "Disk format is not specified." 
-msgstr "未指定磁盘格式。" - -#, python-format -msgid "" -"Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" -msgstr "未能正确配置驱动程序 %(driver_name)s。原因:%(reason)s" - -msgid "" -"Error decoding your request. Either the URL or the request body contained " -"characters that could not be decoded by Glance" -msgstr "对请求解码时出错。Glance 无法对 URL 或请求主体包含的字符进行解码。" - -#, python-format -msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" -msgstr "访存映像 %(image_id)s 的成员时出错:%(inner_msg)s" - -msgid "Error in store configuration. Adding images to store is disabled." -msgstr "存储配置中出错。已禁止将映像添加至存储器。" - -#, python-format -msgid "Error: %(exc_type)s: %(e)s" -msgstr "错误: %(exc_type)s: %(e)s" - -msgid "Expected a member in the form: {\"member\": \"image_id\"}" -msgstr "成员应为以下格式:{\"member\": \"image_id\"}" - -msgid "Expected a status in the form: {\"status\": \"status\"}" -msgstr "状态应为以下格式:{\"status\": \"status\"}" - -msgid "External source should not be empty" -msgstr "外部源不应为空。" - -#, python-format -msgid "External sources are not supported: '%s'" -msgstr "外部源不受支持:“%s”" - -#, python-format -msgid "Failed to activate image. Got error: %s" -msgstr "未能激活映像。发生错误:%s" - -#, python-format -msgid "Failed to add image metadata. Got error: %s" -msgstr "未能添加映像元数据。发生错误:%s" - -#, python-format -msgid "Failed to find image %(image_id)s to delete" -msgstr "未能找到要删除的映像 %(image_id)s" - -#, python-format -msgid "Failed to find image to delete: %s" -msgstr "未能找到要删除的映像:%s" - -#, python-format -msgid "Failed to find image to update: %s" -msgstr "找不到要更新的映像:%s" - -#, python-format -msgid "Failed to find resource type %(resourcetype)s to delete" -msgstr "找不到要删除的资源类型 %(resourcetype)s" - -#, python-format -msgid "Failed to initialize the image cache database. Got error: %s" -msgstr "未能初始化映像高速缓存数据库。发生错误:%s" - -#, python-format -msgid "Failed to read %s from config" -msgstr "未能从配置读取 %s" - -#, python-format -msgid "Failed to reserve image. 
Got error: %s" -msgstr "未能保留映像。发生错误:%s" - -#, python-format -msgid "Failed to update image metadata. Got error: %s" -msgstr "未能更新映像元数据。发生错误:%s" - -#, python-format -msgid "Failed to upload image %s" -msgstr "上传镜像 %s失败" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to HTTP error: " -"%(error)s" -msgstr "由于 HTTP 错误,未能上载映像 %(image_id)s 的映像数据:%(error)s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to internal error: " -"%(error)s" -msgstr "由于内部错误,未能上载映像 %(image_id)s 的映像数据:%(error)s" - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "文件 %(path)s 具有无效支持文件 %(bfile)s,正在异常中止。" - -msgid "" -"File based imports are not allowed. Please use a non-local source of image " -"data." -msgstr "不允许基于文件的导入。请使用映像数据的非本地源。" - -msgid "Forbidden image access" -msgstr "禁止访问映像" - -#, python-format -msgid "Forbidden to delete a %s image." -msgstr "已禁止对映像%s进行删除。" - -#, python-format -msgid "Forbidden to delete image: %s" -msgstr "已禁止删除映像:%s" - -#, python-format -msgid "Forbidden to modify '%(key)s' of %(status)s image." -msgstr "禁止修改 %(status)s 映像的“%(key)s”" - -#, python-format -msgid "Forbidden to modify '%s' of image." -msgstr "已禁止修改映像的“%s”。" - -msgid "Forbidden to reserve image." -msgstr "已禁止保留映像。" - -msgid "Forbidden to update deleted image." -msgstr "已禁止更新删除的映像。" - -#, python-format -msgid "Forbidden to update image: %s" -msgstr "已禁止更新映像:%s" - -#, python-format -msgid "Forbidden upload attempt: %s" -msgstr "已禁止进行上载尝试:%s" - -#, python-format -msgid "Forbidding request, metadata definition namespace=%s is not visible." -msgstr "正在禁止请求,元数据定义名称空间 %s 不可视。" - -#, python-format -msgid "Forbidding request, task %s is not visible" -msgstr "正在禁止请求,任务 %s 不可视" - -msgid "Format of the container" -msgstr "容器的格式" - -msgid "Format of the disk" -msgstr "磁盘格式" - -#, python-format -msgid "Host \"%s\" is not valid." 
-msgstr "主机“%s”无效。" - -#, python-format -msgid "Host and port \"%s\" is not valid." -msgstr "主机和端口“%s”无效。" - -msgid "" -"Human-readable informative message only included when appropriate (usually " -"on failure)" -msgstr "人工可读的信息性消息,仅在适当时(通常在发生故障时)才包括" - -msgid "If true, image will not be deletable." -msgstr "如果为 true,那么映像将不可删除。" - -msgid "If true, namespace will not be deletable." -msgstr "如果为 true,那么名称空间将不可删除。" - -#, python-format -msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" -msgstr "映像 %(id)s 未能删除,因为它正在使用中:%(exc)s" - -#, python-format -msgid "Image %(id)s not found" -msgstr "找不到映像 %(id)s" - -#, python-format -msgid "" -"Image %(image_id)s could not be found after upload. The image may have been " -"deleted during the upload: %(error)s" -msgstr "镜像%(image_id)s上传后无法找到。镜像在上传过程中可能被删除: %(error)s" - -#, python-format -msgid "Image %(image_id)s is protected and cannot be deleted." -msgstr "映像 %(image_id)s 受保护,无法删除。" - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload, cleaning up the chunks uploaded." -msgstr "" -"在上载之后,找不到映像 %s。可能已在上载期间删除该映像,正在清除已上载的区块。" - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload." -msgstr "上传后找不到映像 %s。此映像可能已在上传期间删除。" - -#, python-format -msgid "Image %s is deactivated" -msgstr "映像 %s 已取消激活" - -#, python-format -msgid "Image %s is not active" -msgstr "映像 %s 处于不活动状态" - -#, python-format -msgid "Image %s not found." -msgstr "找不到映像 %s " - -#, python-format -msgid "Image exceeds the storage quota: %s" -msgstr "镜像超出存储限额: %s" - -msgid "Image id is required." 
-msgstr "需要映像标识。" - -msgid "Image is protected" -msgstr "映像受保护" - -#, python-format -msgid "Image member limit exceeded for image %(id)s: %(e)s:" -msgstr "对于映像 %(id)s,超过映像成员限制:%(e)s:" - -#, python-format -msgid "Image name too long: %d" -msgstr "映像名称太长:%d" - -msgid "Image operation conflicts" -msgstr "映像操作发生冲突" - -#, python-format -msgid "" -"Image status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "不允许映像状态从 %(cur_status)s 转变为 %(new_status)s" - -#, python-format -msgid "Image storage media is full: %s" -msgstr "映像存储介质已满:%s" - -#, python-format -msgid "Image tag limit exceeded for image %(id)s: %(e)s:" -msgstr "对于映像 %(id)s,超过映像标记限制:%(e)s:" - -#, python-format -msgid "Image upload problem: %s" -msgstr "发生映像上载问题:%s" - -#, python-format -msgid "Image with identifier %s already exists!" -msgstr "具有标识 %s 的映像已存在!" - -#, python-format -msgid "Image with identifier %s has been deleted." -msgstr "已删除具有标识 %s 的映像。" - -#, python-format -msgid "Image with identifier %s not found" -msgstr "找不到具有标识 %s 的映像" - -#, python-format -msgid "Image with the given id %(image_id)s was not found" -msgstr "找不到具有所给定标识 %(image_id)s 的映像" - -#, python-format -msgid "" -"Incorrect auth strategy, expected \"%(expected)s\" but received " -"\"%(received)s\"" -msgstr "授权策略不正确,期望的是“%(expected)s”,但接收到的是“%(received)s”" - -#, python-format -msgid "Incorrect request: %s" -msgstr "以下请求不正确:%s" - -#, python-format -msgid "Input does not contain '%(key)s' field" -msgstr "输入没有包含“%(key)s”字段" - -#, python-format -msgid "Insufficient permissions on image storage media: %s" -msgstr "对映像存储介质的许可权不足:%s" - -#, python-format -msgid "Invalid JSON pointer for this resource: '/%s'" -msgstr "这个资源无效的JSON指针: '/%s'" - -#, python-format -msgid "Invalid checksum '%s': can't exceed 32 characters" -msgstr "校验和“%s”无效:不得超过 32 个字符" - -msgid "Invalid configuration in glance-swift conf file." -msgstr "glance-swift 配置文件中的配置无效。" - -msgid "Invalid configuration in property protection file." 
-msgstr "属性保护文件中的配置无效。" - -#, python-format -msgid "Invalid container format '%s' for image." -msgstr "对于映像,容器格式“%s”无效。" - -#, python-format -msgid "Invalid content type %(content_type)s" -msgstr "内容类型 %(content_type)s 无效" - -#, python-format -msgid "Invalid disk format '%s' for image." -msgstr "对于映像,磁盘格式“%s”无效。" - -#, python-format -msgid "Invalid filter value %s. The quote is not closed." -msgstr "无效过滤器值 %s。缺少右引号。" - -#, python-format -msgid "" -"Invalid filter value %s. There is no comma after closing quotation mark." -msgstr "无效过滤器值 %s。右引号之后没有逗号。" - -#, python-format -msgid "" -"Invalid filter value %s. There is no comma before opening quotation mark." -msgstr "无效过滤器值 %s。左引号之前没有逗号。" - -msgid "Invalid image id format" -msgstr "映像标识格式无效" - -#, python-format -msgid "Invalid int value for age_in_days: %(age_in_days)s" -msgstr "age_in_days的无效整形值:%(age_in_days)s" - -#, python-format -msgid "Invalid int value for max_rows: %(max_rows)s" -msgstr "max_rows的无效整形值:%(max_rows)s" - -msgid "Invalid location" -msgstr "无效的位置" - -#, python-format -msgid "Invalid location %s" -msgstr "位置 %s 无效" - -#, python-format -msgid "Invalid location: %s" -msgstr "以下位置无效:%s" - -#, python-format -msgid "" -"Invalid location_strategy option: %(name)s. The valid strategy option(s) " -"is(are): %(strategies)s" -msgstr "location_strategy 选项 %(name)s 无效。有效策略选项如下:%(strategies)s" - -msgid "Invalid locations" -msgstr "无效的位置" - -#, python-format -msgid "Invalid locations: %s" -msgstr "无效的位置:%s" - -msgid "Invalid marker format" -msgstr "标记符格式无效" - -msgid "Invalid marker. Image could not be found." -msgstr "标记符无效。找不到映像。" - -#, python-format -msgid "Invalid membership association: %s" -msgstr "成员资格关联无效:%s" - -msgid "" -"Invalid mix of disk and container formats. When setting a disk or container " -"format to one of 'aki', 'ari', or 'ami', the container and disk formats must " -"match." 
-msgstr "" -"磁盘格式与容器格式的混合无效。将磁盘格式或容器格式设置" -"为“aki”、“ari”或“ami”时,容器格式与磁盘格式必须匹配。" - -#, python-format -msgid "" -"Invalid operation: `%(op)s`. It must be one of the following: %(available)s." -msgstr "操作“%(op)s”无效。它必须是下列其中一项:%(available)s。" - -msgid "Invalid position for adding a location." -msgstr "用于添加位置 (location) 的位置 (position) 无效。" - -msgid "Invalid position for removing a location." -msgstr "用于移除位置 (location) 的位置 (position) 无效。" - -msgid "Invalid service catalog json." -msgstr "服务目录 json 无效。" - -#, python-format -msgid "Invalid sort direction: %s" -msgstr "排序方向无效:%s" - -#, python-format -msgid "" -"Invalid sort key: %(sort_key)s. It must be one of the following: " -"%(available)s." -msgstr "以下排序键无效:%(sort_key)s。它必须是下列其中一项:%(available)s。" - -#, python-format -msgid "Invalid status value: %s" -msgstr "状态值 %s 无效" - -#, python-format -msgid "Invalid status: %s" -msgstr "状态无效:%s" - -#, python-format -msgid "Invalid time format for %s." -msgstr "对于 %s,此时间格式无效。" - -#, python-format -msgid "Invalid type value: %s" -msgstr "类型值 %s 无效" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition namespace " -"with the same name of %s" -msgstr "" -"更新无效。它将导致出现重复的元数据定义名称空间,该名称空间具有同一名称 %s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"更新无效。它将导致在名称空间 %(namespace_name)s 中出现重复的元数据定义对象," -"该对象具有同一名称 %(name)s。" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"更新无效。它将导致在名称空间 %(namespace_name)s 中出现重复的元数据定义对象," -"该对象具有同一名称 %(name)s。" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition property " -"with the same name=%(name)s in namespace=%(namespace_name)s." 
-msgstr "" -"更新无效。它将导致在名称空间 %(namespace_name)s 中出现重复的元数据定义属性," -"该属性具有同一名称 %(name)s。" - -#, python-format -msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" -msgstr "参数“%(param)s”的值“%(value)s”无效:%(extra_msg)s" - -#, python-format -msgid "Invalid value for option %(option)s: %(value)s" -msgstr "选项 %(option)s 的以下值无效:%(value)s" - -#, python-format -msgid "Invalid visibility value: %s" -msgstr "可视性值无效:%s" - -msgid "It's invalid to provide multiple image sources." -msgstr "提供多个镜像源无效" - -#, python-format -msgid "It's not allowed to add locations if image status is %s." -msgstr "如果镜像状态为 %s,则不允许添加位置。" - -msgid "It's not allowed to add locations if locations are invisible." -msgstr "不允许添加不可视的位置。" - -msgid "It's not allowed to remove locations if locations are invisible." -msgstr "不允许移除不可视的位置。" - -msgid "It's not allowed to update locations if locations are invisible." -msgstr "不允许更新不可视的位置。" - -msgid "List of strings related to the image" -msgstr "与映像相关的字符串的列表" - -msgid "Malformed JSON in request body." -msgstr "请求主体中 JSON 的格式不正确。" - -msgid "Maximal age is count of days since epoch." -msgstr "最大年龄是自新纪元开始计算的天数。" - -#, python-format -msgid "Maximum redirects (%(redirects)s) was exceeded." -msgstr "已超过最大重定向次数 (%(redirects)s)。" - -#, python-format -msgid "Member %(member_id)s is duplicated for image %(image_id)s" -msgstr "对于映像 %(image_id)s,已复制成员 %(member_id)s" - -msgid "Member can't be empty" -msgstr "成员不能为空" - -msgid "Member to be added not specified" -msgstr "未指定要添加的成员" - -msgid "Membership could not be found." -msgstr "找不到成员资格。" - -#, python-format -msgid "" -"Metadata definition namespace %(namespace)s is protected and cannot be " -"deleted." -msgstr "元数据定义名称空间 %(namespace)s 受保护,无法删除。" - -#, python-format -msgid "Metadata definition namespace not found for id=%s" -msgstr "对于标识 %s,找不到元数据定义名称空间" - -#, python-format -msgid "" -"Metadata definition object %(object_name)s is protected and cannot be " -"deleted." 
-msgstr "元数据定义对象 %(object_name)s 受保护,无法删除。" - -#, python-format -msgid "Metadata definition object not found for id=%s" -msgstr "对于标识 %s,找不到元数据定义对象" - -#, python-format -msgid "" -"Metadata definition property %(property_name)s is protected and cannot be " -"deleted." -msgstr "元数据定义属性 %(property_name)s 受保护,无法删除。" - -#, python-format -msgid "Metadata definition property not found for id=%s" -msgstr "对于标识 %s,找不到元数据定义属性" - -#, python-format -msgid "" -"Metadata definition resource-type %(resource_type_name)s is a seeded-system " -"type and cannot be deleted." -msgstr "元数据定义资源类型 %(resource_type_name)s 是种子型系统类型,无法删除。" - -#, python-format -msgid "" -"Metadata definition resource-type-association %(resource_type)s is protected " -"and cannot be deleted." -msgstr "元数据定义资源类型关联 %(resource_type)s 受保护,无法删除。" - -#, python-format -msgid "" -"Metadata definition tag %(tag_name)s is protected and cannot be deleted." -msgstr "元数据定义标记 %(tag_name)s 受保护,无法删除。" - -#, python-format -msgid "Metadata definition tag not found for id=%s" -msgstr "对于标识 %s,找不到元数据定义标记" - -msgid "Minimal rows limit is 1." -msgstr "最小行数限制为 1。" - -#, python-format -msgid "Missing required credential: %(required)s" -msgstr "缺少必需凭证:%(required)s" - -#, python-format -msgid "" -"Multiple 'image' service matches for region %(region)s. This generally means " -"that a region is required and you have not supplied one." -msgstr "" -"对于区域 %(region)s,存在多个“映像”服务匹配项。这通常意味着需要区域并且尚未提" -"供一个区域。" - -msgid "No authenticated user" -msgstr "不存在任何已认证的用户" - -#, python-format -msgid "No image found with ID %s" -msgstr "找不到任何具有标识 %s 的映像" - -#, python-format -msgid "No location found with ID %(loc)s from image %(img)s" -msgstr "在映像 %(img)s 中找不到标识为 %(loc)s 的位置" - -msgid "No permission to share that image" -msgstr "不存在任何用于共享该映像的许可权" - -#, python-format -msgid "Not allowed to create members for image %s." 
-msgstr "不允许为映像 %s 创建成员。" - -#, python-format -msgid "Not allowed to deactivate image in status '%s'" -msgstr "不允许取消激活状态为“%s”的映像" - -#, python-format -msgid "Not allowed to delete members for image %s." -msgstr "不允许为映像 %s 删除成员。" - -#, python-format -msgid "Not allowed to delete tags for image %s." -msgstr "不允许为映像 %s 删除标记。" - -#, python-format -msgid "Not allowed to list members for image %s." -msgstr "不允许为映像 %s 列示成员。" - -#, python-format -msgid "Not allowed to reactivate image in status '%s'" -msgstr "不允许重新激活状态为“%s”的映像" - -#, python-format -msgid "Not allowed to update members for image %s." -msgstr "不允许为映像 %s 更新成员。" - -#, python-format -msgid "Not allowed to update tags for image %s." -msgstr "不允许为映像 %s 更新标记。" - -#, python-format -msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" -msgstr "不允许为镜像%(image_id)s上传数据:%(error)s" - -msgid "Number of sort dirs does not match the number of sort keys" -msgstr "排序方向数与排序键数不匹配" - -msgid "OVA extract is limited to admin" -msgstr "OVA 抽取操作仅限管理员执行" - -msgid "Old and new sorting syntax cannot be combined" -msgstr "无法组合新旧排序语法" - -msgid "Only shared images have members." -msgstr "只有已共享的镜像拥有成员." - -#, python-format -msgid "Operation \"%s\" requires a member named \"value\"." -msgstr "操作“%s”需要名为“value”的成员。" - -msgid "" -"Operation objects must contain exactly one member named \"add\", \"remove\", " -"or \"replace\"." -msgstr "操作对象必须刚好包含一个名为“add”、“remove”或“replace”的成员。" - -msgid "" -"Operation objects must contain only one member named \"add\", \"remove\", or " -"\"replace\"." -msgstr "操作对象必须仅包含一个名为“add”、“remove”或“replace”的成员。" - -msgid "Operations must be JSON objects." -msgstr "操作必须是 JSON 对象。" - -#, python-format -msgid "Original locations is not empty: %s" -msgstr "原位置不为空: %s" - -msgid "Owner can't be updated by non admin." -msgstr "非管理员无法更新所有者。" - -msgid "Owner must be specified to create a tag." -msgstr "必须指定所有者,才能创建标记。" - -msgid "Owner of the image" -msgstr "映像的所有者" - -msgid "Owner of the namespace." 
-msgstr "名称空间的所有者。" - -msgid "Param values can't contain 4 byte unicode." -msgstr "参数值不能包含 4 字节 Unicode。" - -#, python-format -msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." -msgstr "指针“%s”包含并非可识别转义序列的一部分的“~”。" - -#, python-format -msgid "Pointer `%s` contains adjacent \"/\"." -msgstr "指针`%s` 包含连接符\"/\"." - -#, python-format -msgid "Pointer `%s` does not contains valid token." -msgstr "指针`%s` 没有包含有效的口令" - -#, python-format -msgid "Pointer `%s` does not start with \"/\"." -msgstr "指针“%s”没有以“/”开头。" - -#, python-format -msgid "Pointer `%s` end with \"/\"." -msgstr "指针`%s` 以\"/\"结束." - -#, python-format -msgid "Port \"%s\" is not valid." -msgstr "端口“%s”无效。" - -#, python-format -msgid "Process %d not running" -msgstr "进程 %d 未在运行" - -#, python-format -msgid "Properties %s must be set prior to saving data." -msgstr "必须在保存数据之前设置属性 %s。" - -#, python-format -msgid "" -"Property %(property_name)s does not start with the expected resource type " -"association prefix of '%(prefix)s'." -msgstr "属性 %(property_name)s 未以需要的资源类型关联前缀“%(prefix)s”开头。" - -#, python-format -msgid "Property %s already present." -msgstr "属性 %s 已存在。" - -#, python-format -msgid "Property %s does not exist." -msgstr "属性 %s 不存在。" - -#, python-format -msgid "Property %s may not be removed." -msgstr "无法除去属性 %s。" - -#, python-format -msgid "Property %s must be set prior to saving data." -msgstr "必须在保存数据之前设置属性 %s。" - -#, python-format -msgid "Property '%s' is protected" -msgstr "属性“%s”受保护" - -msgid "Property names can't contain 4 byte unicode." -msgstr "属性名称不能包含 4 字节 Unicode。" - -#, python-format -msgid "" -"Provided image size must match the stored image size. 
(provided size: " -"%(ps)d, stored size: %(ss)d)" -msgstr "" -"提供的映像大小必须与存储的映像大小匹配。(提供的大小为 %(ps)d,存储的大小为 " -"%(ss)d)" - -#, python-format -msgid "Provided object does not match schema '%(schema)s': %(reason)s" -msgstr "提供的对象与模式“%(schema)s”不匹配:%(reason)s" - -#, python-format -msgid "Provided status of task is unsupported: %(status)s" -msgstr "不支持任务的所提供状态:%(status)s" - -#, python-format -msgid "Provided type of task is unsupported: %(type)s" -msgstr "不支持任务的所提供类型:%(type)s" - -msgid "Provides a user friendly description of the namespace." -msgstr "提供名称空间的用户友好描述。" - -msgid "Received invalid HTTP redirect." -msgstr "接收到无效 HTTP 重定向。" - -#, python-format -msgid "Redirecting to %(uri)s for authorization." -msgstr "对于授权,正在重定向至 %(uri)s。" - -#, python-format -msgid "Registry service can't use %s" -msgstr "注册服务无法使用 %s" - -#, python-format -msgid "Registry was not configured correctly on API server. Reason: %(reason)s" -msgstr "API 服务器上未正确配置注册表。原因:%(reason)s" - -#, python-format -msgid "Reload of %(serv)s not supported" -msgstr "不支持重新装入 %(serv)s" - -#, python-format -msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "正在重新装入 %(serv)s(pid 为 %(pid)s),信号为 (%(sig)s)" - -#, python-format -msgid "Removing stale pid file %s" -msgstr "移除原有pid文件%s" - -msgid "Request body must be a JSON array of operation objects." -msgstr "请求主体必须是由操作对象组成的 JSON 数组。" - -msgid "Request must be a list of commands" -msgstr "请求必须为命令列表" - -#, python-format -msgid "Required store %s is invalid" -msgstr "必需的存储器 %s 无效" - -msgid "" -"Resource type names should be aligned with Heat resource types whenever " -"possible: http://docs.openstack.org/developer/heat/template_guide/openstack." -"html" -msgstr "" -"资源类型名称应该尽可能与 Heat 资源类型对齐:http://docs.openstack.org/" -"developer/heat/template_guide/openstack.html" - -msgid "Response from Keystone does not contain a Glance endpoint." 
-msgstr "来自 Keystone 的响应没有包含 Glance 端点。" - -msgid "Scope of image accessibility" -msgstr "映像辅助功能选项的作用域" - -msgid "Scope of namespace accessibility." -msgstr "名称空间辅助功能选项的作用域。" - -#, python-format -msgid "Server %(serv)s is stopped" -msgstr "服务器 %(serv)s 已停止" - -#, python-format -msgid "Server worker creation failed: %(reason)s." -msgstr "服务器工作程序创建失败:%(reason)s。" - -msgid "Signature verification failed" -msgstr "签名认证失败" - -msgid "Size of image file in bytes" -msgstr "映像文件的大小,以字节计" - -msgid "" -"Some resource types allow more than one key / value pair per instance. For " -"example, Cinder allows user and image metadata on volumes. Only the image " -"properties metadata is evaluated by Nova (scheduling or drivers). This " -"property allows a namespace target to remove the ambiguity." -msgstr "" -"一些资源类型允许每个实例具有多个“键/值”对。例如,Cinder 允许卷上的用户元数据" -"和映像元数据。仅映像属性元数据是通过 Nova(调度或驱动程序)求值。此属性允许名" -"称空间目标除去不确定性。" - -msgid "Sort direction supplied was not valid." -msgstr "提供的排序方向无效。" - -msgid "Sort key supplied was not valid." -msgstr "提供的排序键无效。" - -msgid "" -"Specifies the prefix to use for the given resource type. Any properties in " -"the namespace should be prefixed with this prefix when being applied to the " -"specified resource type. Must include prefix separator (e.g. a colon :)." -msgstr "" -"指定要用于给定的资源类型的前缀。当应用于指定的资源类型时,名称空间中的任何属" -"性都应该使用此前缀作为前缀。必须包括前缀分隔符(例如冒号 :)。" - -msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." 
-msgstr "状态必须为“暂挂”、“已接受”或“已拒绝”。" - -msgid "Status not specified" -msgstr "未指定状态" - -msgid "Status of the image" -msgstr "映像的状态" - -#, python-format -msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "不允许状态从 %(cur_status)s 转变为 %(new_status)s" - -#, python-format -msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "正在通过信号 (%(sig)s) 停止 %(serv)s (pid %(pid)s)" - -#, python-format -msgid "Store for image_id not found: %s" -msgstr "找不到用于 image_id 的存储器:%s" - -#, python-format -msgid "Store for scheme %s not found" -msgstr "找不到用于方案 %s 的存储器" - -#, python-format -msgid "" -"Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " -"(%(actual)s) did not match. Setting image status to 'killed'." -msgstr "" -"提供的 %(attr)s (%(supplied)s) 与所上载映像 (%(actual)s) 生成的 %(attr)s 不匹" -"配。正在将映像状态设置为“已终止”。" - -msgid "Supported values for the 'container_format' image attribute" -msgstr "“container_format”映像属性支持的值" - -msgid "Supported values for the 'disk_format' image attribute" -msgstr "“disk_format”映像属性支持的值" - -#, python-format -msgid "Suppressed respawn as %(serv)s was %(rsn)s." -msgstr "已阻止重新衍生,因为 %(serv)s 为 %(rsn)s。" - -msgid "System SIGHUP signal received." -msgstr "接收到系统 SIGHUP 信号。" - -#, python-format -msgid "Task '%s' is required" -msgstr "需要任务“%s”" - -msgid "Task does not exist" -msgstr "任务不存在" - -msgid "Task failed due to Internal Error" -msgstr "由于发生内部错误而导致任务失败" - -msgid "Task was not configured properly" -msgstr "任务未正确配置" - -#, python-format -msgid "Task with the given id %(task_id)s was not found" -msgstr "找不到具有给定标识 %(task_id)s 的任务" - -msgid "The \"changes-since\" filter is no longer available on v2." -msgstr "“changes-since”过滤器在 v2 上不再可用。" - -#, python-format -msgid "The CA file you specified %s does not exist" -msgstr "已指定的 CA 文件 %s 不存在" - -#, python-format -msgid "" -"The Image %(image_id)s object being created by this task %(task_id)s, is no " -"longer in valid status for further processing." 
-msgstr "" -"此任务 %(task_id)s 正在创建的映像 %(image_id)s 对象不再处于有效状态,无法进一" -"步处理。" - -msgid "The Store URI was malformed." -msgstr "存储器 URI 的格式不正确。" - -msgid "" -"The URL to the keystone service. If \"use_user_token\" is not in effect and " -"using keystone auth, then URL of keystone can be specified." -msgstr "" -"keystone 服务的 URL。如果“use_user_token”没有生效并且正在使用 keystone 认证," -"那么可指定 keystone 的 URL。" - -msgid "" -"The administrators password. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "管理员密码。如果“use_user_token”没有生效,那么可指定管理凭证。" - -msgid "" -"The administrators user name. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "管理员用户名。如果“use_user_token”没有生效,那么可指定管理凭证。" - -#, python-format -msgid "The cert file you specified %s does not exist" -msgstr "已指定的证书文件 %s 不存在" - -msgid "The current status of this task" -msgstr "此任务的当前状态" - -#, python-format -msgid "" -"The device housing the image cache directory %(image_cache_dir)s does not " -"support xattr. It is likely you need to edit your fstab and add the " -"user_xattr option to the appropriate line for the device housing the cache " -"directory." -msgstr "" -"存放映像高速缓存目录 %(image_cache_dir)s 的设备不支持 xattr。您可能需要编辑 " -"fstab 并将 user_xattr 选项添加至存放该高速缓存目录的设备的相应行。" - -#, python-format -msgid "" -"The given uri is not valid. Please specify a valid uri from the following " -"list of supported uri %(supported)s" -msgstr "" -"给定的 URI 无效。请从受支持的 URI %(supported)s 的以下列表中指定有效 URI" - -#, python-format -msgid "The incoming image is too large: %s" -msgstr "引入的映像太大:%s" - -#, python-format -msgid "The key file you specified %s does not exist" -msgstr "已指定的密钥文件 %s 不存在" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image locations. 
" -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"已超过关于允许的映像位置数的限制。已尝试:%(attempted)s,最大值:%(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image members for this " -"image. Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"已超过关于允许的映像成员数(对于此映像)的限制。已尝试:%(attempted)s,最大" -"值:%(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"已超过关于允许的映像属性数的限制。已尝试:%(attempted)s,最大值:%(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(num)s, Maximum: %(quota)s" -msgstr "已超过关于允许的映像属性数的限制。已尝试:%(num)s,最大值:%(quota)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image tags. Attempted: " -"%(attempted)s, Maximum: %(maximum)s" -msgstr "" -"已超过关于允许的映像标记数的限制。已尝试:%(attempted)s,最大值:%(maximum)s" - -#, python-format -msgid "The location %(location)s already exists" -msgstr "位置 %(location)s 已存在" - -#, python-format -msgid "The location data has an invalid ID: %d" -msgstr "位置数据具有无效标识:%d" - -#, python-format -msgid "" -"The metadata definition %(record_type)s with name=%(record_name)s not " -"deleted. Other records still refer to it." -msgstr "" -"未删除名称为 %(record_name)s 的元数据定义 %(record_type)s。其他记录仍然对其进" -"行引用。" - -#, python-format -msgid "The metadata definition namespace=%(namespace_name)s already exists." -msgstr "元数据定义名称空间 %(namespace_name)s 已存在。" - -#, python-format -msgid "" -"The metadata definition object with name=%(object_name)s was not found in " -"namespace=%(namespace_name)s." -msgstr "" -"在名称空间 %(namespace_name)s 中,找不到名称为 %(object_name)s 的元数据定义对" -"象。" - -#, python-format -msgid "" -"The metadata definition property with name=%(property_name)s was not found " -"in namespace=%(namespace_name)s." 
-msgstr "" -"在名称空间 %(namespace_name)s 中,找不到名称为 %(property_name)s 的元数据定义" -"属性。" - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s already exists." -msgstr "" -"已存在以下两者的元数据定义资源类型关联:资源类型 %(resource_type_name)s 与名" -"称空间 %(namespace_name)s。" - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s, was not found." -msgstr "" -"找不到以下两者的元数据定义资源类型关联:资源类型 %(resource_type_name)s 与名" -"称空间 %(namespace_name)s。" - -#, python-format -msgid "" -"The metadata definition resource-type with name=%(resource_type_name)s, was " -"not found." -msgstr "找不到名称为 %(resource_type_name)s 的元数据定义资源类型。" - -#, python-format -msgid "" -"The metadata definition tag with name=%(name)s was not found in namespace=" -"%(namespace_name)s." -msgstr "" -"在名称空间 %(namespace_name)s 中,找不到名称为 %(name)s 的元数据定义标记。" - -msgid "The parameters required by task, JSON blob" -msgstr "任务 JSON blob 所需的参数" - -msgid "The provided image is too large." -msgstr "提供的映像太大。" - -msgid "" -"The region for the authentication service. If \"use_user_token\" is not in " -"effect and using keystone auth, then region name can be specified." -msgstr "" -"用于认证服务的区域。如果“use_user_token”没有生效并且正在使用 keystone 认证," -"那么可指定区域名称。" - -msgid "The request returned 500 Internal Server Error." -msgstr "该请求返回了“500 内部服务器错误”。" - -msgid "" -"The request returned 503 Service Unavailable. This generally occurs on " -"service overload or other transient outage." -msgstr "" -"该请求返回了“503 服务不可用”。这通常在服务超负荷或其他瞬态停止运行时发生。" - -#, python-format -msgid "" -"The request returned a 302 Multiple Choices. 
This generally means that you " -"have not included a version indicator in a request URI.\n" -"\n" -"The body of response returned:\n" -"%(body)s" -msgstr "" -"该请求返回了“302 多选项”。这通常意味着您尚未将版本指示器包括在请求 URI 中。\n" -"\n" -"返回了响应的主体:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned a 413 Request Entity Too Large. This generally means " -"that rate limiting or a quota threshold was breached.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"该请求返回了“413 请求实体太大”。这通常意味着已违反比率限制或配额阈值。\n" -"\n" -"响应主体:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned an unexpected status: %(status)s.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"该请求返回了意外状态:%(status)s。\n" -"\n" -"响应主体:\n" -"%(body)s" - -msgid "" -"The requested image has been deactivated. Image data download is forbidden." -msgstr "所请求映像已取消激活。已禁止下载映像数据。" - -msgid "The result of current task, JSON blob" -msgstr "当前任务 JSON blob 的结果" - -#, python-format -msgid "" -"The size of the data %(image_size)s will exceed the limit. %(remaining)s " -"bytes remaining." -msgstr "数据大小 %(image_size)s 将超过限制。将剩余 %(remaining)s 个字节。" - -#, python-format -msgid "The specified member %s could not be found" -msgstr "找不到指定的成员 %s" - -#, python-format -msgid "The specified metadata object %s could not be found" -msgstr "找不到指定的元数据对象 %s" - -#, python-format -msgid "The specified metadata tag %s could not be found" -msgstr "找不到指定的元数据标记 %s" - -#, python-format -msgid "The specified namespace %s could not be found" -msgstr "找不到指定的名称空间 %s" - -#, python-format -msgid "The specified property %s could not be found" -msgstr "找不到指定的属性 %s" - -#, python-format -msgid "The specified resource type %s could not be found " -msgstr "找不到指定的资源类型 %s" - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'" -msgstr "已删除映像位置的状态只能设置为“pending_delete”或“deleted”" - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'." 
-msgstr "已删除映像位置的状态只能设置为“pending_delete”或“deleted”。" - -msgid "The status of this image member" -msgstr "此映像成员的状态" - -msgid "" -"The strategy to use for authentication. If \"use_user_token\" is not in " -"effect, then auth strategy can be specified." -msgstr "要用于认证的策略。如果“use_user_token”没有生效,那么可指定认证策略。" - -#, python-format -msgid "" -"The target member %(member_id)s is already associated with image " -"%(image_id)s." -msgstr "目标成员 %(member_id)s 已关联映像 %(image_id)s。" - -msgid "" -"The tenant name of the administrative user. If \"use_user_token\" is not in " -"effect, then admin tenant name can be specified." -msgstr "" -"管理用户的租户名称。如果“use_user_token”没有生效,那么可指定管理员租户名称。" - -msgid "The type of task represented by this content" -msgstr "此内容表示的任务的类型" - -msgid "The unique namespace text." -msgstr "唯一名称空间文本。" - -msgid "The user friendly name for the namespace. Used by UI if available." -msgstr "名称空间的用户友好名称。由 UI 使用(如果可用)。" - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. Error: %(ioe)s" -msgstr "" -"%(error_key_name)s %(error_filename)s 存在问题。请对它进行验证。发生错误:" -"%(ioe)s" - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. OpenSSL error: %(ce)s" -msgstr "" -"%(error_key_name)s %(error_filename)s 存在问题。请对它进行验证。发生 OpenSSL " -"错误:%(ce)s" - -#, python-format -msgid "" -"There is a problem with your key pair. Please verify that cert " -"%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" -msgstr "" -"密钥对存在问题。请验证证书 %(cert_file)s 和密钥 %(key_file)s 是否应该在一起。" -"发生 OpenSSL 错误 %(ce)s" - -msgid "There was an error configuring the client." -msgstr "配置客户机时出错。" - -msgid "There was an error connecting to a server" -msgstr "连接至服务器时出错" - -msgid "" -"This operation is currently not permitted on Glance Tasks. They are auto " -"deleted after reaching the time based on their expires_at property." 
-msgstr "" -"当前不允许对 Glance 任务执行此操作。到达基于 expires_at 属性的时间后,它们会" -"自动删除。" - -msgid "This operation is currently not permitted on Glance images details." -msgstr "当前不允许对 Glance 映像详细信息执行此操作。" - -msgid "" -"Time in hours for which a task lives after, either succeeding or failing" -msgstr "任务在成功或失败之后生存的时间(以小时计)" - -msgid "Too few arguments." -msgstr "太少参数" - -#, python-format -msgid "" -"Total size is %(size)d bytes (%(human_size)s) across %(img_count)d images" -msgstr "总大小为 %(size)d 字节(%(human_size)s)(在 %(img_count)d 个映像上)" - -msgid "" -"URI cannot contain more than one occurrence of a scheme.If you have " -"specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " -"you need to change it to use the swift+http:// scheme, like so: swift+http://" -"user:pass@authurl.com/v1/container/obj" -msgstr "" -"URI 不能包含方案的多个实例。如果已指定类似于 swift://user:pass@http://" -"authurl.com/v1/container/obj 的 URI,那么需要将它更改为使用 swift+http:// 方" -"案,类似于以下:swift+http://user:pass@authurl.com/v1/container/obj" - -msgid "URL to access the image file kept in external store" -msgstr "用于访问外部存储器中保留的映像文件的 URL" - -#, python-format -msgid "" -"Unable to create pid file %(pid)s. Running as non-root?\n" -"Falling back to a temp file, you can stop %(service)s service using:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" -msgstr "" -"无法创建 pid 文件 %(pid)s。正在以非 root 用户身份运行吗?\n" -"正在回退至临时文件,可使用以下命令停止 %(service)s 服务:\n" -"%(file)s %(server)s stop --pid-file %(fb)s" - -#, python-format -msgid "Unable to filter by unknown operator '%s'." -msgstr "无法按未知运算符“%s”进行过滤。" - -msgid "Unable to filter on a range with a non-numeric value." -msgstr "无法对具有非数字值的范围进行过滤。" - -msgid "Unable to filter on a unknown operator." -msgstr "无法针对未知运算符进行过滤。" - -msgid "Unable to filter using the specified operator." -msgstr "无法使用指定运算符进行过滤。" - -msgid "Unable to filter using the specified range." 
-msgstr "无法使用指定的范围进行过滤。" - -#, python-format -msgid "Unable to find '%s' in JSON Schema change" -msgstr "在 JSON 模式更改中找不到“%s”" - -#, python-format -msgid "" -"Unable to find `op` in JSON Schema change. It must be one of the following: " -"%(available)s." -msgstr "在 JSON 模式更改中找不到“op”。它必须是下列其中一项:%(available)s。" - -msgid "Unable to increase file descriptor limit. Running as non-root?" -msgstr "无法增大文件描述符限制。正在以非 root 用户身份运行吗?" - -#, python-format -msgid "" -"Unable to load %(app_name)s from configuration file %(conf_file)s.\n" -"Got: %(e)r" -msgstr "" -"无法从配置文件 %(conf_file)s 装入 %(app_name)s。\n" -"发生错误:%(e)r" - -#, python-format -msgid "Unable to load schema: %(reason)s" -msgstr "无法装入模式:%(reason)s" - -#, python-format -msgid "Unable to locate paste config file for %s." -msgstr "对于 %s,找不到粘贴配置文件。" - -#, python-format -msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" -msgstr "无法为镜像%(image_id)s上传重复的数据: %(error)s" - -msgid "Unauthorized image access" -msgstr "无权访问映像" - -msgid "Unexpected body type. Expected list/dict." -msgstr "意外主体类型。应该为 list/dict。" - -#, python-format -msgid "Unexpected response: %s" -msgstr "接收到意外响应:%s" - -#, python-format -msgid "Unknown auth strategy '%s'" -msgstr "授权策略“%s”未知" - -#, python-format -msgid "Unknown command: %s" -msgstr "未知命令%s" - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "排序方向未知,必须为“降序”或“升序”" - -msgid "Unrecognized JSON Schema draft version" -msgstr "无法识别 JSON 模式草稿版本" - -msgid "Unrecognized changes-since value" -msgstr "无法识别 changes-since 值" - -#, python-format -msgid "Unsupported sort_dir. Acceptable values: %s" -msgstr "sort_dir 不受支持。可接受值:%s" - -#, python-format -msgid "Unsupported sort_key. 
Acceptable values: %s" -msgstr "sort_key 不受支持。可接受值:%s" - -msgid "Virtual size of image in bytes" -msgstr "映像的虚拟大小,以字节计" - -#, python-format -msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" -msgstr "用来等待 pid %(pid)s (%(file)s) 终止的时间已达到 15 秒;正在放弃" - -msgid "" -"When running server in SSL mode, you must specify both a cert_file and " -"key_file option value in your configuration file" -msgstr "" -"以 SSL 方式运行服务器时,必须在配置文件中同时指定 cert_file 和 key_file 选项" -"值" - -msgid "" -"Whether to pass through the user token when making requests to the registry. " -"To prevent failures with token expiration during big files upload, it is " -"recommended to set this parameter to False.If \"use_user_token\" is not in " -"effect, then admin credentials can be specified." -msgstr "" -"向注册表进行请求时是否传递用户令牌。为了防止在上载大文件期间因令牌到期而产生" -"故障,建议将此参数设置为 False。如果“use_user_token”未生效,那么可以指定管理" -"凭证。" - -#, python-format -msgid "Wrong command structure: %s" -msgstr "命令结构 %s 不正确" - -msgid "You are not authenticated." -msgstr "您未经认证。" - -msgid "You are not authorized to complete this action." -msgstr "您无权完成此操作。" - -#, python-format -msgid "You are not authorized to lookup image %s." -msgstr "未授权您查询映像 %s。" - -#, python-format -msgid "You are not authorized to lookup the members of the image %s." -msgstr "未授权您查询映像 %s 的成员。" - -#, python-format -msgid "You are not permitted to create a tag in the namespace owned by '%s'" -msgstr "不允许在由“%s”拥有的名称空间中创建标记" - -msgid "You are not permitted to create image members for the image." -msgstr "不允许为映像创建映像成员。" - -#, python-format -msgid "You are not permitted to create images owned by '%s'." 
-msgstr "不允许创建由“%s”拥有的映像。" - -#, python-format -msgid "You are not permitted to create namespace owned by '%s'" -msgstr "不允许创建由“%s”拥有的名称空间" - -#, python-format -msgid "You are not permitted to create object owned by '%s'" -msgstr "不允许创建由“%s”拥有的对象" - -#, python-format -msgid "You are not permitted to create property owned by '%s'" -msgstr "不允许创建由“%s”拥有的属性" - -#, python-format -msgid "You are not permitted to create resource_type owned by '%s'" -msgstr "不允许创建由“%s”拥有的 resource_type" - -#, python-format -msgid "You are not permitted to create this task with owner as: %s" -msgstr "不允许采用以下身份作为所有者来创建此任务:%s" - -msgid "You are not permitted to deactivate this image." -msgstr "不允许取消激活此映像。" - -msgid "You are not permitted to delete this image." -msgstr "不允许删除此映像。" - -msgid "You are not permitted to delete this meta_resource_type." -msgstr "你不被允许删除meta_resource_type。" - -msgid "You are not permitted to delete this namespace." -msgstr "不允许删除此名称空间。" - -msgid "You are not permitted to delete this object." -msgstr "你不被允许删除这个对象。" - -msgid "You are not permitted to delete this property." -msgstr "不允许删除此属性。" - -msgid "You are not permitted to delete this tag." -msgstr "不允许删除此标记。" - -#, python-format -msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." -msgstr "不允许对此 %(resource)s 修改“%(attr)s”。" - -#, python-format -msgid "You are not permitted to modify '%s' on this image." -msgstr "不允许对此映像修改“%s”。" - -msgid "You are not permitted to modify locations for this image." -msgstr "不允许为此映像修改位置。" - -msgid "You are not permitted to modify tags on this image." -msgstr "不允许对此映像修改标记。" - -msgid "You are not permitted to modify this image." -msgstr "不允许修改此映像。" - -msgid "You are not permitted to reactivate this image." -msgstr "不允许重新激活此映像。" - -msgid "You are not permitted to set status on this task." -msgstr "你不被允许设置这个任务的状态。" - -msgid "You are not permitted to update this namespace." -msgstr "不允许更新此名称空间。" - -msgid "You are not permitted to update this object." 
-msgstr "你不被允许更新这个对象。" - -msgid "You are not permitted to update this property." -msgstr "不允许更新此属性。" - -msgid "You are not permitted to update this tag." -msgstr "不允许更新此标记。" - -msgid "You are not permitted to upload data for this image." -msgstr "不允许为此映像上载数据。" - -#, python-format -msgid "You cannot add image member for %s" -msgstr "无法为 %s 添加映像成员" - -#, python-format -msgid "You cannot delete image member for %s" -msgstr "无法为 %s 删除映像成员" - -#, python-format -msgid "You cannot get image member for %s" -msgstr "无法为 %s 获取映像成员" - -#, python-format -msgid "You cannot update image member %s" -msgstr "无法更新映像成员 %s" - -msgid "You do not own this image" -msgstr "您未拥有此映像" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a cert, " -"however you have failed to supply either a key_file parameter or set the " -"GLANCE_CLIENT_KEY_FILE environ variable" -msgstr "" -"已选择在连接中使用 SSL,并且已提供证书,但是未能提供 key_file 参数或设置 " -"GLANCE_CLIENT_KEY_FILE 环境变量" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a key, " -"however you have failed to supply either a cert_file parameter or set the " -"GLANCE_CLIENT_CERT_FILE environ variable" -msgstr "" -"已选择在连接中使用 SSL,并且已提供密钥,但是未能提供 cert_file 参数或设置 " -"GLANCE_CLIENT_CERT_FILE 环境变量" - -msgid "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" -msgstr "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" - -#, python-format -msgid "__init__() got unexpected keyword argument '%s'" -msgstr "__init__() 已获取意外的关键字自变量“%s”" - -#, python-format -msgid "" -"cannot transition from %(current)s to %(next)s in update (wanted from_state=" -"%(from)s)" -msgstr "" -"在更新中,无法从 %(current)s 转变为 %(next)s(需要 from_state=%(from)s)" - -#, python-format -msgid "custom properties (%(props)s) conflict with base properties" -msgstr "定制属性 (%(props)s) 与基本基准冲突" - -msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" 
-msgstr "在此平台上,eventlet“poll”和“selects”主数据中心都不可用" - -msgid "is_public must be None, True, or False" -msgstr "is_public 必须为“无”、True 或 False" - -msgid "limit param must be an integer" -msgstr "limit 参数必须为整数" - -msgid "limit param must be positive" -msgstr "limit 参数必须为正数" - -msgid "md5 hash of image contents." -msgstr "映像内容的 md5 散列。" - -#, python-format -msgid "new_image() got unexpected keywords %s" -msgstr "new_image() 已获取意外的关键字 %s" - -msgid "protected must be True, or False" -msgstr "protected 必须为 True 或 False" - -#, python-format -msgid "unable to launch %(serv)s. Got error: %(e)s" -msgstr "无法启动 %(serv)s。发生错误:%(e)s" - -#, python-format -msgid "x-openstack-request-id is too long, max size %s" -msgstr "x-openstack-request-id 太长,最大大小为 %s" diff --git a/glance/locale/zh_TW/LC_MESSAGES/glance.po b/glance/locale/zh_TW/LC_MESSAGES/glance.po deleted file mode 100644 index 4f2cfa22..00000000 --- a/glance/locale/zh_TW/LC_MESSAGES/glance.po +++ /dev/null @@ -1,1967 +0,0 @@ -# Translations template for glance. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the glance project. -# -# Translators: -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: glance 15.0.0.0b3.dev29\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-06-23 20:54+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-12 05:23+0000\n" -"Last-Translator: Copied by Zanata \n" -"Language: zh-TW\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" -"Language-Team: Chinese (Taiwan)\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "%(cls)s exception was raised in the last rpc call: %(val)s" -msgstr "前一個 RPC 呼叫已發出 %(cls)s 異常狀況:%(val)s" - -#, python-format -msgid "%(m_id)s not found in the member list of the image %(i_id)s." 
-msgstr "在映像檔 %(i_id)s 的成員清單中找不到 %(m_id)s。" - -#, python-format -msgid "%(serv)s (pid %(pid)s) is running..." -msgstr "%(serv)s (pid %(pid)s) 正在執行中..." - -#, python-format -msgid "%(serv)s appears to already be running: %(pid)s" -msgstr "%(serv)s 似乎已在執行中:%(pid)s" - -#, python-format -msgid "" -"%(strategy)s is registered as a module twice. %(module)s is not being used." -msgstr "%(strategy)s 已登錄作為模組兩次。%(module)s 未使用。" - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Could not load the " -"filesystem store" -msgstr "" -"未適當地配置 %(task_id)s(類型為 %(task_type)s)。無法載入檔案系統儲存庫" - -#, python-format -msgid "" -"%(task_id)s of %(task_type)s not configured properly. Missing work dir: " -"%(work_dir)s" -msgstr "" -"未適當地配置 %(task_id)s(類型為 %(task_type)s)。遺漏工作目錄:%(work_dir)s" - -#, python-format -msgid "%(verb)sing %(serv)s" -msgstr "正在對 %(serv)s 執行 %(verb)s 作業" - -#, python-format -msgid "%(verb)sing %(serv)s with %(conf)s" -msgstr "透過 %(conf)s,正在對 %(serv)s 執行 %(verb)s 作業" - -#, python-format -msgid "" -"%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " -"address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " -"separately from the port (i.e., \"[fe80::a:b:c]:9876\")." -msgstr "" -"%s 請指定 host:port 組,其中 host 是 IPv4 位址、IPv6 位址、主機名稱或 FQDN。" -"如果使用 IPv6 位址,請將其單獨括在方括弧內,以與埠區別開(例如 \"[fe80::a:b:" -"c]:9876\")。" - -#, python-format -msgid "%s can't contain 4 byte unicode characters." -msgstr "%s 不能包含 4 位元組 Unicode 字元。" - -#, python-format -msgid "%s is already stopped" -msgstr "已停止 %s" - -#, python-format -msgid "%s is stopped" -msgstr "%s 已停止" - -msgid "" -"--os_auth_url option or OS_AUTH_URL environment variable required when " -"keystone authentication strategy is enabled\n" -msgstr "" -"--os_auth_url 選項或 OS_AUTH_URL 環境變數(啟用 Keystone 鑑別策略時需要)\n" - -msgid "A body is not expected with this request." 
-msgstr "此要求預期不含內文。" - -#, python-format -msgid "" -"A metadata definition object with name=%(object_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"名稱為 %(object_name)s 的 meta 資料定義物件已經存在於名稱空間 " -"%(namespace_name)s 中。" - -#, python-format -msgid "" -"A metadata definition property with name=%(property_name)s already exists in " -"namespace=%(namespace_name)s." -msgstr "" -"名稱為 %(property_name)s 的 meta 資料定義內容已經存在於名稱空間 " -"%(namespace_name)s 中。" - -#, python-format -msgid "" -"A metadata definition resource-type with name=%(resource_type_name)s already " -"exists." -msgstr "名稱為 %(resource_type_name)s 的 meta 資料定義資源類型已存在。" - -msgid "A set of URLs to access the image file kept in external store" -msgstr "用來存取外部儲存庫中所保留映像檔的 URL 集" - -msgid "Amount of disk space (in GB) required to boot image." -msgstr "啟動映像檔所需的磁碟空間數量(以 GB 為單位)。" - -msgid "Amount of ram (in MB) required to boot image." -msgstr "啟動映像檔所需的 RAM 數量(以 MB 為單位)。" - -msgid "An identifier for the image" -msgstr "映像檔的 ID" - -msgid "An identifier for the image member (tenantId)" -msgstr "映像檔成員的 ID (tenantId)" - -msgid "An identifier for the owner of this task" -msgstr "此作業的擁有者 ID" - -msgid "An identifier for the task" -msgstr "作業的 ID" - -msgid "An image file url" -msgstr "映像檔 URL" - -msgid "An image schema url" -msgstr "映像檔綱目 URL" - -msgid "An image self url" -msgstr "映像檔自身 URL" - -#, python-format -msgid "An image with identifier %s already exists" -msgstr "ID 為 %s 的映像檔已存在" - -msgid "An import task exception occurred" -msgstr "發生匯入作業異常狀況" - -msgid "An object with the same identifier already exists." -msgstr "已存在具有相同 ID 的物件。" - -msgid "An object with the same identifier is currently being operated on." -msgstr "目前正在對具有相同 ID 的物件執行作業。" - -msgid "An object with the specified identifier was not found." 
-msgstr "找不到具有所指定 ID 的物件。" - -msgid "An unknown exception occurred" -msgstr "發生不明異常狀況" - -msgid "An unknown task exception occurred" -msgstr "發生不明的作業異常狀況" - -#, python-format -msgid "Attempt to upload duplicate image: %s" -msgstr "嘗試上傳重複的映像檔:%s" - -msgid "Attempted to update Location field for an image not in queued status." -msgstr "已嘗試更新處於未排入佇列狀態之映像檔的「位置」欄位。" - -#, python-format -msgid "Attribute '%(property)s' is read-only." -msgstr "屬性 '%(property)s' 是唯讀的。" - -#, python-format -msgid "Attribute '%(property)s' is reserved." -msgstr "屬性 '%(property)s' 已保留。" - -#, python-format -msgid "Attribute '%s' is read-only." -msgstr "屬性 '%s' 是唯讀的。" - -#, python-format -msgid "Attribute '%s' is reserved." -msgstr "屬性 '%s' 已保留。" - -msgid "Attribute container_format can be only replaced for a queued image." -msgstr "僅已排入佇列的映像檔可以取代屬性 container_format。" - -msgid "Attribute disk_format can be only replaced for a queued image." -msgstr "僅已排入佇列的映像檔可以取代屬性 disk_format。" - -#, python-format -msgid "Auth service at URL %(url)s not found." -msgstr "在 URL %(url)s 處找不到鑑別服務。" - -#, python-format -msgid "" -"Authentication error - the token may have expired during file upload. " -"Deleting image data for %s." -msgstr "鑑別錯誤 - 在檔案上傳期間,記號可能已過期。正在刪除 %s 的映像檔資料。" - -msgid "Authorization failed." -msgstr "授權失敗。" - -msgid "Available categories:" -msgstr "可用的種類:" - -#, python-format -msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." 
-msgstr "\"%s\" 查詢過濾器格式錯誤。請使用 ISO 8601 日期時間表示法。" - -#, python-format -msgid "Bad Command: %s" -msgstr "錯誤的指令:%s" - -#, python-format -msgid "Bad header: %(header_name)s" -msgstr "錯誤的標頭:%(header_name)s" - -#, python-format -msgid "Bad value passed to filter %(filter)s got %(val)s" -msgstr "傳遞給過濾器 %(filter)s 的值不正確,取得 %(val)s" - -#, python-format -msgid "Badly formed S3 URI: %(uri)s" -msgstr "S3 URI 的格式不正確:%(uri)s" - -#, python-format -msgid "Badly formed credentials '%(creds)s' in Swift URI" -msgstr "Swift URI 中認證 '%(creds)s' 的格式不正確" - -msgid "Badly formed credentials in Swift URI." -msgstr "Swift URI 中認證的格式不正確。" - -msgid "Body expected in request." -msgstr "要求中需要內文。" - -msgid "Cannot be a negative value" -msgstr "不能是負數值" - -msgid "Cannot be a negative value." -msgstr "不能是負數值。" - -#, python-format -msgid "Cannot convert image %(key)s '%(value)s' to an integer." -msgstr "無法將映像檔 %(key)s '%(value)s' 轉換為整數。" - -msgid "Cannot remove last location in the image." -msgstr "無法移除映像檔中的最後位置。" - -#, python-format -msgid "Cannot save data for image %(image_id)s: %(error)s" -msgstr "無法儲存映像檔 %(image_id)s 的資料:%(error)s" - -msgid "Cannot set locations to empty list." -msgstr "無法將位置設為空白清單。" - -msgid "Cannot upload to an unqueued image" -msgstr "無法上傳至未排入佇列的映像檔" - -#, python-format -msgid "Checksum verification failed. Aborted caching of image '%s'." -msgstr "總和檢查驗證失敗。已中止快取映像檔 '%s'。" - -msgid "Client disconnected before sending all data to backend" -msgstr "用戶端已在將所有資料傳送至後端之前斷線" - -msgid "Command not found" -msgstr "找不到指令" - -msgid "Configuration option was not valid" -msgstr "配置選項無效" - -#, python-format -msgid "Connect error/bad request to Auth service at URL %(url)s." -msgstr "將錯誤/不當的要求連接至 URL %(url)s 處的鑑別服務。" - -#, python-format -msgid "Constructed URL: %s" -msgstr "已建構 URL:%s" - -msgid "Container format is not specified." 
-msgstr "未指定儲存器格式。" - -msgid "Content-Type must be application/octet-stream" -msgstr "內容類型必須是 application/octet-stream" - -#, python-format -msgid "Corrupt image download for image %(image_id)s" -msgstr "映像檔 %(image_id)s 的映像檔下載已毀損" - -#, python-format -msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" -msgstr "嘗試 30 秒鐘後仍無法連結至 %(host)s:%(port)s" - -msgid "Could not find OVF file in OVA archive file." -msgstr "在 OVA 保存檔中找不到 OVF 檔。" - -#, python-format -msgid "Could not find metadata object %s" -msgstr "找不到 meta 資料物件 %s" - -#, python-format -msgid "Could not find metadata tag %s" -msgstr "找不到 meta 資料標籤 %s" - -#, python-format -msgid "Could not find namespace %s" -msgstr "找不到名稱空間 %s" - -#, python-format -msgid "Could not find property %s" -msgstr "找不到內容 %s" - -msgid "Could not find required configuration option" -msgstr "找不到必要配置選項" - -#, python-format -msgid "Could not find task %s" -msgstr "找不到作業 %s" - -#, python-format -msgid "Could not update image: %s" -msgstr "無法更新映像檔:%s" - -msgid "Currently, OVA packages containing multiple disk are not supported." -msgstr "目前,不支援包含多個磁碟的 OVA 套件。" - -#, python-format -msgid "Data for image_id not found: %s" -msgstr "找不到 image_id 的資料:%s" - -msgid "Data supplied was not valid." 
-msgstr "提供的資料無效。" - -msgid "Date and time of image member creation" -msgstr "映像檔成員的建立日期和時間" - -msgid "Date and time of image registration" -msgstr "映像檔登錄的日期和時間" - -msgid "Date and time of last modification of image member" -msgstr "映像檔成員的前次修改日期和時間" - -msgid "Date and time of namespace creation" -msgstr "名稱空間的建立日期和時間" - -msgid "Date and time of object creation" -msgstr "物件的建立日期和時間" - -msgid "Date and time of resource type association" -msgstr "資源類型關聯的日期和時間" - -msgid "Date and time of tag creation" -msgstr "標記的建立日期和時間" - -msgid "Date and time of the last image modification" -msgstr "映像檔的前次修改日期和時間" - -msgid "Date and time of the last namespace modification" -msgstr "名稱空間的前次修改日期和時間" - -msgid "Date and time of the last object modification" -msgstr "物件的前次修改日期和時間" - -msgid "Date and time of the last resource type association modification" -msgstr "資源類型關聯的前次修改日期和時間" - -msgid "Date and time of the last tag modification" -msgstr "標記的前次修改日期和時間" - -msgid "Datetime when this resource was created" -msgstr "此資源的建立日期時間" - -msgid "Datetime when this resource was updated" -msgstr "此資源的更新日期時間" - -msgid "Datetime when this resource would be subject to removal" -msgstr "可能會移除此資源的日期時間" - -#, python-format -msgid "Denying attempt to upload image because it exceeds the quota: %s" -msgstr "正在拒絕嘗試上傳映像檔,因為它已超出配額:%s" - -#, python-format -msgid "Denying attempt to upload image larger than %d bytes." -msgstr "正在拒絕嘗試上傳大於 %d 個位元組的映像檔。" - -msgid "Descriptive name for the image" -msgstr "映像檔的敘述性名稱" - -msgid "Disk format is not specified." -msgstr "未指定磁碟格式。" - -#, python-format -msgid "" -"Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" -msgstr "無法正確地配置驅動程式 %(driver_name)s。原因:%(reason)s" - -msgid "" -"Error decoding your request. 
Either the URL or the request body contained " -"characters that could not be decoded by Glance" -msgstr "" -"將您的要求進行解碼時發生錯誤。URL 或要求內文包含無法由 Glance 進行解碼的字元" - -#, python-format -msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" -msgstr "提取映像檔 %(image_id)s 的成員時發生錯誤:%(inner_msg)s" - -msgid "Error in store configuration. Adding images to store is disabled." -msgstr "儲存庫配置發生錯誤。已停用新增映像檔至儲存庫。" - -msgid "Expected a member in the form: {\"member\": \"image_id\"}" -msgstr "預期成員的格式為:{\"member\": \"image_id\"}" - -msgid "Expected a status in the form: {\"status\": \"status\"}" -msgstr "預期狀態的格式為:{\"status\": \"status\"}" - -msgid "External source should not be empty" -msgstr "外部來源不應是空的" - -#, python-format -msgid "External sources are not supported: '%s'" -msgstr "不支援外部來源:'%s'" - -#, python-format -msgid "Failed to activate image. Got error: %s" -msgstr "無法啟動映像檔。發生錯誤:%s" - -#, python-format -msgid "Failed to add image metadata. Got error: %s" -msgstr "無法新增映像檔 meta 資料。發生錯誤:%s" - -#, python-format -msgid "Failed to find image %(image_id)s to delete" -msgstr "找不到要刪除的映像檔 %(image_id)s" - -#, python-format -msgid "Failed to find image to delete: %s" -msgstr "找不到要刪除的映像檔:%s" - -#, python-format -msgid "Failed to find image to update: %s" -msgstr "找不到要更新的映像檔:%s" - -#, python-format -msgid "Failed to find resource type %(resourcetype)s to delete" -msgstr "找不到要刪除的資源類型 %(resourcetype)s" - -#, python-format -msgid "Failed to initialize the image cache database. Got error: %s" -msgstr "無法起始設定映像檔快取資料庫。發生錯誤:%s" - -#, python-format -msgid "Failed to read %s from config" -msgstr "無法從配置中讀取 %s" - -#, python-format -msgid "Failed to reserve image. Got error: %s" -msgstr "無法保留映像檔。發生錯誤:%s" - -#, python-format -msgid "Failed to update image metadata. 
Got error: %s" -msgstr "無法更新映像檔 meta 資料。發生錯誤:%s" - -#, python-format -msgid "Failed to upload image %s" -msgstr "無法上傳映像檔 %s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to HTTP error: " -"%(error)s" -msgstr "由於 HTTP 錯誤而無法上傳映像檔 %(image_id)s 的映像檔資料:%(error)s" - -#, python-format -msgid "" -"Failed to upload image data for image %(image_id)s due to internal error: " -"%(error)s" -msgstr "由於內部錯誤而無法上傳映像檔 %(image_id)s 的映像檔資料:%(error)s" - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "檔案 %(path)s 具有無效的支援檔案 %(bfile)s,正在中斷。" - -msgid "" -"File based imports are not allowed. Please use a non-local source of image " -"data." -msgstr "不容許檔案型匯入。請使用映像檔資料的非本端來源。" - -msgid "Forbidden image access" -msgstr "已禁止映像檔存取" - -#, python-format -msgid "Forbidden to delete a %s image." -msgstr "已禁止刪除 %s 映像檔。" - -#, python-format -msgid "Forbidden to delete image: %s" -msgstr "已禁止刪除映像檔:%s" - -#, python-format -msgid "Forbidden to modify '%(key)s' of %(status)s image." -msgstr "已禁止修改 %(status)s 映像檔的 '%(key)s'。" - -#, python-format -msgid "Forbidden to modify '%s' of image." -msgstr "禁止修改映像檔的 '%s'。" - -msgid "Forbidden to reserve image." -msgstr "已禁止保留映像檔。" - -msgid "Forbidden to update deleted image." -msgstr "已禁止更新所刪除的映像檔。" - -#, python-format -msgid "Forbidden to update image: %s" -msgstr "已禁止更新映像檔:%s" - -#, python-format -msgid "Forbidden upload attempt: %s" -msgstr "已禁止的上傳嘗試:%s" - -#, python-format -msgid "Forbidding request, metadata definition namespace=%s is not visible." -msgstr "正在禁止要求,meta 資料定義名稱空間 %s 不可見。" - -#, python-format -msgid "Forbidding request, task %s is not visible" -msgstr "正在禁止要求,作業 %s 不可見" - -msgid "Format of the container" -msgstr "儲存器的格式" - -msgid "Format of the disk" -msgstr "磁碟的格式" - -#, python-format -msgid "Host \"%s\" is not valid." -msgstr "主機 \"%s\" 無效。" - -#, python-format -msgid "Host and port \"%s\" is not valid." 
-msgstr "主機和埠 \"%s\" 無效。" - -msgid "" -"Human-readable informative message only included when appropriate (usually " -"on failure)" -msgstr "適當的時候(通常是失敗時)僅併入人類可讀的參考訊息" - -msgid "If true, image will not be deletable." -msgstr "如果為 true,則映像檔不可刪除。" - -msgid "If true, namespace will not be deletable." -msgstr "如果為 True,則名稱空間將不可刪除。" - -#, python-format -msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" -msgstr "無法刪除映像檔 %(id)s,因為它在使用中:%(exc)s" - -#, python-format -msgid "Image %(id)s not found" -msgstr "找不到映像檔 %(id)s" - -#, python-format -msgid "" -"Image %(image_id)s could not be found after upload. The image may have been " -"deleted during the upload: %(error)s" -msgstr "" -"上傳之後找不到映像檔 %(image_id)s。可能已在上傳期間刪除該映像檔:%(error)s" - -#, python-format -msgid "Image %(image_id)s is protected and cannot be deleted." -msgstr "映像檔 %(image_id)s 已受保護,無法刪除。" - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload, cleaning up the chunks uploaded." -msgstr "" -"上傳之後找不到映像檔 %s。可能已在上傳期間刪除該映像檔,正在清除已上傳的區塊。" - -#, python-format -msgid "" -"Image %s could not be found after upload. The image may have been deleted " -"during the upload." -msgstr "在上傳之後,找不到映像檔 %s。在上傳期間,可能已刪除該映像檔。" - -#, python-format -msgid "Image %s is deactivated" -msgstr "已取消啟動映像檔 %s" - -#, python-format -msgid "Image %s is not active" -msgstr "映像檔 %s 不在作用中" - -#, python-format -msgid "Image %s not found." -msgstr "找不到映像檔 %s。" - -#, python-format -msgid "Image exceeds the storage quota: %s" -msgstr "映像檔超出儲存體配額:%s" - -msgid "Image id is required." 
-msgstr "映像檔 ID 是必要的。" - -msgid "Image is protected" -msgstr "映像檔是受保護的" - -#, python-format -msgid "Image member limit exceeded for image %(id)s: %(e)s:" -msgstr "已超出映像檔 %(id)s 的映像檔成員限制:%(e)s:" - -#, python-format -msgid "Image name too long: %d" -msgstr "映像檔名稱太長:%d" - -msgid "Image operation conflicts" -msgstr "映像檔作業衝突" - -#, python-format -msgid "" -"Image status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "不容許映像檔狀態從 %(cur_status)s 轉移至 %(new_status)s" - -#, python-format -msgid "Image storage media is full: %s" -msgstr "映像檔儲存媒體已滿:%s" - -#, python-format -msgid "Image tag limit exceeded for image %(id)s: %(e)s:" -msgstr "已超出映像檔 %(id)s 的映像檔標籤限制:%(e)s:" - -#, python-format -msgid "Image upload problem: %s" -msgstr "映像檔上傳問題:%s" - -#, python-format -msgid "Image with identifier %s already exists!" -msgstr "ID 為 %s 的映像檔已存在!" - -#, python-format -msgid "Image with identifier %s has been deleted." -msgstr "已刪除 ID 為 %s 的映像檔。" - -#, python-format -msgid "Image with identifier %s not found" -msgstr "找不到 ID 為 %s 的映像檔" - -#, python-format -msgid "Image with the given id %(image_id)s was not found" -msgstr "找不到具有給定 ID %(image_id)s 的映像檔" - -#, python-format -msgid "" -"Incorrect auth strategy, expected \"%(expected)s\" but received " -"\"%(received)s\"" -msgstr "不正確的鑑別策略,需要 \"%(expected)s\",但收到 \"%(received)s\"" - -#, python-format -msgid "Incorrect request: %s" -msgstr "不正確的要求:%s" - -#, python-format -msgid "Input does not contain '%(key)s' field" -msgstr "輸入不包含 '%(key)s' 欄位" - -#, python-format -msgid "Insufficient permissions on image storage media: %s" -msgstr "對映像檔儲存媒體的許可權不足:%s" - -#, python-format -msgid "Invalid JSON pointer for this resource: '/%s'" -msgstr "此資源的 JSON 指標無效:'/%s'" - -#, python-format -msgid "Invalid checksum '%s': can't exceed 32 characters" -msgstr "無效的總和檢查 '%s':不能超過 32 個字元" - -msgid "Invalid configuration in glance-swift conf file." 
-msgstr "glance-swift 配置檔中的配置無效。" - -msgid "Invalid configuration in property protection file." -msgstr "內容保護檔案中的配置無效。" - -#, python-format -msgid "Invalid container format '%s' for image." -msgstr "映像檔的儲存器格式 '%s' 無效。" - -#, python-format -msgid "Invalid content type %(content_type)s" -msgstr "無效的內容類型 %(content_type)s" - -#, python-format -msgid "Invalid disk format '%s' for image." -msgstr "映像檔的磁碟格式 '%s' 無效。" - -#, python-format -msgid "Invalid filter value %s. The quote is not closed." -msgstr "無效的過濾器值 %s。遺漏右引號。" - -#, python-format -msgid "" -"Invalid filter value %s. There is no comma after closing quotation mark." -msgstr "無效的過濾器值 %s。右引號後面沒有逗點。" - -#, python-format -msgid "" -"Invalid filter value %s. There is no comma before opening quotation mark." -msgstr "無效的過濾器值 %s。左引號前面沒有逗點。" - -msgid "Invalid image id format" -msgstr "無效的映像檔 ID 格式" - -msgid "Invalid location" -msgstr "無效的位置" - -#, python-format -msgid "Invalid location %s" -msgstr "無效的位置 %s" - -#, python-format -msgid "Invalid location: %s" -msgstr "無效的位置:%s" - -#, python-format -msgid "" -"Invalid location_strategy option: %(name)s. The valid strategy option(s) " -"is(are): %(strategies)s" -msgstr "" -"無效的 location_strategy 選項:%(name)s。有效的策略選項為:%(strategies)s" - -msgid "Invalid locations" -msgstr "無效的位置" - -#, python-format -msgid "Invalid locations: %s" -msgstr "無效的位置:%s" - -msgid "Invalid marker format" -msgstr "無效的標記格式" - -msgid "Invalid marker. Image could not be found." -msgstr "無效的標記。找不到映像檔。" - -#, python-format -msgid "Invalid membership association: %s" -msgstr "無效的成員資格關聯:%s" - -msgid "" -"Invalid mix of disk and container formats. When setting a disk or container " -"format to one of 'aki', 'ari', or 'ami', the container and disk formats must " -"match." -msgstr "" -"磁碟格式及儲存器格式的混合無效。將磁碟格式或儲存器格式設為 'aki'、'ari' 或 " -"'ami' 其中之一時,儲存器格式及磁碟格式必須相符。" - -#, python-format -msgid "" -"Invalid operation: `%(op)s`. It must be one of the following: %(available)s." 
-msgstr "無效作業:`%(op)s`。它必須是下列其中一項:%(available)s。" - -msgid "Invalid position for adding a location." -msgstr "用於新增位置的位置無效。" - -msgid "Invalid position for removing a location." -msgstr "用於移除位置的位置無效。" - -msgid "Invalid service catalog json." -msgstr "無效的服務型錄 JSON。" - -#, python-format -msgid "Invalid sort direction: %s" -msgstr "無效的排序方向:%s" - -#, python-format -msgid "" -"Invalid sort key: %(sort_key)s. It must be one of the following: " -"%(available)s." -msgstr "排序鍵 %(sort_key)s 無效。它必須為下列其中一項:%(available)s。" - -#, python-format -msgid "Invalid status value: %s" -msgstr "無效的狀態值:%s" - -#, python-format -msgid "Invalid status: %s" -msgstr "無效的狀態:%s" - -#, python-format -msgid "Invalid time format for %s." -msgstr "%s 的時間格式無效。" - -#, python-format -msgid "Invalid type value: %s" -msgstr "無效的類型值:%s" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition namespace " -"with the same name of %s" -msgstr "更新無效。它會導致產生具有相同名稱 %s 的重複 meta 資料定義名稱空間。" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"無效的更新。此更新將導致下列名稱空間中存在具有相同名稱%(name)s 的重複 meta 資" -"料定義物件:%(namespace_name)s。" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition object " -"with the same name=%(name)s in namespace=%(namespace_name)s." -msgstr "" -"無效的更新。此更新將導致下列名稱空間中存在具有相同名稱%(name)s 的重複 meta 資" -"料定義物件:%(namespace_name)s。" - -#, python-format -msgid "" -"Invalid update. It would result in a duplicate metadata definition property " -"with the same name=%(name)s in namespace=%(namespace_name)s." 
-msgstr "" -"更新無效。它會導致在下列名稱空間中產生具有相同名稱 %(name)s 的重複 meta 資料" -"定義內容:%(namespace_name)s。" - -#, python-format -msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" -msgstr "參數 '%(param)s' 的值 '%(value)s' 無效:%(extra_msg)s" - -#, python-format -msgid "Invalid value for option %(option)s: %(value)s" -msgstr "選項 %(option)s 的值 %(value)s 無效" - -#, python-format -msgid "Invalid visibility value: %s" -msgstr "無效的可見性值:%s" - -msgid "It's invalid to provide multiple image sources." -msgstr "提供多個映像檔來源是無效的做法。" - -msgid "It's not allowed to add locations if locations are invisible." -msgstr "如果位置是隱藏的,則不容許新增位置。" - -msgid "It's not allowed to remove locations if locations are invisible." -msgstr "如果位置是隱藏的,則不容許移除位置。" - -msgid "It's not allowed to update locations if locations are invisible." -msgstr "如果位置是隱藏的,則不容許更新位置。" - -msgid "List of strings related to the image" -msgstr "與映像檔相關的字串清單" - -msgid "Malformed JSON in request body." -msgstr "要求內文中 JSON 的格式不正確。" - -msgid "Maximal age is count of days since epoch." -msgstr "經歷時間上限是自新紀元以來的天數。" - -#, python-format -msgid "Maximum redirects (%(redirects)s) was exceeded." -msgstr "已超出重新導向數目上限(%(redirects)s 個)。" - -#, python-format -msgid "Member %(member_id)s is duplicated for image %(image_id)s" -msgstr "針對映像檔 %(image_id)s,成員 %(member_id)s 重複" - -msgid "Member can't be empty" -msgstr "成員不能是空的" - -msgid "Member to be added not specified" -msgstr "未指定要新增的成員" - -msgid "Membership could not be found." -msgstr "找不到成員資格。" - -#, python-format -msgid "" -"Metadata definition namespace %(namespace)s is protected and cannot be " -"deleted." -msgstr "Meta 資料定義名稱空間 %(namespace)s 受保護,無法將其刪除。" - -#, python-format -msgid "Metadata definition namespace not found for id=%s" -msgstr "找不到 ID 為 %s 的 meta 資料定義名稱空間" - -#, python-format -msgid "" -"Metadata definition object %(object_name)s is protected and cannot be " -"deleted." 
-msgstr "Meta 資料定義物件 %(object_name)s 受保護,無法將其刪除。" - -#, python-format -msgid "Metadata definition object not found for id=%s" -msgstr "找不到 ID 為 %s 的 meta 資料定義物件" - -#, python-format -msgid "" -"Metadata definition property %(property_name)s is protected and cannot be " -"deleted." -msgstr "Meta 資料定義內容 %(property_name)s 受保護,無法將其刪除。" - -#, python-format -msgid "Metadata definition property not found for id=%s" -msgstr "找不到 ID 為 %s 的 meta 資料定義內容" - -#, python-format -msgid "" -"Metadata definition resource-type %(resource_type_name)s is a seeded-system " -"type and cannot be deleted." -msgstr "" -"Meta 資料定義資源類型 %(resource_type_name)s 是種子系統類型,無法將其刪除。" - -#, python-format -msgid "" -"Metadata definition resource-type-association %(resource_type)s is protected " -"and cannot be deleted." -msgstr "Meta 資料定義資源類型關聯 %(resource_type)s 已受保護,無法將其刪除。" - -#, python-format -msgid "" -"Metadata definition tag %(tag_name)s is protected and cannot be deleted." -msgstr "meta 資料定義標籤 %(tag_name)s 受保護,無法將其刪除。" - -#, python-format -msgid "Metadata definition tag not found for id=%s" -msgstr "找不到 ID 為 %s 的 meta 資料定義標籤" - -msgid "Minimal rows limit is 1." -msgstr "列數下限限制為 1。" - -#, python-format -msgid "Missing required credential: %(required)s" -msgstr "遺漏了必要認證:%(required)s" - -#, python-format -msgid "" -"Multiple 'image' service matches for region %(region)s. This generally means " -"that a region is required and you have not supplied one." -msgstr "" -"區域 %(region)s 有多個「映像檔」服務相符項。這通常表示需要一個區域,但您尚未" -"提供。" - -msgid "No authenticated user" -msgstr "沒有已鑑別使用者" - -#, python-format -msgid "No image found with ID %s" -msgstr "找不到 ID 為 %s 的映像檔" - -#, python-format -msgid "No location found with ID %(loc)s from image %(img)s" -msgstr "從映像檔 %(img)s 中找不到 ID 為 %(loc)s 的位置" - -msgid "No permission to share that image" -msgstr "沒有共用該映像檔的許可權" - -#, python-format -msgid "Not allowed to create members for image %s." 
-msgstr "不容許建立映像檔 %s 的成員。" - -#, python-format -msgid "Not allowed to deactivate image in status '%s'" -msgstr "不容許取消啟動處於狀態 '%s' 的映像檔" - -#, python-format -msgid "Not allowed to delete members for image %s." -msgstr "不容許刪除映像檔 %s 的成員。" - -#, python-format -msgid "Not allowed to delete tags for image %s." -msgstr "不容許刪除映像檔 %s 的標籤。" - -#, python-format -msgid "Not allowed to list members for image %s." -msgstr "不容許列出映像檔 %s 的成員。" - -#, python-format -msgid "Not allowed to reactivate image in status '%s'" -msgstr "不容許重新啟動處於狀態 '%s' 的映像檔" - -#, python-format -msgid "Not allowed to update members for image %s." -msgstr "不容許更新映像檔 %s 的成員。" - -#, python-format -msgid "Not allowed to update tags for image %s." -msgstr "不容許更新映像檔 %s 的標籤。" - -#, python-format -msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" -msgstr "不容許上傳映像檔 %(image_id)s 的映像檔資料:%(error)s" - -msgid "Number of sort dirs does not match the number of sort keys" -msgstr "排序方向數目與排序鍵數目不符" - -msgid "OVA extract is limited to admin" -msgstr "OVA 擷取已限制為管理者" - -msgid "Old and new sorting syntax cannot be combined" -msgstr "無法結合新舊排序語法" - -#, python-format -msgid "Operation \"%s\" requires a member named \"value\"." -msgstr "作業 \"%s\" 需要名稱為 \"value\" 的成員。" - -msgid "" -"Operation objects must contain exactly one member named \"add\", \"remove\", " -"or \"replace\"." -msgstr "" -"作業物件必須正好包含一個名稱為 \"add\"、\"remove\" 或 \"replace\" 的成員。" - -msgid "" -"Operation objects must contain only one member named \"add\", \"remove\", or " -"\"replace\"." -msgstr "作業物件只能包含一個名稱為 \"add\"、\"remove\" 或 \"replace\" 的成員。" - -msgid "Operations must be JSON objects." -msgstr "作業必須是 JSON 物件。" - -#, python-format -msgid "Original locations is not empty: %s" -msgstr "原始位置不是空的:%s" - -msgid "Owner can't be updated by non admin." -msgstr "擁有者無法由非管理者進行更新。" - -msgid "Owner must be specified to create a tag." -msgstr "必須指定擁有者才能建立標籤。" - -msgid "Owner of the image" -msgstr "映像檔的擁有者" - -msgid "Owner of the namespace." 
-msgstr "名稱空間的擁有者。" - -msgid "Param values can't contain 4 byte unicode." -msgstr "參數值不能包含 4 位元組 Unicode。" - -#, python-format -msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." -msgstr "指標 `%s` 包含不屬於可辨識 ESC 序列的 \"~\"。" - -#, python-format -msgid "Pointer `%s` contains adjacent \"/\"." -msgstr "指標 `%s` 包含相鄰的 \"/\"。" - -#, python-format -msgid "Pointer `%s` does not contains valid token." -msgstr "指標 `%s` 不包含有效的記號。" - -#, python-format -msgid "Pointer `%s` does not start with \"/\"." -msgstr "指標 `%s` 的開頭不是 \"/\"。" - -#, python-format -msgid "Pointer `%s` end with \"/\"." -msgstr "指標 `%s` 的結尾是 \"/\"。" - -#, python-format -msgid "Port \"%s\" is not valid." -msgstr "埠 \"%s\" 無效。" - -#, python-format -msgid "Process %d not running" -msgstr "程序 %d 不在執行中" - -#, python-format -msgid "Properties %s must be set prior to saving data." -msgstr "儲存資料之前必須設定內容 %s。" - -#, python-format -msgid "" -"Property %(property_name)s does not start with the expected resource type " -"association prefix of '%(prefix)s'." -msgstr "內容 %(property_name)s 的開頭不是預期的資源類型關聯字首 '%(prefix)s'。" - -#, python-format -msgid "Property %s already present." -msgstr "內容 %s 已存在。" - -#, python-format -msgid "Property %s does not exist." -msgstr "內容 %s 不存在。" - -#, python-format -msgid "Property %s may not be removed." -msgstr "可能無法移除內容 %s。" - -#, python-format -msgid "Property %s must be set prior to saving data." -msgstr "儲存資料之前必須設定內容 %s。" - -#, python-format -msgid "Property '%s' is protected" -msgstr "內容 '%s' 受保護" - -msgid "Property names can't contain 4 byte unicode." -msgstr "內容名稱不能包含 4 位元組 Unicode。" - -#, python-format -msgid "" -"Provided image size must match the stored image size. 
(provided size: " -"%(ps)d, stored size: %(ss)d)" -msgstr "" -"提供的映像檔大小必須符合儲存的映像檔大小。(提供的大小:%(ps)d,儲存的大小:" -"%(ss)d)" - -#, python-format -msgid "Provided object does not match schema '%(schema)s': %(reason)s" -msgstr "所提供的物件與綱目 '%(schema)s' 不符:%(reason)s" - -#, python-format -msgid "Provided status of task is unsupported: %(status)s" -msgstr "提供的作業狀態 %(status)s 不受支援" - -#, python-format -msgid "Provided type of task is unsupported: %(type)s" -msgstr "提供的作業類型 %(type)s 不受支援" - -msgid "Provides a user friendly description of the namespace." -msgstr "提供對使用者更為友善的名稱空間說明。" - -msgid "Received invalid HTTP redirect." -msgstr "收到無效的 HTTP 重新導向。" - -#, python-format -msgid "Redirecting to %(uri)s for authorization." -msgstr "正在重新導向至 %(uri)s 以進行授權。" - -#, python-format -msgid "Registry service can't use %s" -msgstr "登錄服務無法使用 %s" - -#, python-format -msgid "Registry was not configured correctly on API server. Reason: %(reason)s" -msgstr "API 伺服器上未正確地配置登錄。原因:%(reason)s" - -#, python-format -msgid "Reload of %(serv)s not supported" -msgstr "不支援重新載入 %(serv)s" - -#, python-format -msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "正在使用信號 (%(sig)s) 來重新載入 %(serv)s (pid %(pid)s)" - -#, python-format -msgid "Removing stale pid file %s" -msgstr "正在移除過時 PID 檔案 %s" - -msgid "Request body must be a JSON array of operation objects." -msgstr "要求內文必須是作業物件的 JSON 陣列。" - -msgid "Request must be a list of commands" -msgstr "要求必須是指令清單" - -#, python-format -msgid "Required store %s is invalid" -msgstr "需要的儲存庫 %s 無效" - -msgid "" -"Resource type names should be aligned with Heat resource types whenever " -"possible: http://docs.openstack.org/developer/heat/template_guide/openstack." -"html" -msgstr "" -"資源類型名稱應該儘可能與 Heat 資源類型一致:http://docs.openstack.org/" -"developer/heat/template_guide/openstack.html" - -msgid "Response from Keystone does not contain a Glance endpoint." 
-msgstr "Keystone 的回應不包含 Glance 端點。" - -msgid "Scope of image accessibility" -msgstr "映像檔的可存取性範圍" - -msgid "Scope of namespace accessibility." -msgstr "名稱空間的可存取性範圍。" - -#, python-format -msgid "Server %(serv)s is stopped" -msgstr "伺服器 %(serv)s 已停止" - -#, python-format -msgid "Server worker creation failed: %(reason)s." -msgstr "建立伺服器工作程式失敗:%(reason)s。" - -msgid "Signature verification failed" -msgstr "簽章驗證失敗" - -msgid "Size of image file in bytes" -msgstr "映像檔的大小(以位元組為單位)" - -msgid "" -"Some resource types allow more than one key / value pair per instance. For " -"example, Cinder allows user and image metadata on volumes. Only the image " -"properties metadata is evaluated by Nova (scheduling or drivers). This " -"property allows a namespace target to remove the ambiguity." -msgstr "" -"部分資源類型容許每個實例具有多個鍵值組。例如,Cinder 容許使用者及映像檔 meta " -"資料存在於多個磁區上。Nova 只評估映像檔內容 meta 資料(正在排程或驅動程式)。" -"此內容容許名稱空間目標消除此語義不明確情況。" - -msgid "Sort direction supplied was not valid." -msgstr "提供的排序方向無效。" - -msgid "Sort key supplied was not valid." -msgstr "提供的排序鍵無效。" - -msgid "" -"Specifies the prefix to use for the given resource type. Any properties in " -"the namespace should be prefixed with this prefix when being applied to the " -"specified resource type. Must include prefix separator (e.g. a colon :)." -msgstr "" -"指定要用於給定資源類型的字首。將名稱空間內的任何內容套用至指定的資源類型時," -"都應該為該內容新增此字首。必須包括字首分隔字元(例如,冒號 :)。" - -msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." 
-msgstr "狀態必須是 \"pending\"、\"accepted\" 或 \"rejected\"。" - -msgid "Status not specified" -msgstr "未指定狀態" - -msgid "Status of the image" -msgstr "映像檔的狀態" - -#, python-format -msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" -msgstr "不容許狀態從 %(cur_status)s 轉移至 %(new_status)s" - -#, python-format -msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" -msgstr "正在使用信號 (%(sig)s) 來停止 %(serv)s (pid %(pid)s)" - -#, python-format -msgid "Store for image_id not found: %s" -msgstr "找不到 image_id 的儲存庫:%s" - -#, python-format -msgid "Store for scheme %s not found" -msgstr "找不到架構 %s 的儲存庫" - -#, python-format -msgid "" -"Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " -"(%(actual)s) did not match. Setting image status to 'killed'." -msgstr "" -"提供的 %(attr)s (%(supplied)s),與從所上傳映像檔 (%(actual)s) 產生的 " -"%(attr)s 不符。正在將映像檔狀態設為「已結束」。" - -msgid "Supported values for the 'container_format' image attribute" -msgstr "'container_format' 映像檔屬性的支援值" - -msgid "Supported values for the 'disk_format' image attribute" -msgstr "'disk_format' 映像檔屬性的支援值" - -#, python-format -msgid "Suppressed respawn as %(serv)s was %(rsn)s." -msgstr "已暫停重新大量產生,因為 %(serv)s 是 %(rsn)s。" - -msgid "System SIGHUP signal received." -msgstr "接收到系統 SIGHUP 信號。" - -#, python-format -msgid "Task '%s' is required" -msgstr "需要作業 '%s'" - -msgid "Task does not exist" -msgstr "作業不存在" - -msgid "Task failed due to Internal Error" -msgstr "由於內部錯誤,作業失敗" - -msgid "Task was not configured properly" -msgstr "作業未適當地配置" - -#, python-format -msgid "Task with the given id %(task_id)s was not found" -msgstr "找不到具有給定 ID %(task_id)s 的作業" - -msgid "The \"changes-since\" filter is no longer available on v2." 
-msgstr "在第 2 版上,已無法再使用 \"changes-since\" 過濾器。" - -#, python-format -msgid "The CA file you specified %s does not exist" -msgstr "指定的 CA 檔 %s 不存在" - -#, python-format -msgid "" -"The Image %(image_id)s object being created by this task %(task_id)s, is no " -"longer in valid status for further processing." -msgstr "" -"此作業 %(task_id)s 所建立的映像檔 %(image_id)s 物件不再處於有效狀態,無法進一" -"步處理。" - -msgid "The Store URI was malformed." -msgstr "儲存庫 URI 的格式不正確。" - -msgid "" -"The URL to the keystone service. If \"use_user_token\" is not in effect and " -"using keystone auth, then URL of keystone can be specified." -msgstr "" -"Keystone 服務的 URL。如果 \"use_user_token\" 未生效並且使用了 Keystone 鑑別," -"則可以指定 Keystone 的 URL。" - -msgid "" -"The administrators password. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "管理者密碼。如果 \"use_user_token\" 未生效,則可以指定管理認證。" - -msgid "" -"The administrators user name. If \"use_user_token\" is not in effect, then " -"admin credentials can be specified." -msgstr "管理者使用者名稱。如果 \"use_user_token\" 未生效,則可以指定管理認證。" - -#, python-format -msgid "The cert file you specified %s does not exist" -msgstr "指定的憑證檔 %s 不存在" - -msgid "The current status of this task" -msgstr "此作業的現行狀態" - -#, python-format -msgid "" -"The device housing the image cache directory %(image_cache_dir)s does not " -"support xattr. It is likely you need to edit your fstab and add the " -"user_xattr option to the appropriate line for the device housing the cache " -"directory." -msgstr "" -"存放映像檔快取目錄 %(image_cache_dir)s 的裝置不支援 xattr。您可能需要編輯 " -"fstab 並將 user_xattr 選項新增至存放快取目錄之裝置的適當行。" - -#, python-format -msgid "" -"The given uri is not valid. 
Please specify a valid uri from the following " -"list of supported uri %(supported)s" -msgstr "" -"給定的 URI 無效。請從下列受支援的 URI %(supported)s 清單中指定有效的 URI" - -#, python-format -msgid "The incoming image is too large: %s" -msgstr "送入的映像檔太大:%s" - -#, python-format -msgid "The key file you specified %s does not exist" -msgstr "指定的金鑰檔 %s 不存在" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image locations. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"容許的映像檔位置數目已超出此限制。已嘗試:%(attempted)s,上限:%(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image members for this " -"image. Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"此映像檔容許的映像檔成員數目已超出此限制。已嘗試:%(attempted)s,上限:" -"%(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(attempted)s, Maximum: %(maximum)s" -msgstr "" -"容許的映像檔內容數目已超出此限制。已嘗試:%(attempted)s,上限:%(maximum)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image properties. " -"Attempted: %(num)s, Maximum: %(quota)s" -msgstr "容許的映像檔內容數目已超出此限制。已嘗試:%(num)s,上限:%(quota)s" - -#, python-format -msgid "" -"The limit has been exceeded on the number of allowed image tags. Attempted: " -"%(attempted)s, Maximum: %(maximum)s" -msgstr "" -"容許的映像檔標籤數目已超出此限制。已嘗試:%(attempted)s,上限:%(maximum)s" - -#, python-format -msgid "The location %(location)s already exists" -msgstr "位置 %(location)s 已存在" - -#, python-format -msgid "The location data has an invalid ID: %d" -msgstr "位置資料的 ID 無效:%d" - -#, python-format -msgid "" -"The metadata definition %(record_type)s with name=%(record_name)s not " -"deleted. Other records still refer to it." -msgstr "" -"未刪除名稱為 %(record_name)s 的 meta 資料定義 %(record_type)s。其他記錄仍參照" -"此 meta 資料定義。" - -#, python-format -msgid "The metadata definition namespace=%(namespace_name)s already exists." 
-msgstr "Meta 資料定義名稱空間 %(namespace_name)s 已經存在。" - -#, python-format -msgid "" -"The metadata definition object with name=%(object_name)s was not found in " -"namespace=%(namespace_name)s." -msgstr "" -"在下列名稱空間中,找不到名稱為 %(object_name)s 的 meta 資料定義物件:" -"%(namespace_name)s。" - -#, python-format -msgid "" -"The metadata definition property with name=%(property_name)s was not found " -"in namespace=%(namespace_name)s." -msgstr "" -"在下列名稱空間中,找不到名稱為 %(property_name)s 的 meta 資料定義內容:" -"%(namespace_name)s。" - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s already exists." -msgstr "" -"資源類型 %(resource_type_name)s 與名稱空間 %(namespace_name)s 的meta 資料定義" -"資源類型關聯已存在。" - -#, python-format -msgid "" -"The metadata definition resource-type association of resource-type=" -"%(resource_type_name)s to namespace=%(namespace_name)s, was not found." -msgstr "" -"找不到資源類型 %(resource_type_name)s 與名稱空間 %(namespace_name)s 的meta 資" -"料定義資源類型關聯。" - -#, python-format -msgid "" -"The metadata definition resource-type with name=%(resource_type_name)s, was " -"not found." -msgstr "找不到名稱為 %(resource_type_name)s 的 meta 資料定義資源類型。" - -#, python-format -msgid "" -"The metadata definition tag with name=%(name)s was not found in namespace=" -"%(namespace_name)s." -msgstr "" -"在下列名稱空間中,找不到名稱為 %(name)s 的 meta 資料定義標籤:" -"%(namespace_name)s。" - -msgid "The parameters required by task, JSON blob" -msgstr "作業所需的參數:JSON 二進位大型物件" - -msgid "The provided image is too large." -msgstr "所提供的映像檔太大。" - -msgid "" -"The region for the authentication service. If \"use_user_token\" is not in " -"effect and using keystone auth, then region name can be specified." -msgstr "" -"鑑別服務的區域。如果 \"use_user_token\" 未生效並且使用了 Keystone 鑑別,則可" -"以指定區域名稱。" - -msgid "The request returned 500 Internal Server Error." -msgstr "要求傳回了「500 內部伺服器錯誤」。" - -msgid "" -"The request returned 503 Service Unavailable. 
This generally occurs on " -"service overload or other transient outage." -msgstr "" -"要求傳回了「503 無法使用服務」。通常,在服務超載或其他暫時性服務中斷時發生。" - -#, python-format -msgid "" -"The request returned a 302 Multiple Choices. This generally means that you " -"have not included a version indicator in a request URI.\n" -"\n" -"The body of response returned:\n" -"%(body)s" -msgstr "" -"要求傳回了「302 多重選擇」。這通常表示要求 URI 中尚不包含版本指示符。\n" -"\n" -"傳回了回應內文:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned a 413 Request Entity Too Large. This generally means " -"that rate limiting or a quota threshold was breached.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"要求傳回了「413 要求實體太大」。這通常表示已違反評比限制或配額臨界值。\n" -"\n" -"回應內文:\n" -"%(body)s" - -#, python-format -msgid "" -"The request returned an unexpected status: %(status)s.\n" -"\n" -"The response body:\n" -"%(body)s" -msgstr "" -"要求傳回了非預期的狀態:%(status)s。\n" -"\n" -"回應內文:\n" -"%(body)s" - -msgid "" -"The requested image has been deactivated. Image data download is forbidden." -msgstr "已取消啟動所要求的映像檔。已禁止下載映像檔資料。" - -msgid "The result of current task, JSON blob" -msgstr "現行作業的結果:JSON 二進位大型物件" - -#, python-format -msgid "" -"The size of the data %(image_size)s will exceed the limit. %(remaining)s " -"bytes remaining." 
-msgstr "資料的大小 %(image_size)s 將超出該限制。剩餘 %(remaining)s 個位元組。" - -#, python-format -msgid "The specified member %s could not be found" -msgstr "找不到指定的成員 %s" - -#, python-format -msgid "The specified metadata object %s could not be found" -msgstr "找不到指定的 meta 資料物件 %s" - -#, python-format -msgid "The specified metadata tag %s could not be found" -msgstr "找不到指定的 meta 資料標籤 %s" - -#, python-format -msgid "The specified namespace %s could not be found" -msgstr "找不到指定的名稱空間 %s" - -#, python-format -msgid "The specified property %s could not be found" -msgstr "找不到指定的內容 %s" - -#, python-format -msgid "The specified resource type %s could not be found " -msgstr "找不到指定的資源類型 %s" - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'" -msgstr "只能將已刪除映像檔位置的狀態設為 'pending_delete' 或'deleted'" - -msgid "" -"The status of deleted image location can only be set to 'pending_delete' or " -"'deleted'." -msgstr "只能將已刪除映像檔位置的狀態設為 'pending_delete' 或'deleted'。" - -msgid "The status of this image member" -msgstr "此映像檔成員的狀態" - -msgid "" -"The strategy to use for authentication. If \"use_user_token\" is not in " -"effect, then auth strategy can be specified." -msgstr "" -"用於進行鑑別的策略。如果 \"use_user_token\" 未生效,則可以指定鑑別策略。" - -#, python-format -msgid "" -"The target member %(member_id)s is already associated with image " -"%(image_id)s." -msgstr "目標成員 %(member_id)s 已經與映像檔%(image_id)s 相關聯。" - -msgid "" -"The tenant name of the administrative user. If \"use_user_token\" is not in " -"effect, then admin tenant name can be specified." -msgstr "" -"管理使用者的承租人名稱。如果 \"use_user_token\" 未生效,則可以指定管理承租人" -"名稱。" - -msgid "The type of task represented by this content" -msgstr "此內容所表示的作業類型" - -msgid "The unique namespace text." -msgstr "唯一的名稱空間文字。" - -msgid "The user friendly name for the namespace. Used by UI if available." -msgstr "對使用者更為友善的名稱空間名稱。如果有的話,則由使用者介面使用。" - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. 
Please " -"verify it. Error: %(ioe)s" -msgstr "" -"%(error_key_name)s %(error_filename)s 有問題。請驗證問題。錯誤:%(ioe)s" - -#, python-format -msgid "" -"There is a problem with your %(error_key_name)s %(error_filename)s. Please " -"verify it. OpenSSL error: %(ce)s" -msgstr "" -"%(error_key_name)s %(error_filename)s 有問題。請驗證問題。OpenSSL 錯誤:" -"%(ce)s" - -#, python-format -msgid "" -"There is a problem with your key pair. Please verify that cert " -"%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" -msgstr "" -"金鑰組有問題。請確認憑證 %(cert_file)s 及金鑰 %(key_file)s 是配對的。OpenSSL " -"錯誤 %(ce)s" - -msgid "There was an error configuring the client." -msgstr "配置用戶端時發生錯誤。" - -msgid "There was an error connecting to a server" -msgstr "連接至伺服器時發生錯誤" - -msgid "" -"This operation is currently not permitted on Glance Tasks. They are auto " -"deleted after reaching the time based on their expires_at property." -msgstr "" -"目前不允許對 Glance 作業執行這項作業。根據它們的 expires_at內容,將在達到時間" -"之後自動刪除它們。" - -msgid "This operation is currently not permitted on Glance images details." -msgstr "目前不允許對 Glance 映像檔詳細資料執行這項作業。" - -msgid "" -"Time in hours for which a task lives after, either succeeding or failing" -msgstr "作業在成功或失敗後存活的時間(小時)" - -msgid "Too few arguments." -msgstr "引數太少。" - -msgid "" -"URI cannot contain more than one occurrence of a scheme.If you have " -"specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " -"you need to change it to use the swift+http:// scheme, like so: swift+http://" -"user:pass@authurl.com/v1/container/obj" -msgstr "" -"URI 中不能多次出現某一架構。如果所指定的 URI 類似於 swift://user:pass@http://" -"authurl.com/v1/container/obj,則需要將其變更成使用 swift+http:// 架構,例如:" -"swift+http://user:pass@authurl.com/v1/container/obj" - -msgid "URL to access the image file kept in external store" -msgstr "用來存取外部儲存庫中所保留之映像檔的 URL" - -#, python-format -msgid "" -"Unable to create pid file %(pid)s. 
Running as non-root?\n" -"Falling back to a temp file, you can stop %(service)s service using:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" -msgstr "" -"無法建立 PID 檔案 %(pid)s。要以非 root 使用者身分執行嗎?\n" -"正在撤回而使用暫存檔,您可以使用下列指令來停止 %(service)s 服務:\n" -" %(file)s %(server)s stop --pid-file %(fb)s" - -#, python-format -msgid "Unable to filter by unknown operator '%s'." -msgstr "無法依不明運算子 '%s' 進行過濾。" - -msgid "Unable to filter on a range with a non-numeric value." -msgstr "無法對包含非數值的範圍進行過濾。" - -msgid "Unable to filter on a unknown operator." -msgstr "無法依不明運算子進行過濾。" - -msgid "Unable to filter using the specified operator." -msgstr "無法使用指定的運算子進行過濾。" - -msgid "Unable to filter using the specified range." -msgstr "無法使用指定的範圍進行過濾。" - -#, python-format -msgid "Unable to find '%s' in JSON Schema change" -msgstr "在「JSON 綱目」變更中找不到 '%s'" - -#, python-format -msgid "" -"Unable to find `op` in JSON Schema change. It must be one of the following: " -"%(available)s." -msgstr "在 JSON 綱目變更中找不到 `op`。它必須是下列其中一項:%(available)s。" - -msgid "Unable to increase file descriptor limit. Running as non-root?" -msgstr "無法增加檔案描述子限制。要以非 root 使用者身分執行嗎?" - -#, python-format -msgid "" -"Unable to load %(app_name)s from configuration file %(conf_file)s.\n" -"Got: %(e)r" -msgstr "" -"無法從配置檔 %(conf_file)s 載入 %(app_name)s。\n" -"發生錯誤:%(e)r" - -#, python-format -msgid "Unable to load schema: %(reason)s" -msgstr "無法載入綱目:%(reason)s" - -#, python-format -msgid "Unable to locate paste config file for %s." -msgstr "找不到 %s 的 paste 配置檔。" - -#, python-format -msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" -msgstr "無法上傳映像檔 %(image_id)s 的重複映像檔資料:%(error)s" - -msgid "Unauthorized image access" -msgstr "未獲授權的映像檔存取" - -msgid "Unexpected body type. Expected list/dict." 
-msgstr "非預期的內文類型。預期為清單/字典。" - -#, python-format -msgid "Unexpected response: %s" -msgstr "非預期的回應:%s" - -#, python-format -msgid "Unknown auth strategy '%s'" -msgstr "不明的鑑別策略 '%s'" - -#, python-format -msgid "Unknown command: %s" -msgstr "不明指令:%s" - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "不明的排序方向,必須為 'desc' 或 'asc'" - -msgid "Unrecognized JSON Schema draft version" -msgstr "無法辨識的「JSON 綱目」草稿版本" - -msgid "Unrecognized changes-since value" -msgstr "無法辨識 changes-since 值" - -#, python-format -msgid "Unsupported sort_dir. Acceptable values: %s" -msgstr "不支援的 sort_dir。可接受的值:%s" - -#, python-format -msgid "Unsupported sort_key. Acceptable values: %s" -msgstr "不支援的 sort_key。可接受的值:%s" - -msgid "Virtual size of image in bytes" -msgstr "映像檔的虛擬大小(以位元組為單位)" - -#, python-format -msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" -msgstr "等待 PID %(pid)s (%(file)s) 當掉已達到 15 秒;正在放棄" - -msgid "" -"When running server in SSL mode, you must specify both a cert_file and " -"key_file option value in your configuration file" -msgstr "" -"在 SSL 模式下執行伺服器時,必須在配置檔中指定 cert_file 及 key_file 選項值" - -msgid "" -"Whether to pass through the user token when making requests to the registry. " -"To prevent failures with token expiration during big files upload, it is " -"recommended to set this parameter to False.If \"use_user_token\" is not in " -"effect, then admin credentials can be specified." -msgstr "" -"是否要在向登錄發出要求時透過使用者記號來傳遞。如果要在上傳大型檔案期間防止與" -"記號有效期限相關的失敗,建議將此參數設定為 False。如果 \"use_user_token\" 未" -"生效,則可以指定管理認證。" - -#, python-format -msgid "Wrong command structure: %s" -msgstr "錯誤的指令結構:%s" - -msgid "You are not authenticated." -msgstr "您沒有進行鑑別。" - -msgid "You are not authorized to complete this action." -msgstr "您未獲授權來完成此動作。" - -#, python-format -msgid "You are not authorized to lookup image %s." -msgstr "您未獲授權來查閱映像檔 %s。" - -#, python-format -msgid "You are not authorized to lookup the members of the image %s." 
-msgstr "您未獲授權來查閱映像檔 %s 的成員。" - -#, python-format -msgid "You are not permitted to create a tag in the namespace owned by '%s'" -msgstr "不允許您在 '%s' 擁有的名稱空間中建立標籤" - -msgid "You are not permitted to create image members for the image." -msgstr "不允許您給映像檔建立映像檔成員。" - -#, python-format -msgid "You are not permitted to create images owned by '%s'." -msgstr "不允許您建立擁有者為 '%s' 的映像檔。" - -#, python-format -msgid "You are not permitted to create namespace owned by '%s'" -msgstr "不允許您建立擁有者為 '%s' 的名稱空間" - -#, python-format -msgid "You are not permitted to create object owned by '%s'" -msgstr "不允許您建立擁有者為 '%s' 的物件" - -#, python-format -msgid "You are not permitted to create property owned by '%s'" -msgstr "不允許您建立擁有者為 '%s' 的內容" - -#, python-format -msgid "You are not permitted to create resource_type owned by '%s'" -msgstr "不允許您建立擁有者為 '%s' 的 resource_type" - -#, python-format -msgid "You are not permitted to create this task with owner as: %s" -msgstr "不允許您以擁有者身分來建立此作業:%s" - -msgid "You are not permitted to deactivate this image." -msgstr "不允許您取消啟動此映像檔。" - -msgid "You are not permitted to delete this image." -msgstr "不允許您刪除此映像檔。" - -msgid "You are not permitted to delete this meta_resource_type." -msgstr "不允許您刪除此 meta_resource_type。" - -msgid "You are not permitted to delete this namespace." -msgstr "不允許您刪除此名稱空間。" - -msgid "You are not permitted to delete this object." -msgstr "不允許您刪除此物件。" - -msgid "You are not permitted to delete this property." -msgstr "不允許您刪除此內容。" - -msgid "You are not permitted to delete this tag." -msgstr "不允許您刪除此標籤。" - -#, python-format -msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." -msgstr "不允許您修改此 %(resource)s 上的 '%(attr)s'。" - -#, python-format -msgid "You are not permitted to modify '%s' on this image." -msgstr "不允許您修改此映像檔上的 '%s'。" - -msgid "You are not permitted to modify locations for this image." -msgstr "不允許您修改此映像檔的位置。" - -msgid "You are not permitted to modify tags on this image." 
-msgstr "不允許您修改此映像檔上的標籤。" - -msgid "You are not permitted to modify this image." -msgstr "不允許您修改此映像檔。" - -msgid "You are not permitted to reactivate this image." -msgstr "不允許您重新啟動此映像檔。" - -msgid "You are not permitted to set status on this task." -msgstr "不允許您在此作業上設定狀態。" - -msgid "You are not permitted to update this namespace." -msgstr "不允許您更新此名稱空間。" - -msgid "You are not permitted to update this object." -msgstr "不允許您更新此物件。" - -msgid "You are not permitted to update this property." -msgstr "不允許您更新此內容。" - -msgid "You are not permitted to update this tag." -msgstr "不允許您更新此標籤。" - -msgid "You are not permitted to upload data for this image." -msgstr "不允許您給此映像檔上傳資料。" - -#, python-format -msgid "You cannot add image member for %s" -msgstr "無法給 %s 新增映像檔成員" - -#, python-format -msgid "You cannot delete image member for %s" -msgstr "無法刪除 %s 的映像檔成員" - -#, python-format -msgid "You cannot get image member for %s" -msgstr "無法取得 %s 的映像檔成員" - -#, python-format -msgid "You cannot update image member %s" -msgstr "無法更新映像檔成員 %s" - -msgid "You do not own this image" -msgstr "您不是此映像檔的擁有者" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a cert, " -"however you have failed to supply either a key_file parameter or set the " -"GLANCE_CLIENT_KEY_FILE environ variable" -msgstr "" -"您已選取在連接時使用 SSL,並且提供了憑證,但未提供 key_file 參數,也沒有設定 " -"GLANCE_CLIENT_KEY_FILE 環境變數" - -msgid "" -"You have selected to use SSL in connecting, and you have supplied a key, " -"however you have failed to supply either a cert_file parameter or set the " -"GLANCE_CLIENT_CERT_FILE environ variable" -msgstr "" -"您已選取在連接時使用 SSL,並且提供了金鑰,但未提供 cert_file 參數,也沒有設" -"定 GLANCE_CLIENT_CERT_FILE 環境變數" - -msgid "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" -msgstr "" -"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" -"fA-F]){12}$" - -#, python-format -msgid "__init__() got unexpected keyword argument '%s'" -msgstr 
"__init__() 取得非預期的關鍵字引數 '%s'" - -#, python-format -msgid "" -"cannot transition from %(current)s to %(next)s in update (wanted from_state=" -"%(from)s)" -msgstr "更新時無法從 %(current)s 轉移至 %(next)s(需要 from_state = %(from)s)" - -#, python-format -msgid "custom properties (%(props)s) conflict with base properties" -msgstr "自訂內容 (%(props)s) 與基本內容相衝突" - -msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" -msgstr "此平台上無法使用 eventlet 'poll' 及 'selects' 中心。" - -msgid "is_public must be None, True, or False" -msgstr "is_public 必須是 None、True 或 False" - -msgid "limit param must be an integer" -msgstr "限制參數必須是整數" - -msgid "limit param must be positive" -msgstr "限制參數必須是正數" - -msgid "md5 hash of image contents." -msgstr "映像檔內容的 md5 雜湊值。" - -#, python-format -msgid "new_image() got unexpected keywords %s" -msgstr "new_image() 取得非預期的關鍵字 %s" - -msgid "protected must be True, or False" -msgstr "protected 必須是 True 或 False" - -#, python-format -msgid "unable to launch %(serv)s. Got error: %(e)s" -msgstr "無法啟動 %(serv)s。取得錯誤:%(e)s" - -#, python-format -msgid "x-openstack-request-id is too long, max size %s" -msgstr "x-openstack-request-id 太長,大小上限為 %s" diff --git a/glance/location.py b/glance/location.py deleted file mode 100644 index f7f22509..00000000 --- a/glance/location.py +++ /dev/null @@ -1,511 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -import copy - -from cryptography import exceptions as crypto_exception -from cursive import exception as cursive_exception -from cursive import signature_utils -import glance_store as store -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils - -from glance.common import exception -from glance.common import utils -import glance.domain.proxy -from glance.i18n import _, _LE, _LI, _LW - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class ImageRepoProxy(glance.domain.proxy.Repo): - - def __init__(self, image_repo, context, store_api, store_utils): - self.context = context - self.store_api = store_api - proxy_kwargs = {'context': context, 'store_api': store_api, - 'store_utils': store_utils} - super(ImageRepoProxy, self).__init__(image_repo, - item_proxy_class=ImageProxy, - item_proxy_kwargs=proxy_kwargs) - - self.db_api = glance.db.get_api() - - def _set_acls(self, image): - public = image.visibility == 'public' - member_ids = [] - if image.locations and not public: - member_repo = _get_member_repo_for_store(image, - self.context, - self.db_api, - self.store_api) - member_ids = [m.member_id for m in member_repo.list()] - for location in image.locations: - self.store_api.set_acls(location['url'], public=public, - read_tenants=member_ids, - context=self.context) - - def add(self, image): - result = super(ImageRepoProxy, self).add(image) - self._set_acls(image) - return result - - def save(self, image, from_state=None): - result = super(ImageRepoProxy, self).save(image, from_state=from_state) - self._set_acls(image) - return result - - -def _get_member_repo_for_store(image, context, db_api, store_api): - image_member_repo = glance.db.ImageMemberRepo(context, db_api, image) - store_image_repo = glance.location.ImageMemberRepoProxy( - image_member_repo, image, context, store_api) - - return store_image_repo - - -def _check_location_uri(context, store_api, 
store_utils, uri): - """Check if an image location is valid. - - :param context: Glance request context - :param store_api: store API module - :param store_utils: store utils module - :param uri: location's uri string - """ - - try: - # NOTE(zhiyan): Some stores return zero when it catch exception - is_ok = (store_utils.validate_external_location(uri) and - store_api.get_size_from_backend(uri, context=context) > 0) - except (store.UnknownScheme, store.NotFound, store.BadStoreUri): - is_ok = False - if not is_ok: - reason = _('Invalid location') - raise exception.BadStoreUri(message=reason) - - -def _check_image_location(context, store_api, store_utils, location): - _check_location_uri(context, store_api, store_utils, location['url']) - store_api.check_location_metadata(location['metadata']) - - -def _set_image_size(context, image, locations): - if not image.size: - for location in locations: - size_from_backend = store.get_size_from_backend( - location['url'], context=context) - - if size_from_backend: - # NOTE(flwang): This assumes all locations have the same size - image.size = size_from_backend - break - - -def _count_duplicated_locations(locations, new): - """ - To calculate the count of duplicated locations for new one. 
- - :param locations: The exiting image location set - :param new: The new image location - :returns: The count of duplicated locations - """ - - ret = 0 - for loc in locations: - if loc['url'] == new['url'] and loc['metadata'] == new['metadata']: - ret += 1 - return ret - - -class ImageFactoryProxy(glance.domain.proxy.ImageFactory): - def __init__(self, factory, context, store_api, store_utils): - self.context = context - self.store_api = store_api - self.store_utils = store_utils - proxy_kwargs = {'context': context, 'store_api': store_api, - 'store_utils': store_utils} - super(ImageFactoryProxy, self).__init__(factory, - proxy_class=ImageProxy, - proxy_kwargs=proxy_kwargs) - - def new_image(self, **kwargs): - locations = kwargs.get('locations', []) - for loc in locations: - _check_image_location(self.context, - self.store_api, - self.store_utils, - loc) - loc['status'] = 'active' - if _count_duplicated_locations(locations, loc) > 1: - raise exception.DuplicateLocation(location=loc['url']) - return super(ImageFactoryProxy, self).new_image(**kwargs) - - -class StoreLocations(collections.MutableSequence): - """ - The proxy for store location property. It takes responsibility for:: - - 1. Location uri correctness checking when adding a new location. - 2. Remove the image data from the store when a location is removed - from an image. - - """ - def __init__(self, image_proxy, value): - self.image_proxy = image_proxy - if isinstance(value, list): - self.value = value - else: - self.value = list(value) - - def append(self, location): - # NOTE(flaper87): Insert this - # location at the very end of - # the value list. 
- self.insert(len(self.value), location) - - def extend(self, other): - if isinstance(other, StoreLocations): - locations = other.value - else: - locations = list(other) - - for location in locations: - self.append(location) - - def insert(self, i, location): - _check_image_location(self.image_proxy.context, - self.image_proxy.store_api, - self.image_proxy.store_utils, - location) - location['status'] = 'active' - if _count_duplicated_locations(self.value, location) > 0: - raise exception.DuplicateLocation(location=location['url']) - - self.value.insert(i, location) - _set_image_size(self.image_proxy.context, - self.image_proxy, - [location]) - - def pop(self, i=-1): - location = self.value.pop(i) - try: - self.image_proxy.store_utils.delete_image_location_from_backend( - self.image_proxy.context, - self.image_proxy.image.image_id, - location) - except Exception: - with excutils.save_and_reraise_exception(): - self.value.insert(i, location) - return location - - def count(self, location): - return self.value.count(location) - - def index(self, location, *args): - return self.value.index(location, *args) - - def remove(self, location): - if self.count(location): - self.pop(self.index(location)) - else: - self.value.remove(location) - - def reverse(self): - self.value.reverse() - - # Mutable sequence, so not hashable - __hash__ = None - - def __getitem__(self, i): - return self.value.__getitem__(i) - - def __setitem__(self, i, location): - _check_image_location(self.image_proxy.context, - self.image_proxy.store_api, - self.image_proxy.store_utils, - location) - location['status'] = 'active' - self.value.__setitem__(i, location) - _set_image_size(self.image_proxy.context, - self.image_proxy, - [location]) - - def __delitem__(self, i): - if isinstance(i, slice): - if i.step not in (None, 1): - raise NotImplementedError("slice with step") - self.__delslice__(i.start, i.stop) - return - location = None - try: - location = self.value[i] - except Exception: - del 
self.value[i] - return - self.image_proxy.store_utils.delete_image_location_from_backend( - self.image_proxy.context, - self.image_proxy.image.image_id, - location) - del self.value[i] - - def __delslice__(self, i, j): - i = 0 if i is None else max(i, 0) - j = len(self) if j is None else max(j, 0) - locations = [] - try: - locations = self.value[i:j] - except Exception: - del self.value[i:j] - return - for location in locations: - self.image_proxy.store_utils.delete_image_location_from_backend( - self.image_proxy.context, - self.image_proxy.image.image_id, - location) - del self.value[i] - - def __iadd__(self, other): - self.extend(other) - return self - - def __contains__(self, location): - return location in self.value - - def __len__(self): - return len(self.value) - - def __cast(self, other): - if isinstance(other, StoreLocations): - return other.value - else: - return other - - def __cmp__(self, other): - return cmp(self.value, self.__cast(other)) - - def __eq__(self, other): - return self.value == self.__cast(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __iter__(self): - return iter(self.value) - - def __copy__(self): - return type(self)(self.image_proxy, self.value) - - def __deepcopy__(self, memo): - # NOTE(zhiyan): Only copy location entries, others can be reused. - value = copy.deepcopy(self.value, memo) - self.image_proxy.image.locations = value - return type(self)(self.image_proxy, value) - - -def _locations_proxy(target, attr): - """ - Make a location property proxy on the image object. 
- - :param target: the image object on which to add the proxy - :param attr: the property proxy we want to hook - """ - def get_attr(self): - value = getattr(getattr(self, target), attr) - return StoreLocations(self, value) - - def set_attr(self, value): - if not isinstance(value, (list, StoreLocations)): - reason = _('Invalid locations') - raise exception.BadStoreUri(message=reason) - ori_value = getattr(getattr(self, target), attr) - if ori_value != value: - # NOTE(flwang): If all the URL of passed-in locations are same as - # current image locations, that means user would like to only - # update the metadata, not the URL. - ordered_value = sorted([loc['url'] for loc in value]) - ordered_ori = sorted([loc['url'] for loc in ori_value]) - if len(ori_value) > 0 and ordered_value != ordered_ori: - raise exception.Invalid(_('Original locations is not empty: ' - '%s') % ori_value) - # NOTE(zhiyan): Check locations are all valid - # NOTE(flwang): If all the URL of passed-in locations are same as - # current image locations, then it's not necessary to verify those - # locations again. Otherwise, if there is any restricted scheme in - # existing locations. _check_image_location will fail. 
- if ordered_value != ordered_ori: - for loc in value: - _check_image_location(self.context, - self.store_api, - self.store_utils, - loc) - loc['status'] = 'active' - if _count_duplicated_locations(value, loc) > 1: - raise exception.DuplicateLocation(location=loc['url']) - _set_image_size(self.context, getattr(self, target), value) - else: - for loc in value: - loc['status'] = 'active' - return setattr(getattr(self, target), attr, list(value)) - - def del_attr(self): - value = getattr(getattr(self, target), attr) - while len(value): - self.store_utils.delete_image_location_from_backend( - self.context, - self.image.image_id, - value[0]) - del value[0] - setattr(getattr(self, target), attr, value) - return delattr(getattr(self, target), attr) - - return property(get_attr, set_attr, del_attr) - - -class ImageProxy(glance.domain.proxy.Image): - - locations = _locations_proxy('image', 'locations') - - def __init__(self, image, context, store_api, store_utils): - self.image = image - self.context = context - self.store_api = store_api - self.store_utils = store_utils - proxy_kwargs = { - 'context': context, - 'image': self, - 'store_api': store_api, - } - super(ImageProxy, self).__init__( - image, member_repo_proxy_class=ImageMemberRepoProxy, - member_repo_proxy_kwargs=proxy_kwargs) - - def delete(self): - self.image.delete() - if self.image.locations: - for location in self.image.locations: - self.store_utils.delete_image_location_from_backend( - self.context, - self.image.image_id, - location) - - def set_data(self, data, size=None): - if size is None: - size = 0 # NOTE(markwash): zero -> unknown size - - # Create the verifier for signature verification (if correct properties - # are present) - extra_props = self.image.extra_properties - if (signature_utils.should_create_verifier(extra_props)): - # NOTE(bpoulos): if creating verifier fails, exception will be - # raised - img_signature = extra_props[signature_utils.SIGNATURE] - hash_method = 
extra_props[signature_utils.HASH_METHOD] - key_type = extra_props[signature_utils.KEY_TYPE] - cert_uuid = extra_props[signature_utils.CERT_UUID] - verifier = signature_utils.get_verifier( - context=self.context, - img_signature_certificate_uuid=cert_uuid, - img_signature_hash_method=hash_method, - img_signature=img_signature, - img_signature_key_type=key_type - ) - else: - verifier = None - - location, size, checksum, loc_meta = self.store_api.add_to_backend( - CONF, - self.image.image_id, - utils.LimitingReader(utils.CooperativeReader(data), - CONF.image_size_cap), - size, - context=self.context, - verifier=verifier) - - # NOTE(bpoulos): if verification fails, exception will be raised - if verifier: - try: - verifier.verify() - LOG.info(_LI("Successfully verified signature for image %s"), - self.image.image_id) - except crypto_exception.InvalidSignature: - raise cursive_exception.SignatureVerificationError( - _('Signature verification failed') - ) - - self.image.locations = [{'url': location, 'metadata': loc_meta, - 'status': 'active'}] - self.image.size = size - self.image.checksum = checksum - self.image.status = 'active' - - def get_data(self, offset=0, chunk_size=None): - if not self.image.locations: - # NOTE(mclaren): This is the only set of arguments - # which work with this exception currently, see: - # https://bugs.launchpad.net/glance-store/+bug/1501443 - # When the above glance_store bug is fixed we can - # add a msg as usual. 
- raise store.NotFound(image=None) - err = None - for loc in self.image.locations: - try: - data, size = self.store_api.get_from_backend( - loc['url'], - offset=offset, - chunk_size=chunk_size, - context=self.context) - - return data - except Exception as e: - LOG.warn(_LW('Get image %(id)s data failed: ' - '%(err)s.') - % {'id': self.image.image_id, - 'err': encodeutils.exception_to_unicode(e)}) - err = e - # tried all locations - LOG.error(_LE('Glance tried all active locations to get data for ' - 'image %s but all have failed.') % self.image.image_id) - raise err - - -class ImageMemberRepoProxy(glance.domain.proxy.Repo): - def __init__(self, repo, image, context, store_api): - self.repo = repo - self.image = image - self.context = context - self.store_api = store_api - super(ImageMemberRepoProxy, self).__init__(repo) - - def _set_acls(self): - public = self.image.visibility == 'public' - if self.image.locations and not public: - member_ids = [m.member_id for m in self.repo.list()] - for location in self.image.locations: - self.store_api.set_acls(location['url'], public=public, - read_tenants=member_ids, - context=self.context) - - def add(self, member): - super(ImageMemberRepoProxy, self).add(member) - self._set_acls() - - def remove(self, member): - super(ImageMemberRepoProxy, self).remove(member) - self._set_acls() diff --git a/glance/notifier.py b/glance/notifier.py deleted file mode 100644 index d2c2075b..00000000 --- a/glance/notifier.py +++ /dev/null @@ -1,913 +0,0 @@ -# Copyright 2011, OpenStack Foundation -# Copyright 2012, Red Hat, Inc. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -import glance_store -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging -from oslo_utils import encodeutils -from oslo_utils import excutils -import six -import webob - -from glance.common import exception -from glance.common import timeutils -from glance.domain import proxy as domain_proxy -from glance.i18n import _, _LE - - -notifier_opts = [ - cfg.StrOpt('default_publisher_id', - default="image.localhost", - help=_(""" -Default publisher_id for outgoing Glance notifications. - -This is the value that the notification driver will use to identify -messages for events originating from the Glance service. Typically, -this is the hostname of the instance that generated the message. - -Possible values: - * Any reasonable instance identifier, for example: image.host1 - -Related options: - * None - -""")), - cfg.ListOpt('disabled_notifications', - default=[], - help=_(""" -List of notifications to be disabled. - -Specify a list of notifications that should not be emitted. -A notification can be given either as a notification type to -disable a single event notification, or as a notification group -prefix to disable all event notifications within a group. - -Possible values: - A comma-separated list of individual notification types or - notification groups to be disabled. 
Currently supported groups: - * image - * image.member - * task - * metadef_namespace - * metadef_object - * metadef_property - * metadef_resource_type - * metadef_tag - For a complete listing and description of each event refer to: - http://docs.openstack.org/developer/glance/notifications.html - - The values must be specified as: . - For example: image.create,task.success,metadef_tag - -Related options: - * None - -""")), -] - -CONF = cfg.CONF -CONF.register_opts(notifier_opts) - -LOG = logging.getLogger(__name__) - - -def set_defaults(control_exchange='glance'): - oslo_messaging.set_transport_defaults(control_exchange) - - -def get_transport(): - return oslo_messaging.get_notification_transport(CONF) - - -class Notifier(object): - """Uses a notification strategy to send out messages about events.""" - - def __init__(self): - publisher_id = CONF.default_publisher_id - self._transport = get_transport() - self._notifier = oslo_messaging.Notifier(self._transport, - publisher_id=publisher_id) - - def warn(self, event_type, payload): - self._notifier.warn({}, event_type, payload) - - def info(self, event_type, payload): - self._notifier.info({}, event_type, payload) - - def error(self, event_type, payload): - self._notifier.error({}, event_type, payload) - - -def _get_notification_group(notification): - return notification.split('.', 1)[0] - - -def _is_notification_enabled(notification): - disabled_notifications = CONF.disabled_notifications - notification_group = _get_notification_group(notification) - - notifications = (notification, notification_group) - for disabled_notification in disabled_notifications: - if disabled_notification in notifications: - return False - - return True - - -def _send_notification(notify, notification_type, payload): - if _is_notification_enabled(notification_type): - notify(notification_type, payload) - - -def format_image_notification(image): - """ - Given a glance.domain.Image object, return a dictionary of relevant - notification 
information. We purposely do not include 'location' - as it may contain credentials. - """ - return { - 'id': image.image_id, - 'name': image.name, - 'status': image.status, - 'created_at': timeutils.isotime(image.created_at), - 'updated_at': timeutils.isotime(image.updated_at), - 'min_disk': image.min_disk, - 'min_ram': image.min_ram, - 'protected': image.protected, - 'checksum': image.checksum, - 'owner': image.owner, - 'disk_format': image.disk_format, - 'container_format': image.container_format, - 'size': image.size, - 'virtual_size': image.virtual_size, - 'is_public': image.visibility == 'public', - 'visibility': image.visibility, - 'properties': dict(image.extra_properties), - 'tags': list(image.tags), - 'deleted': False, - 'deleted_at': None, - } - - -def format_image_member_notification(image_member): - """Given a glance.domain.ImageMember object, return a dictionary of relevant - notification information. - """ - return { - 'image_id': image_member.image_id, - 'member_id': image_member.member_id, - 'status': image_member.status, - 'created_at': timeutils.isotime(image_member.created_at), - 'updated_at': timeutils.isotime(image_member.updated_at), - 'deleted': False, - 'deleted_at': None, - } - - -def format_task_notification(task): - # NOTE(nikhil): input is not passed to the notifier payload as it may - # contain sensitive info. 
- return { - 'id': task.task_id, - 'type': task.type, - 'status': task.status, - 'result': None, - 'owner': task.owner, - 'message': None, - 'expires_at': timeutils.isotime(task.expires_at), - 'created_at': timeutils.isotime(task.created_at), - 'updated_at': timeutils.isotime(task.updated_at), - 'deleted': False, - 'deleted_at': None, - } - - -def format_metadef_namespace_notification(metadef_namespace): - return { - 'namespace': metadef_namespace.namespace, - 'namespace_old': metadef_namespace.namespace, - 'display_name': metadef_namespace.display_name, - 'protected': metadef_namespace.protected, - 'visibility': metadef_namespace.visibility, - 'owner': metadef_namespace.owner, - 'description': metadef_namespace.description, - 'created_at': timeutils.isotime(metadef_namespace.created_at), - 'updated_at': timeutils.isotime(metadef_namespace.updated_at), - 'deleted': False, - 'deleted_at': None, - } - - -def format_metadef_object_notification(metadef_object): - object_properties = metadef_object.properties or {} - properties = [] - for name, prop in six.iteritems(object_properties): - object_property = _format_metadef_object_property(name, prop) - properties.append(object_property) - - return { - 'namespace': metadef_object.namespace, - 'name': metadef_object.name, - 'name_old': metadef_object.name, - 'properties': properties, - 'required': metadef_object.required, - 'description': metadef_object.description, - 'created_at': timeutils.isotime(metadef_object.created_at), - 'updated_at': timeutils.isotime(metadef_object.updated_at), - 'deleted': False, - 'deleted_at': None, - } - - -def _format_metadef_object_property(name, metadef_property): - return { - 'name': name, - 'type': metadef_property.type or None, - 'title': metadef_property.title or None, - 'description': metadef_property.description or None, - 'default': metadef_property.default or None, - 'minimum': metadef_property.minimum or None, - 'maximum': metadef_property.maximum or None, - 'enum': 
metadef_property.enum or None, - 'pattern': metadef_property.pattern or None, - 'minLength': metadef_property.minLength or None, - 'maxLength': metadef_property.maxLength or None, - 'confidential': metadef_property.confidential or None, - 'items': metadef_property.items or None, - 'uniqueItems': metadef_property.uniqueItems or None, - 'minItems': metadef_property.minItems or None, - 'maxItems': metadef_property.maxItems or None, - 'additionalItems': metadef_property.additionalItems or None, - } - - -def format_metadef_property_notification(metadef_property): - schema = metadef_property.schema - - return { - 'namespace': metadef_property.namespace, - 'name': metadef_property.name, - 'name_old': metadef_property.name, - 'type': schema.get('type'), - 'title': schema.get('title'), - 'description': schema.get('description'), - 'default': schema.get('default'), - 'minimum': schema.get('minimum'), - 'maximum': schema.get('maximum'), - 'enum': schema.get('enum'), - 'pattern': schema.get('pattern'), - 'minLength': schema.get('minLength'), - 'maxLength': schema.get('maxLength'), - 'confidential': schema.get('confidential'), - 'items': schema.get('items'), - 'uniqueItems': schema.get('uniqueItems'), - 'minItems': schema.get('minItems'), - 'maxItems': schema.get('maxItems'), - 'additionalItems': schema.get('additionalItems'), - 'deleted': False, - 'deleted_at': None, - } - - -def format_metadef_resource_type_notification(metadef_resource_type): - return { - 'namespace': metadef_resource_type.namespace, - 'name': metadef_resource_type.name, - 'name_old': metadef_resource_type.name, - 'prefix': metadef_resource_type.prefix, - 'properties_target': metadef_resource_type.properties_target, - 'created_at': timeutils.isotime(metadef_resource_type.created_at), - 'updated_at': timeutils.isotime(metadef_resource_type.updated_at), - 'deleted': False, - 'deleted_at': None, - } - - -def format_metadef_tag_notification(metadef_tag): - return { - 'namespace': metadef_tag.namespace, - 'name': 
metadef_tag.name, - 'name_old': metadef_tag.name, - 'created_at': timeutils.isotime(metadef_tag.created_at), - 'updated_at': timeutils.isotime(metadef_tag.updated_at), - 'deleted': False, - 'deleted_at': None, - } - - -class NotificationBase(object): - def get_payload(self, obj): - return {} - - def send_notification(self, notification_id, obj, extra_payload=None): - payload = self.get_payload(obj) - if extra_payload is not None: - payload.update(extra_payload) - - _send_notification(self.notifier.info, notification_id, payload) - - -@six.add_metaclass(abc.ABCMeta) -class NotificationProxy(NotificationBase): - def __init__(self, repo, context, notifier): - self.repo = repo - self.context = context - self.notifier = notifier - - super_class = self.get_super_class() - super_class.__init__(self, repo) - - @abc.abstractmethod - def get_super_class(self): - pass - - -@six.add_metaclass(abc.ABCMeta) -class NotificationRepoProxy(NotificationBase): - def __init__(self, repo, context, notifier): - self.repo = repo - self.context = context - self.notifier = notifier - proxy_kwargs = {'context': self.context, 'notifier': self.notifier} - - proxy_class = self.get_proxy_class() - super_class = self.get_super_class() - super_class.__init__(self, repo, proxy_class, proxy_kwargs) - - @abc.abstractmethod - def get_super_class(self): - pass - - @abc.abstractmethod - def get_proxy_class(self): - pass - - -@six.add_metaclass(abc.ABCMeta) -class NotificationFactoryProxy(object): - def __init__(self, factory, context, notifier): - kwargs = {'context': context, 'notifier': notifier} - - proxy_class = self.get_proxy_class() - super_class = self.get_super_class() - super_class.__init__(self, factory, proxy_class, kwargs) - - @abc.abstractmethod - def get_super_class(self): - pass - - @abc.abstractmethod - def get_proxy_class(self): - pass - - -class ImageProxy(NotificationProxy, domain_proxy.Image): - def get_super_class(self): - return domain_proxy.Image - - def get_payload(self, obj): - 
return format_image_notification(obj) - - def _format_image_send(self, bytes_sent): - return { - 'bytes_sent': bytes_sent, - 'image_id': self.repo.image_id, - 'owner_id': self.repo.owner, - 'receiver_tenant_id': self.context.tenant, - 'receiver_user_id': self.context.user, - } - - def _get_chunk_data_iterator(self, data, chunk_size=None): - sent = 0 - for chunk in data: - yield chunk - sent += len(chunk) - - if sent != (chunk_size or self.repo.size): - notify = self.notifier.error - else: - notify = self.notifier.info - - try: - _send_notification(notify, 'image.send', - self._format_image_send(sent)) - except Exception as err: - msg = (_LE("An error occurred during image.send" - " notification: %(err)s") % {'err': err}) - LOG.error(msg) - - def get_data(self, offset=0, chunk_size=None): - # Due to the need of evaluating subsequent proxies, this one - # should return a generator, the call should be done before - # generator creation - data = self.repo.get_data(offset=offset, chunk_size=chunk_size) - return self._get_chunk_data_iterator(data, chunk_size=chunk_size) - - def set_data(self, data, size=None): - self.send_notification('image.prepare', self.repo) - - notify_error = self.notifier.error - try: - self.repo.set_data(data, size) - except glance_store.StorageFull as e: - msg = (_("Image storage media is full: %s") % - encodeutils.exception_to_unicode(e)) - _send_notification(notify_error, 'image.upload', msg) - raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) - except glance_store.StorageWriteDenied as e: - msg = (_("Insufficient permissions on image storage media: %s") - % encodeutils.exception_to_unicode(e)) - _send_notification(notify_error, 'image.upload', msg) - raise webob.exc.HTTPServiceUnavailable(explanation=msg) - except ValueError as e: - msg = (_("Cannot save data for image %(image_id)s: %(error)s") % - {'image_id': self.repo.image_id, - 'error': encodeutils.exception_to_unicode(e)}) - _send_notification(notify_error, 'image.upload', msg) 
- raise webob.exc.HTTPBadRequest( - explanation=encodeutils.exception_to_unicode(e)) - except exception.Duplicate as e: - msg = (_("Unable to upload duplicate image data for image" - "%(image_id)s: %(error)s") % - {'image_id': self.repo.image_id, - 'error': encodeutils.exception_to_unicode(e)}) - _send_notification(notify_error, 'image.upload', msg) - raise webob.exc.HTTPConflict(explanation=msg) - except exception.Forbidden as e: - msg = (_("Not allowed to upload image data for image %(image_id)s:" - " %(error)s") - % {'image_id': self.repo.image_id, - 'error': encodeutils.exception_to_unicode(e)}) - _send_notification(notify_error, 'image.upload', msg) - raise webob.exc.HTTPForbidden(explanation=msg) - except exception.NotFound as e: - exc_str = encodeutils.exception_to_unicode(e) - msg = (_("Image %(image_id)s could not be found after upload." - " The image may have been deleted during the upload:" - " %(error)s") % {'image_id': self.repo.image_id, - 'error': exc_str}) - _send_notification(notify_error, 'image.upload', msg) - raise webob.exc.HTTPNotFound(explanation=exc_str) - except webob.exc.HTTPError as e: - with excutils.save_and_reraise_exception(): - msg = (_("Failed to upload image data for image %(image_id)s" - " due to HTTP error: %(error)s") % - {'image_id': self.repo.image_id, - 'error': encodeutils.exception_to_unicode(e)}) - _send_notification(notify_error, 'image.upload', msg) - except Exception as e: - with excutils.save_and_reraise_exception(): - msg = (_("Failed to upload image data for image %(image_id)s " - "due to internal error: %(error)s") % - {'image_id': self.repo.image_id, - 'error': encodeutils.exception_to_unicode(e)}) - _send_notification(notify_error, 'image.upload', msg) - else: - self.send_notification('image.upload', self.repo) - self.send_notification('image.activate', self.repo) - - -class ImageMemberProxy(NotificationProxy, domain_proxy.ImageMember): - def get_super_class(self): - return domain_proxy.ImageMember - - -class 
ImageFactoryProxy(NotificationFactoryProxy, domain_proxy.ImageFactory): - def get_super_class(self): - return domain_proxy.ImageFactory - - def get_proxy_class(self): - return ImageProxy - - -class ImageRepoProxy(NotificationRepoProxy, domain_proxy.Repo): - def get_super_class(self): - return domain_proxy.Repo - - def get_proxy_class(self): - return ImageProxy - - def get_payload(self, obj): - return format_image_notification(obj) - - def save(self, image, from_state=None): - super(ImageRepoProxy, self).save(image, from_state=from_state) - self.send_notification('image.update', image) - - def add(self, image): - super(ImageRepoProxy, self).add(image) - self.send_notification('image.create', image) - - def remove(self, image): - super(ImageRepoProxy, self).remove(image) - self.send_notification('image.delete', image, extra_payload={ - 'deleted': True, 'deleted_at': timeutils.isotime() - }) - - -class ImageMemberRepoProxy(NotificationBase, domain_proxy.MemberRepo): - - def __init__(self, repo, image, context, notifier): - self.repo = repo - self.image = image - self.context = context - self.notifier = notifier - proxy_kwargs = {'context': self.context, 'notifier': self.notifier} - - proxy_class = self.get_proxy_class() - super_class = self.get_super_class() - super_class.__init__(self, image, repo, proxy_class, proxy_kwargs) - - def get_super_class(self): - return domain_proxy.MemberRepo - - def get_proxy_class(self): - return ImageMemberProxy - - def get_payload(self, obj): - return format_image_member_notification(obj) - - def save(self, member, from_state=None): - super(ImageMemberRepoProxy, self).save(member, from_state=from_state) - self.send_notification('image.member.update', member) - - def add(self, member): - super(ImageMemberRepoProxy, self).add(member) - self.send_notification('image.member.create', member) - - def remove(self, member): - super(ImageMemberRepoProxy, self).remove(member) - self.send_notification('image.member.delete', member, 
extra_payload={ - 'deleted': True, 'deleted_at': timeutils.isotime() - }) - - -class TaskProxy(NotificationProxy, domain_proxy.Task): - def get_super_class(self): - return domain_proxy.Task - - def get_payload(self, obj): - return format_task_notification(obj) - - def begin_processing(self): - super(TaskProxy, self).begin_processing() - self.send_notification('task.processing', self.repo) - - def succeed(self, result): - super(TaskProxy, self).succeed(result) - self.send_notification('task.success', self.repo) - - def fail(self, message): - super(TaskProxy, self).fail(message) - self.send_notification('task.failure', self.repo) - - def run(self, executor): - super(TaskProxy, self).run(executor) - self.send_notification('task.run', self.repo) - - -class TaskFactoryProxy(NotificationFactoryProxy, domain_proxy.TaskFactory): - def get_super_class(self): - return domain_proxy.TaskFactory - - def get_proxy_class(self): - return TaskProxy - - -class TaskRepoProxy(NotificationRepoProxy, domain_proxy.TaskRepo): - def get_super_class(self): - return domain_proxy.TaskRepo - - def get_proxy_class(self): - return TaskProxy - - def get_payload(self, obj): - return format_task_notification(obj) - - def add(self, task): - result = super(TaskRepoProxy, self).add(task) - self.send_notification('task.create', task) - return result - - def remove(self, task): - result = super(TaskRepoProxy, self).remove(task) - self.send_notification('task.delete', task, extra_payload={ - 'deleted': True, 'deleted_at': timeutils.isotime() - }) - return result - - -class TaskStubProxy(NotificationProxy, domain_proxy.TaskStub): - def get_super_class(self): - return domain_proxy.TaskStub - - -class TaskStubRepoProxy(NotificationRepoProxy, domain_proxy.TaskStubRepo): - def get_super_class(self): - return domain_proxy.TaskStubRepo - - def get_proxy_class(self): - return TaskStubProxy - - -class MetadefNamespaceProxy(NotificationProxy, domain_proxy.MetadefNamespace): - def get_super_class(self): - return 
domain_proxy.MetadefNamespace - - -class MetadefNamespaceFactoryProxy(NotificationFactoryProxy, - domain_proxy.MetadefNamespaceFactory): - def get_super_class(self): - return domain_proxy.MetadefNamespaceFactory - - def get_proxy_class(self): - return MetadefNamespaceProxy - - -class MetadefNamespaceRepoProxy(NotificationRepoProxy, - domain_proxy.MetadefNamespaceRepo): - def get_super_class(self): - return domain_proxy.MetadefNamespaceRepo - - def get_proxy_class(self): - return MetadefNamespaceProxy - - def get_payload(self, obj): - return format_metadef_namespace_notification(obj) - - def save(self, metadef_namespace): - name = getattr(metadef_namespace, '_old_namespace', - metadef_namespace.namespace) - result = super(MetadefNamespaceRepoProxy, self).save(metadef_namespace) - self.send_notification( - 'metadef_namespace.update', metadef_namespace, - extra_payload={ - 'namespace_old': name, - }) - return result - - def add(self, metadef_namespace): - result = super(MetadefNamespaceRepoProxy, self).add(metadef_namespace) - self.send_notification('metadef_namespace.create', metadef_namespace) - return result - - def remove(self, metadef_namespace): - result = super(MetadefNamespaceRepoProxy, self).remove( - metadef_namespace) - self.send_notification( - 'metadef_namespace.delete', metadef_namespace, - extra_payload={'deleted': True, 'deleted_at': timeutils.isotime()} - ) - return result - - def remove_objects(self, metadef_namespace): - result = super(MetadefNamespaceRepoProxy, self).remove_objects( - metadef_namespace) - self.send_notification('metadef_namespace.delete_objects', - metadef_namespace) - return result - - def remove_properties(self, metadef_namespace): - result = super(MetadefNamespaceRepoProxy, self).remove_properties( - metadef_namespace) - self.send_notification('metadef_namespace.delete_properties', - metadef_namespace) - return result - - def remove_tags(self, metadef_namespace): - result = super(MetadefNamespaceRepoProxy, self).remove_tags( - 
metadef_namespace) - self.send_notification('metadef_namespace.delete_tags', - metadef_namespace) - return result - - -class MetadefObjectProxy(NotificationProxy, domain_proxy.MetadefObject): - def get_super_class(self): - return domain_proxy.MetadefObject - - -class MetadefObjectFactoryProxy(NotificationFactoryProxy, - domain_proxy.MetadefObjectFactory): - def get_super_class(self): - return domain_proxy.MetadefObjectFactory - - def get_proxy_class(self): - return MetadefObjectProxy - - -class MetadefObjectRepoProxy(NotificationRepoProxy, - domain_proxy.MetadefObjectRepo): - def get_super_class(self): - return domain_proxy.MetadefObjectRepo - - def get_proxy_class(self): - return MetadefObjectProxy - - def get_payload(self, obj): - return format_metadef_object_notification(obj) - - def save(self, metadef_object): - name = getattr(metadef_object, '_old_name', metadef_object.name) - result = super(MetadefObjectRepoProxy, self).save(metadef_object) - self.send_notification( - 'metadef_object.update', metadef_object, - extra_payload={ - 'namespace': metadef_object.namespace.namespace, - 'name_old': name, - }) - return result - - def add(self, metadef_object): - result = super(MetadefObjectRepoProxy, self).add(metadef_object) - self.send_notification('metadef_object.create', metadef_object) - return result - - def remove(self, metadef_object): - result = super(MetadefObjectRepoProxy, self).remove(metadef_object) - self.send_notification( - 'metadef_object.delete', metadef_object, - extra_payload={ - 'deleted': True, - 'deleted_at': timeutils.isotime(), - 'namespace': metadef_object.namespace.namespace - } - ) - return result - - -class MetadefPropertyProxy(NotificationProxy, domain_proxy.MetadefProperty): - def get_super_class(self): - return domain_proxy.MetadefProperty - - -class MetadefPropertyFactoryProxy(NotificationFactoryProxy, - domain_proxy.MetadefPropertyFactory): - def get_super_class(self): - return domain_proxy.MetadefPropertyFactory - - def 
get_proxy_class(self): - return MetadefPropertyProxy - - -class MetadefPropertyRepoProxy(NotificationRepoProxy, - domain_proxy.MetadefPropertyRepo): - def get_super_class(self): - return domain_proxy.MetadefPropertyRepo - - def get_proxy_class(self): - return MetadefPropertyProxy - - def get_payload(self, obj): - return format_metadef_property_notification(obj) - - def save(self, metadef_property): - name = getattr(metadef_property, '_old_name', metadef_property.name) - result = super(MetadefPropertyRepoProxy, self).save(metadef_property) - self.send_notification( - 'metadef_property.update', metadef_property, - extra_payload={ - 'namespace': metadef_property.namespace.namespace, - 'name_old': name, - }) - return result - - def add(self, metadef_property): - result = super(MetadefPropertyRepoProxy, self).add(metadef_property) - self.send_notification('metadef_property.create', metadef_property) - return result - - def remove(self, metadef_property): - result = super(MetadefPropertyRepoProxy, self).remove(metadef_property) - self.send_notification( - 'metadef_property.delete', metadef_property, - extra_payload={ - 'deleted': True, - 'deleted_at': timeutils.isotime(), - 'namespace': metadef_property.namespace.namespace - } - ) - return result - - -class MetadefResourceTypeProxy(NotificationProxy, - domain_proxy.MetadefResourceType): - def get_super_class(self): - return domain_proxy.MetadefResourceType - - -class MetadefResourceTypeFactoryProxy(NotificationFactoryProxy, - domain_proxy.MetadefResourceTypeFactory): - def get_super_class(self): - return domain_proxy.MetadefResourceTypeFactory - - def get_proxy_class(self): - return MetadefResourceTypeProxy - - -class MetadefResourceTypeRepoProxy(NotificationRepoProxy, - domain_proxy.MetadefResourceTypeRepo): - def get_super_class(self): - return domain_proxy.MetadefResourceTypeRepo - - def get_proxy_class(self): - return MetadefResourceTypeProxy - - def get_payload(self, obj): - return 
format_metadef_resource_type_notification(obj) - - def add(self, md_resource_type): - result = super(MetadefResourceTypeRepoProxy, self).add( - md_resource_type) - self.send_notification('metadef_resource_type.create', - md_resource_type) - return result - - def remove(self, md_resource_type): - result = super(MetadefResourceTypeRepoProxy, self).remove( - md_resource_type) - self.send_notification( - 'metadef_resource_type.delete', md_resource_type, - extra_payload={ - 'deleted': True, - 'deleted_at': timeutils.isotime(), - 'namespace': md_resource_type.namespace.namespace - } - ) - return result - - -class MetadefTagProxy(NotificationProxy, domain_proxy.MetadefTag): - def get_super_class(self): - return domain_proxy.MetadefTag - - -class MetadefTagFactoryProxy(NotificationFactoryProxy, - domain_proxy.MetadefTagFactory): - def get_super_class(self): - return domain_proxy.MetadefTagFactory - - def get_proxy_class(self): - return MetadefTagProxy - - -class MetadefTagRepoProxy(NotificationRepoProxy, domain_proxy.MetadefTagRepo): - def get_super_class(self): - return domain_proxy.MetadefTagRepo - - def get_proxy_class(self): - return MetadefTagProxy - - def get_payload(self, obj): - return format_metadef_tag_notification(obj) - - def save(self, metadef_tag): - name = getattr(metadef_tag, '_old_name', metadef_tag.name) - result = super(MetadefTagRepoProxy, self).save(metadef_tag) - self.send_notification( - 'metadef_tag.update', metadef_tag, - extra_payload={ - 'namespace': metadef_tag.namespace.namespace, - 'name_old': name, - }) - return result - - def add(self, metadef_tag): - result = super(MetadefTagRepoProxy, self).add(metadef_tag) - self.send_notification('metadef_tag.create', metadef_tag) - return result - - def add_tags(self, metadef_tags): - result = super(MetadefTagRepoProxy, self).add_tags(metadef_tags) - for metadef_tag in metadef_tags: - self.send_notification('metadef_tag.create', metadef_tag) - - return result - - def remove(self, metadef_tag): - result 
= super(MetadefTagRepoProxy, self).remove(metadef_tag) - self.send_notification( - 'metadef_tag.delete', metadef_tag, - extra_payload={ - 'deleted': True, - 'deleted_at': timeutils.isotime(), - 'namespace': metadef_tag.namespace.namespace - } - ) - return result diff --git a/glance/opts.py b/glance/opts.py deleted file mode 100644 index 033b2686..00000000 --- a/glance/opts.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -__all__ = [ - 'list_api_opts', - 'list_registry_opts', - 'list_scrubber_opts', - 'list_cache_opts', - 'list_manage_opts', -] - -import copy -import itertools - -from osprofiler import opts as profiler - -import glance.api.middleware.context -import glance.api.versions -import glance.async.flows.convert -import glance.async.taskflow_executor -import glance.common.config -import glance.common.location_strategy -import glance.common.location_strategy.store_type -import glance.common.property_utils -import glance.common.rpc -import glance.common.wsgi -import glance.image_cache -import glance.image_cache.drivers.sqlite -import glance.notifier -import glance.registry -import glance.registry.client -import glance.registry.client.v1.api -import glance.scrubber - - -_api_opts = [ - (None, list(itertools.chain( - glance.api.middleware.context.context_opts, - glance.api.versions.versions_opts, - glance.common.config.common_opts, - glance.common.location_strategy.location_strategy_opts, - glance.common.property_utils.property_opts, - glance.common.rpc.rpc_opts, - glance.common.wsgi.bind_opts, - glance.common.wsgi.eventlet_opts, - glance.common.wsgi.socket_opts, - glance.common.wsgi.wsgi_opts, - glance.image_cache.drivers.sqlite.sqlite_opts, - glance.image_cache.image_cache_opts, - glance.notifier.notifier_opts, - glance.registry.registry_addr_opts, - glance.registry.client.registry_client_ctx_opts, - glance.registry.client.registry_client_opts, - glance.registry.client.v1.api.registry_client_ctx_opts, - glance.scrubber.scrubber_opts))), - ('image_format', glance.common.config.image_format_opts), - ('task', glance.common.config.task_opts), - ('taskflow_executor', list(itertools.chain( - glance.async.taskflow_executor.taskflow_executor_opts, - glance.async.flows.convert.convert_task_opts))), - ('store_type_location_strategy', - glance.common.location_strategy.store_type.store_type_opts), - profiler.list_opts()[0], - ('paste_deploy', glance.common.config.paste_deploy_opts) -] 
-_registry_opts = [ - (None, list(itertools.chain( - glance.api.middleware.context.context_opts, - glance.common.config.common_opts, - glance.common.wsgi.bind_opts, - glance.common.wsgi.socket_opts, - glance.common.wsgi.wsgi_opts, - glance.common.wsgi.eventlet_opts))), - profiler.list_opts()[0], - ('paste_deploy', glance.common.config.paste_deploy_opts) -] -_scrubber_opts = [ - (None, list(itertools.chain( - glance.common.config.common_opts, - glance.scrubber.scrubber_opts, - glance.scrubber.scrubber_cmd_opts, - glance.scrubber.scrubber_cmd_cli_opts, - glance.registry.client.registry_client_opts, - glance.registry.client.registry_client_ctx_opts, - glance.registry.registry_addr_opts))), -] -_cache_opts = [ - (None, list(itertools.chain( - glance.common.config.common_opts, - glance.image_cache.drivers.sqlite.sqlite_opts, - glance.image_cache.image_cache_opts, - glance.registry.registry_addr_opts, - glance.registry.client.registry_client_opts, - glance.registry.client.registry_client_ctx_opts))), -] -_manage_opts = [ - (None, []) -] - - -def list_api_opts(): - """Return a list of oslo_config options available in Glance API service. - - Each element of the list is a tuple. The first element is the name of the - group under which the list of elements in the second element will be - registered. A group name of None corresponds to the [DEFAULT] group in - config files. - - This function is also discoverable via the 'glance.api' entry point - under the 'oslo_config.opts' namespace. - - The purpose of this is to allow tools like the Oslo sample config file - generator to discover the options exposed to users by Glance. - - :returns: a list of (group_name, opts) tuples - """ - - return [(g, copy.deepcopy(o)) for g, o in _api_opts] - - -def list_registry_opts(): - """Return a list of oslo_config options available in Glance Registry - service. 
- """ - return [(g, copy.deepcopy(o)) for g, o in _registry_opts] - - -def list_scrubber_opts(): - """Return a list of oslo_config options available in Glance Scrubber - service. - """ - return [(g, copy.deepcopy(o)) for g, o in _scrubber_opts] - - -def list_cache_opts(): - """Return a list of oslo_config options available in Glance Cache - service. - """ - return [(g, copy.deepcopy(o)) for g, o in _cache_opts] - - -def list_manage_opts(): - """Return a list of oslo_config options available in Glance manage.""" - return [(g, copy.deepcopy(o)) for g, o in _manage_opts] diff --git a/glance/quota/__init__.py b/glance/quota/__init__.py deleted file mode 100644 index ee16a0c1..00000000 --- a/glance/quota/__init__.py +++ /dev/null @@ -1,386 +0,0 @@ -# Copyright 2013, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -import glance_store as store -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils - -import glance.api.common -import glance.common.exception as exception -from glance.common import utils -import glance.domain -import glance.domain.proxy -from glance.i18n import _, _LI - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -CONF.import_opt('image_member_quota', 'glance.common.config') -CONF.import_opt('image_property_quota', 'glance.common.config') -CONF.import_opt('image_tag_quota', 'glance.common.config') - - -def _enforce_image_tag_quota(tags): - if CONF.image_tag_quota < 0: - # If value is negative, allow unlimited number of tags - return - - if not tags: - return - - if len(tags) > CONF.image_tag_quota: - raise exception.ImageTagLimitExceeded(attempted=len(tags), - maximum=CONF.image_tag_quota) - - -def _calc_required_size(context, image, locations): - required_size = None - if image.size: - required_size = image.size * len(locations) - else: - for location in locations: - size_from_backend = None - - try: - size_from_backend = store.get_size_from_backend( - location['url'], context=context) - except (store.UnknownScheme, store.NotFound): - pass - except store.BadStoreUri: - raise exception.BadStoreUri - - if size_from_backend: - required_size = size_from_backend * len(locations) - break - return required_size - - -def _enforce_image_location_quota(image, locations, is_setter=False): - if CONF.image_location_quota < 0: - # If value is negative, allow unlimited number of locations - return - - attempted = len(image.locations) + len(locations) - attempted = attempted if not is_setter else len(locations) - maximum = CONF.image_location_quota - if attempted > maximum: - raise exception.ImageLocationLimitExceeded(attempted=attempted, - maximum=maximum) - - -class ImageRepoProxy(glance.domain.proxy.Repo): - - def __init__(self, image_repo, context, db_api, 
store_utils): - self.image_repo = image_repo - self.db_api = db_api - proxy_kwargs = {'context': context, 'db_api': db_api, - 'store_utils': store_utils} - super(ImageRepoProxy, self).__init__(image_repo, - item_proxy_class=ImageProxy, - item_proxy_kwargs=proxy_kwargs) - - def _enforce_image_property_quota(self, attempted): - if CONF.image_property_quota < 0: - # If value is negative, allow unlimited number of properties - return - - maximum = CONF.image_property_quota - if attempted > maximum: - kwargs = {'attempted': attempted, 'maximum': maximum} - exc = exception.ImagePropertyLimitExceeded(**kwargs) - LOG.debug(encodeutils.exception_to_unicode(exc)) - raise exc - - def save(self, image, from_state=None): - if image.added_new_properties(): - self._enforce_image_property_quota(len(image.extra_properties)) - return super(ImageRepoProxy, self).save(image, from_state=from_state) - - def add(self, image): - self._enforce_image_property_quota(len(image.extra_properties)) - return super(ImageRepoProxy, self).add(image) - - -class ImageFactoryProxy(glance.domain.proxy.ImageFactory): - def __init__(self, factory, context, db_api, store_utils): - proxy_kwargs = {'context': context, 'db_api': db_api, - 'store_utils': store_utils} - super(ImageFactoryProxy, self).__init__(factory, - proxy_class=ImageProxy, - proxy_kwargs=proxy_kwargs) - - def new_image(self, **kwargs): - tags = kwargs.pop('tags', set([])) - _enforce_image_tag_quota(tags) - return super(ImageFactoryProxy, self).new_image(tags=tags, **kwargs) - - -class QuotaImageTagsProxy(object): - - def __init__(self, orig_set): - if orig_set is None: - orig_set = set([]) - self.tags = orig_set - - def add(self, item): - self.tags.add(item) - _enforce_image_tag_quota(self.tags) - - def __cast__(self, *args, **kwargs): - return self.tags.__cast__(*args, **kwargs) - - def __contains__(self, *args, **kwargs): - return self.tags.__contains__(*args, **kwargs) - - def __eq__(self, other): - return self.tags == other - - def 
__ne__(self, other): - return not self.__eq__(other) - - def __iter__(self, *args, **kwargs): - return self.tags.__iter__(*args, **kwargs) - - def __len__(self, *args, **kwargs): - return self.tags.__len__(*args, **kwargs) - - def __getattr__(self, name): - return getattr(self.tags, name) - - -class ImageMemberFactoryProxy(glance.domain.proxy.ImageMembershipFactory): - - def __init__(self, member_factory, context, db_api, store_utils): - self.db_api = db_api - self.context = context - proxy_kwargs = {'context': context, 'db_api': db_api, - 'store_utils': store_utils} - super(ImageMemberFactoryProxy, self).__init__( - member_factory, - proxy_class=ImageMemberProxy, - proxy_kwargs=proxy_kwargs) - - def _enforce_image_member_quota(self, image): - if CONF.image_member_quota < 0: - # If value is negative, allow unlimited number of members - return - - current_member_count = self.db_api.image_member_count(self.context, - image.image_id) - attempted = current_member_count + 1 - maximum = CONF.image_member_quota - if attempted > maximum: - raise exception.ImageMemberLimitExceeded(attempted=attempted, - maximum=maximum) - - def new_image_member(self, image, member_id): - self._enforce_image_member_quota(image) - return super(ImageMemberFactoryProxy, self).new_image_member(image, - member_id) - - -class QuotaImageLocationsProxy(object): - - def __init__(self, image, context, db_api): - self.image = image - self.context = context - self.db_api = db_api - self.locations = image.locations - - def __cast__(self, *args, **kwargs): - return self.locations.__cast__(*args, **kwargs) - - def __contains__(self, *args, **kwargs): - return self.locations.__contains__(*args, **kwargs) - - def __delitem__(self, *args, **kwargs): - return self.locations.__delitem__(*args, **kwargs) - - def __delslice__(self, *args, **kwargs): - return self.locations.__delslice__(*args, **kwargs) - - def __eq__(self, other): - return self.locations == other - - def __ne__(self, other): - return not 
self.__eq__(other) - - def __getitem__(self, *args, **kwargs): - return self.locations.__getitem__(*args, **kwargs) - - def __iadd__(self, other): - if not hasattr(other, '__iter__'): - raise TypeError() - self._check_user_storage_quota(other) - return self.locations.__iadd__(other) - - def __iter__(self, *args, **kwargs): - return self.locations.__iter__(*args, **kwargs) - - def __len__(self, *args, **kwargs): - return self.locations.__len__(*args, **kwargs) - - def __setitem__(self, key, value): - return self.locations.__setitem__(key, value) - - def count(self, *args, **kwargs): - return self.locations.count(*args, **kwargs) - - def index(self, *args, **kwargs): - return self.locations.index(*args, **kwargs) - - def pop(self, *args, **kwargs): - return self.locations.pop(*args, **kwargs) - - def remove(self, *args, **kwargs): - return self.locations.remove(*args, **kwargs) - - def reverse(self, *args, **kwargs): - return self.locations.reverse(*args, **kwargs) - - def _check_user_storage_quota(self, locations): - required_size = _calc_required_size(self.context, - self.image, - locations) - glance.api.common.check_quota(self.context, - required_size, - self.db_api) - _enforce_image_location_quota(self.image, locations) - - def __copy__(self): - return type(self)(self.image, self.context, self.db_api) - - def __deepcopy__(self, memo): - # NOTE(zhiyan): Only copy location entries, others can be reused. 
- self.image.locations = copy.deepcopy(self.locations, memo) - return type(self)(self.image, self.context, self.db_api) - - def append(self, object): - self._check_user_storage_quota([object]) - return self.locations.append(object) - - def insert(self, index, object): - self._check_user_storage_quota([object]) - return self.locations.insert(index, object) - - def extend(self, iter): - self._check_user_storage_quota(iter) - return self.locations.extend(iter) - - -class ImageProxy(glance.domain.proxy.Image): - - def __init__(self, image, context, db_api, store_utils): - self.image = image - self.context = context - self.db_api = db_api - self.store_utils = store_utils - super(ImageProxy, self).__init__(image) - self.orig_props = set(image.extra_properties.keys()) - - def set_data(self, data, size=None): - remaining = glance.api.common.check_quota( - self.context, size, self.db_api, image_id=self.image.image_id) - if remaining is not None: - # NOTE(jbresnah) we are trying to enforce a quota, put a limit - # reader on the data - data = utils.LimitingReader(data, remaining) - try: - self.image.set_data(data, size=size) - except exception.ImageSizeLimitExceeded: - raise exception.StorageQuotaFull(image_size=size, - remaining=remaining) - - # NOTE(jbresnah) If two uploads happen at the same time and neither - # properly sets the size attribute[1] then there is a race condition - # that will allow for the quota to be broken[2]. Thus we must recheck - # the quota after the upload and thus after we know the size. - # - # Also, when an upload doesn't set the size properly then the call to - # check_quota above returns None and so utils.LimitingReader is not - # used above. Hence the store (e.g. filesystem store) may have to - # download the entire file before knowing the actual file size. Here - # also we need to check for the quota again after the image has been - # downloaded to the store. - # - # [1] For e.g. 
when using chunked transfers the 'Content-Length' - # header is not set. - # [2] For e.g.: - # - Upload 1 does not exceed quota but upload 2 exceeds quota. - # Both uploads are to different locations - # - Upload 2 completes before upload 1 and writes image.size. - # - Immediately, upload 1 completes and (over)writes image.size - # with the smaller size. - # - Now, to glance, image has not exceeded quota but, in - # reality, the quota has been exceeded. - - try: - glance.api.common.check_quota( - self.context, self.image.size, self.db_api, - image_id=self.image.image_id) - except exception.StorageQuotaFull: - with excutils.save_and_reraise_exception(): - LOG.info(_LI('Cleaning up %s after exceeding the quota.'), - self.image.image_id) - self.store_utils.safe_delete_from_backend( - self.context, self.image.image_id, self.image.locations[0]) - - @property - def tags(self): - return QuotaImageTagsProxy(self.image.tags) - - @tags.setter - def tags(self, value): - _enforce_image_tag_quota(value) - self.image.tags = value - - @property - def locations(self): - return QuotaImageLocationsProxy(self.image, - self.context, - self.db_api) - - @locations.setter - def locations(self, value): - _enforce_image_location_quota(self.image, value, is_setter=True) - - if not isinstance(value, (list, QuotaImageLocationsProxy)): - raise exception.Invalid(_('Invalid locations: %s') % value) - - required_size = _calc_required_size(self.context, - self.image, - value) - - glance.api.common.check_quota( - self.context, required_size, self.db_api, - image_id=self.image.image_id) - self.image.locations = value - - def added_new_properties(self): - current_props = set(self.image.extra_properties.keys()) - return bool(current_props.difference(self.orig_props)) - - -class ImageMemberProxy(glance.domain.proxy.ImageMember): - - def __init__(self, image_member, context, db_api, store_utils): - self.image_member = image_member - self.context = context - self.db_api = db_api - self.store_utils = 
store_utils - super(ImageMemberProxy, self).__init__(image_member) diff --git a/glance/registry/__init__.py b/glance/registry/__init__.py deleted file mode 100644 index 06fbe00c..00000000 --- a/glance/registry/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Registry API -""" - -from oslo_config import cfg - -from glance.i18n import _ - - -registry_addr_opts = [ - cfg.HostAddressOpt('registry_host', - default='0.0.0.0', - help=_(""" -Address the registry server is hosted on. - -Possible values: - * A valid IP or hostname - -Related options: - * None - -""")), - cfg.PortOpt('registry_port', default=9191, - help=_(""" -Port the registry server is listening on. - -Possible values: - * A valid port number - -Related options: - * None - -""")), -] - -CONF = cfg.CONF -CONF.register_opts(registry_addr_opts) diff --git a/glance/registry/api/__init__.py b/glance/registry/api/__init__.py deleted file mode 100644 index 12374985..00000000 --- a/glance/registry/api/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from glance.common import wsgi -from glance.registry.api import v1 -from glance.registry.api import v2 - -CONF = cfg.CONF -CONF.import_opt('enable_v1_registry', 'glance.common.config') -CONF.import_opt('enable_v2_registry', 'glance.common.config') - - -class API(wsgi.Router): - """WSGI entry point for all Registry requests.""" - - def __init__(self, mapper): - mapper = mapper or wsgi.APIMapper() - if CONF.enable_v1_registry: - v1.init(mapper) - if CONF.enable_v2_registry: - v2.init(mapper) - - super(API, self).__init__(mapper) diff --git a/glance/registry/api/v1/__init__.py b/glance/registry/api/v1/__init__.py deleted file mode 100644 index 0e2b41c4..00000000 --- a/glance/registry/api/v1/__init__.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from glance.common import wsgi -from glance.registry.api.v1 import images -from glance.registry.api.v1 import members - - -def init(mapper): - images_resource = images.create_resource() - - mapper.connect("/", - controller=images_resource, - action="index") - mapper.connect("/images", - controller=images_resource, - action="index", - conditions={'method': ['GET']}) - mapper.connect("/images", - controller=images_resource, - action="create", - conditions={'method': ['POST']}) - mapper.connect("/images/detail", - controller=images_resource, - action="detail", - conditions={'method': ['GET']}) - mapper.connect("/images/{id}", - controller=images_resource, - action="show", - conditions=dict(method=["GET"])) - mapper.connect("/images/{id}", - controller=images_resource, - action="update", - conditions=dict(method=["PUT"])) - mapper.connect("/images/{id}", - controller=images_resource, - action="delete", - conditions=dict(method=["DELETE"])) - - members_resource = members.create_resource() - - mapper.connect("/images/{image_id}/members", - controller=members_resource, - action="index", - conditions={'method': ['GET']}) - mapper.connect("/images/{image_id}/members", - controller=members_resource, - action="create", - conditions={'method': ['POST']}) - mapper.connect("/images/{image_id}/members", - controller=members_resource, - action="update_all", - conditions=dict(method=["PUT"])) - mapper.connect("/images/{image_id}/members/{id}", - controller=members_resource, - action="show", - conditions={'method': ['GET']}) - mapper.connect("/images/{image_id}/members/{id}", - controller=members_resource, - action="update", - conditions={'method': ['PUT']}) - mapper.connect("/images/{image_id}/members/{id}", - controller=members_resource, - action="delete", - conditions={'method': ['DELETE']}) - mapper.connect("/shared-images/{id}", - controller=members_resource, - action="index_shared_images") - - -class API(wsgi.Router): - """WSGI entry point for all Registry requests.""" - - 
def __init__(self, mapper): - mapper = mapper or wsgi.APIMapper() - - init(mapper) - - super(API, self).__init__(mapper) diff --git a/glance/registry/api/v1/images.py b/glance/registry/api/v1/images.py deleted file mode 100644 index 7fc00d9d..00000000 --- a/glance/registry/api/v1/images.py +++ /dev/null @@ -1,569 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Reference implementation registry server WSGI controller -""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import strutils -from oslo_utils import uuidutils -from webob import exc - -from glance.common import exception -from glance.common import timeutils -from glance.common import utils -from glance.common import wsgi -import glance.db -from glance.i18n import _, _LE, _LI, _LW - - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - -DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'size', - 'disk_format', 'container_format', - 'checksum'] - -SUPPORTED_FILTERS = ['name', 'status', 'container_format', 'disk_format', - 'min_ram', 'min_disk', 'size_min', 'size_max', - 'changes-since', 'protected'] - -SUPPORTED_SORT_KEYS = ('name', 'status', 'container_format', 'disk_format', - 'size', 'id', 'created_at', 'updated_at') - -SUPPORTED_SORT_DIRS = ('asc', 'desc') - -SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir') - - -def 
_normalize_image_location_for_db(image_data): - """ - This function takes the legacy locations field and the newly added - location_data field from the image_data values dictionary which flows - over the wire between the registry and API servers and converts it - into the location_data format only which is then consumable by the - Image object. - - :param image_data: a dict of values representing information in the image - :returns: a new image data dict - """ - if 'locations' not in image_data and 'location_data' not in image_data: - image_data['locations'] = None - return image_data - - locations = image_data.pop('locations', []) - location_data = image_data.pop('location_data', []) - - location_data_dict = {} - for l in locations: - location_data_dict[l] = {} - for l in location_data: - location_data_dict[l['url']] = {'metadata': l['metadata'], - 'status': l['status'], - # Note(zhiyan): New location has no ID. - 'id': l['id'] if 'id' in l else None} - - # NOTE(jbresnah) preserve original order. 
tests assume original order, - # should that be defined functionality - ordered_keys = locations[:] - for ld in location_data: - if ld['url'] not in ordered_keys: - ordered_keys.append(ld['url']) - - location_data = [] - for loc in ordered_keys: - data = location_data_dict[loc] - if data: - location_data.append({'url': loc, - 'metadata': data['metadata'], - 'status': data['status'], - 'id': data['id']}) - else: - location_data.append({'url': loc, - 'metadata': {}, - 'status': 'active', - 'id': None}) - - image_data['locations'] = location_data - return image_data - - -class Controller(object): - - def __init__(self): - self.db_api = glance.db.get_api() - - def _get_images(self, context, filters, **params): - """Get images, wrapping in exception if necessary.""" - # NOTE(markwash): for backwards compatibility, is_public=True for - # admins actually means "treat me as if I'm not an admin and show me - # all my images" - if context.is_admin and params.get('is_public') is True: - params['admin_as_user'] = True - del params['is_public'] - try: - return self.db_api.image_get_all(context, filters=filters, - v1_mode=True, **params) - except exception.ImageNotFound: - LOG.warn(_LW("Invalid marker. Image %(id)s could not be " - "found.") % {'id': params.get('marker')}) - msg = _("Invalid marker. Image could not be found.") - raise exc.HTTPBadRequest(explanation=msg) - except exception.Forbidden: - LOG.warn(_LW("Access denied to image %(id)s but returning " - "'not found'") % {'id': params.get('marker')}) - msg = _("Invalid marker. Image could not be found.") - raise exc.HTTPBadRequest(explanation=msg) - except Exception: - LOG.exception(_LE("Unable to get images")) - raise - - def index(self, req): - """Return a basic filtered list of public, non-deleted images - - :param req: the Request object coming from the wsgi layer - :returns: a mapping of the following form - - .. 
code-block:: python - - dict(images=[image_list]) - - Where image_list is a sequence of mappings - - :: - - { - 'id': , - 'name': , - 'size': , - 'disk_format': , - 'container_format': , - 'checksum': - } - - """ - params = self._get_query_params(req) - images = self._get_images(req.context, **params) - - results = [] - for image in images: - result = {} - for field in DISPLAY_FIELDS_IN_INDEX: - result[field] = image[field] - results.append(result) - - LOG.debug("Returning image list") - return dict(images=results) - - def detail(self, req): - """Return a filtered list of public, non-deleted images in detail - - :param req: the Request object coming from the wsgi layer - :returns: a mapping of the following form - - :: - - {'images': - [{ - 'id': , - 'name': , - 'size': , - 'disk_format': , - 'container_format': , - 'checksum': , - 'min_disk': , - 'min_ram': , - 'store': , - 'status': , - 'created_at': , - 'updated_at': , - 'deleted_at': |, - 'properties': {'distro': 'Ubuntu 10.04 LTS', {...}} - }, {...}] - } - - """ - params = self._get_query_params(req) - - images = self._get_images(req.context, **params) - image_dicts = [make_image_dict(i) for i in images] - LOG.debug("Returning detailed image list") - return dict(images=image_dicts) - - def _get_query_params(self, req): - """Extract necessary query parameters from http request. 
- - :param req: the Request object coming from the wsgi layer - :returns: dictionary of filters to apply to list of images - """ - params = { - 'filters': self._get_filters(req), - 'limit': self._get_limit(req), - 'sort_key': [self._get_sort_key(req)], - 'sort_dir': [self._get_sort_dir(req)], - 'marker': self._get_marker(req), - } - - if req.context.is_admin: - # Only admin gets to look for non-public images - params['is_public'] = self._get_is_public(req) - - # need to coy items because the params is modified in the loop body - items = list(params.items()) - for key, value in items: - if value is None: - del params[key] - - # Fix for LP Bug #1132294 - # Ensure all shared images are returned in v1 - params['member_status'] = 'all' - return params - - def _get_filters(self, req): - """Return a dictionary of query param filters from the request - - :param req: the Request object coming from the wsgi layer - :returns: a dict of key/value filters - """ - filters = {} - properties = {} - - for param in req.params: - if param in SUPPORTED_FILTERS: - filters[param] = req.params.get(param) - if param.startswith('property-'): - _param = param[9:] - properties[_param] = req.params.get(param) - - if 'changes-since' in filters: - isotime = filters['changes-since'] - try: - filters['changes-since'] = timeutils.parse_isotime(isotime) - except ValueError: - raise exc.HTTPBadRequest(_("Unrecognized changes-since value")) - - if 'protected' in filters: - value = self._get_bool(filters['protected']) - if value is None: - raise exc.HTTPBadRequest(_("protected must be True, or " - "False")) - - filters['protected'] = value - - # only allow admins to filter on 'deleted' - if req.context.is_admin: - deleted_filter = self._parse_deleted_filter(req) - if deleted_filter is not None: - filters['deleted'] = deleted_filter - elif 'changes-since' not in filters: - filters['deleted'] = False - elif 'changes-since' not in filters: - filters['deleted'] = False - - if properties: - 
filters['properties'] = properties - - return filters - - def _get_limit(self, req): - """Parse a limit query param into something usable.""" - try: - limit = int(req.params.get('limit', CONF.limit_param_default)) - except ValueError: - raise exc.HTTPBadRequest(_("limit param must be an integer")) - - if limit < 0: - raise exc.HTTPBadRequest(_("limit param must be positive")) - - return min(CONF.api_limit_max, limit) - - def _get_marker(self, req): - """Parse a marker query param into something usable.""" - marker = req.params.get('marker') - - if marker and not uuidutils.is_uuid_like(marker): - msg = _('Invalid marker format') - raise exc.HTTPBadRequest(explanation=msg) - - return marker - - def _get_sort_key(self, req): - """Parse a sort key query param from the request object.""" - sort_key = req.params.get('sort_key', 'created_at') - if sort_key is not None and sort_key not in SUPPORTED_SORT_KEYS: - _keys = ', '.join(SUPPORTED_SORT_KEYS) - msg = _("Unsupported sort_key. Acceptable values: %s") % (_keys,) - raise exc.HTTPBadRequest(explanation=msg) - return sort_key - - def _get_sort_dir(self, req): - """Parse a sort direction query param from the request object.""" - sort_dir = req.params.get('sort_dir', 'desc') - if sort_dir is not None and sort_dir not in SUPPORTED_SORT_DIRS: - _keys = ', '.join(SUPPORTED_SORT_DIRS) - msg = _("Unsupported sort_dir. Acceptable values: %s") % (_keys,) - raise exc.HTTPBadRequest(explanation=msg) - return sort_dir - - def _get_bool(self, value): - value = value.lower() - if value == 'true' or value == '1': - return True - elif value == 'false' or value == '0': - return False - - return None - - def _get_is_public(self, req): - """Parse is_public into something usable.""" - is_public = req.params.get('is_public') - - if is_public is None: - # NOTE(vish): This preserves the default value of showing only - # public images. 
- return True - elif is_public.lower() == 'none': - return None - - value = self._get_bool(is_public) - if value is None: - raise exc.HTTPBadRequest(_("is_public must be None, True, or " - "False")) - - return value - - def _parse_deleted_filter(self, req): - """Parse deleted into something usable.""" - deleted = req.params.get('deleted') - if deleted is None: - return None - return strutils.bool_from_string(deleted) - - def show(self, req, id): - """Return data about the given image id.""" - try: - image = self.db_api.image_get(req.context, id, v1_mode=True) - LOG.debug("Successfully retrieved image %(id)s", {'id': id}) - except exception.ImageNotFound: - LOG.info(_LI("Image %(id)s not found"), {'id': id}) - raise exc.HTTPNotFound() - except exception.Forbidden: - # If it's private and doesn't belong to them, don't let on - # that it exists - LOG.info(_LI("Access denied to image %(id)s but returning" - " 'not found'"), {'id': id}) - raise exc.HTTPNotFound() - except Exception: - LOG.exception(_LE("Unable to show image %s") % id) - raise - - return dict(image=make_image_dict(image)) - - @utils.mutating - def delete(self, req, id): - """Deletes an existing image with the registry. - - :param req: wsgi Request object - :param id: The opaque internal identifier for the image - - :returns: 200 if delete was successful, a fault if not. On - success, the body contains the deleted image - information as a mapping. 
- """ - try: - deleted_image = self.db_api.image_destroy(req.context, id) - LOG.info(_LI("Successfully deleted image %(id)s"), {'id': id}) - return dict(image=make_image_dict(deleted_image)) - except exception.ForbiddenPublicImage: - LOG.info(_LI("Delete denied for public image %(id)s"), {'id': id}) - raise exc.HTTPForbidden() - except exception.Forbidden: - # If it's private and doesn't belong to them, don't let on - # that it exists - LOG.info(_LI("Access denied to image %(id)s but returning" - " 'not found'"), {'id': id}) - return exc.HTTPNotFound() - except exception.ImageNotFound: - LOG.info(_LI("Image %(id)s not found"), {'id': id}) - return exc.HTTPNotFound() - except Exception: - LOG.exception(_LE("Unable to delete image %s") % id) - raise - - @utils.mutating - def create(self, req, body): - """Registers a new image with the registry. - - :param req: wsgi Request object - :param body: Dictionary of information about the image - - :returns: The newly-created image information as a mapping, - which will include the newly-created image's internal id - in the 'id' field - """ - image_data = body['image'] - - # Ensure the image has a status set - image_data.setdefault('status', 'active') - - # Set up the image owner - if not req.context.is_admin or 'owner' not in image_data: - image_data['owner'] = req.context.owner - - image_id = image_data.get('id') - if image_id and not uuidutils.is_uuid_like(image_id): - LOG.info(_LI("Rejecting image creation request for invalid image " - "id '%(bad_id)s'"), {'bad_id': image_id}) - msg = _("Invalid image id format") - return exc.HTTPBadRequest(explanation=msg) - - if 'location' in image_data: - image_data['locations'] = [image_data.pop('location')] - - try: - image_data = _normalize_image_location_for_db(image_data) - image_data = self.db_api.image_create(req.context, image_data, - v1_mode=True) - image_data = dict(image=make_image_dict(image_data)) - LOG.info(_LI("Successfully created image %(id)s"), - {'id': 
image_data['image']['id']}) - return image_data - except exception.Duplicate: - msg = _("Image with identifier %s already exists!") % image_id - LOG.warn(msg) - return exc.HTTPConflict(msg) - except exception.Invalid as e: - msg = (_("Failed to add image metadata. " - "Got error: %s") % encodeutils.exception_to_unicode(e)) - LOG.error(msg) - return exc.HTTPBadRequest(msg) - except Exception: - LOG.exception(_LE("Unable to create image %s"), image_id) - raise - - @utils.mutating - def update(self, req, id, body): - """Updates an existing image with the registry. - - :param req: wsgi Request object - :param body: Dictionary of information about the image - :param id: The opaque internal identifier for the image - - :returns: Returns the updated image information as a mapping, - """ - image_data = body['image'] - from_state = body.get('from_state') - - # Prohibit modification of 'owner' - if not req.context.is_admin and 'owner' in image_data: - del image_data['owner'] - - if 'location' in image_data: - image_data['locations'] = [image_data.pop('location')] - - purge_props = req.headers.get("X-Glance-Registry-Purge-Props", "false") - try: - # These fields hold sensitive data, which should not be printed in - # the logs. - sensitive_fields = ['locations', 'location_data'] - LOG.debug("Updating image %(id)s with metadata: %(image_data)r", - {'id': id, - 'image_data': {k: v for k, v in image_data.items() - if k not in sensitive_fields}}) - image_data = _normalize_image_location_for_db(image_data) - if purge_props == "true": - purge_props = True - else: - purge_props = False - - updated_image = self.db_api.image_update(req.context, id, - image_data, - purge_props=purge_props, - from_state=from_state, - v1_mode=True) - - LOG.info(_LI("Updating metadata for image %(id)s"), {'id': id}) - return dict(image=make_image_dict(updated_image)) - except exception.Invalid as e: - msg = (_("Failed to update image metadata. 
" - "Got error: %s") % encodeutils.exception_to_unicode(e)) - LOG.error(msg) - return exc.HTTPBadRequest(msg) - except exception.ImageNotFound: - LOG.info(_LI("Image %(id)s not found"), {'id': id}) - raise exc.HTTPNotFound(body='Image not found', - request=req, - content_type='text/plain') - except exception.ForbiddenPublicImage: - LOG.info(_LI("Update denied for public image %(id)s"), {'id': id}) - raise exc.HTTPForbidden() - except exception.Forbidden: - # If it's private and doesn't belong to them, don't let on - # that it exists - LOG.info(_LI("Access denied to image %(id)s but returning" - " 'not found'"), {'id': id}) - raise exc.HTTPNotFound(body='Image not found', - request=req, - content_type='text/plain') - except exception.Conflict as e: - LOG.info(encodeutils.exception_to_unicode(e)) - raise exc.HTTPConflict(body='Image operation conflicts', - request=req, - content_type='text/plain') - except Exception: - LOG.exception(_LE("Unable to update image %s") % id) - raise - - -def _limit_locations(image): - locations = image.pop('locations', []) - image['location_data'] = locations - image['location'] = None - for loc in locations: - if loc['status'] == 'active': - image['location'] = loc['url'] - break - - -def make_image_dict(image): - """Create a dict representation of an image which we can use to - serialize the image. - """ - - def _fetch_attrs(d, attrs): - return {a: d[a] for a in attrs if a in d.keys()} - - # TODO(sirp): should this be a dict, or a list of dicts? 
- # A plain dict is more convenient, but list of dicts would provide - # access to created_at, etc - properties = {p['name']: p['value'] for p in image['properties'] - if not p['deleted']} - - image_dict = _fetch_attrs(image, glance.db.IMAGE_ATTRS) - image_dict['properties'] = properties - _limit_locations(image_dict) - - return image_dict - - -def create_resource(): - """Images resource factory method.""" - deserializer = wsgi.JSONRequestDeserializer() - serializer = wsgi.JSONResponseSerializer() - return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/glance/registry/api/v1/members.py b/glance/registry/api/v1/members.py deleted file mode 100644 index eba49ba2..00000000 --- a/glance/registry/api/v1/members.py +++ /dev/null @@ -1,366 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_log import log as logging -from oslo_utils import encodeutils -import webob.exc - -from glance.common import exception -from glance.common import utils -from glance.common import wsgi -import glance.db -from glance.i18n import _, _LI, _LW - - -LOG = logging.getLogger(__name__) - - -class Controller(object): - - def _check_can_access_image_members(self, context): - if context.owner is None and not context.is_admin: - raise webob.exc.HTTPUnauthorized(_("No authenticated user")) - - def __init__(self): - self.db_api = glance.db.get_api() - - def is_image_sharable(self, context, image): - """Return True if the image can be shared to others in this context.""" - # Is admin == image sharable - if context.is_admin: - return True - - # Only allow sharing if we have an owner - if context.owner is None: - return False - - # If we own the image, we can share it - if context.owner == image['owner']: - return True - - members = self.db_api.image_member_find(context, - image_id=image['id'], - member=context.owner) - if members: - return members[0]['can_share'] - - return False - - def index(self, req, image_id): - """ - Get the members of an image. 
- """ - try: - self.db_api.image_get(req.context, image_id, v1_mode=True) - except exception.NotFound: - msg = _("Image %(id)s not found") % {'id': image_id} - LOG.warn(msg) - raise webob.exc.HTTPNotFound(msg) - except exception.Forbidden: - # If it's private and doesn't belong to them, don't let on - # that it exists - msg = _LW("Access denied to image %(id)s but returning" - " 'not found'") % {'id': image_id} - LOG.warn(msg) - raise webob.exc.HTTPNotFound() - - members = self.db_api.image_member_find(req.context, image_id=image_id) - LOG.debug("Returning member list for image %(id)s", {'id': image_id}) - return dict(members=make_member_list(members, - member_id='member', - can_share='can_share')) - - @utils.mutating - def update_all(self, req, image_id, body): - """ - Replaces the members of the image with those specified in the - body. The body is a dict with the following format:: - - {'memberships': [ - {'member_id': , - ['can_share': [True|False]]}, ... - ]} - """ - self._check_can_access_image_members(req.context) - - # Make sure the image exists - try: - image = self.db_api.image_get(req.context, image_id, v1_mode=True) - except exception.NotFound: - msg = _("Image %(id)s not found") % {'id': image_id} - LOG.warn(msg) - raise webob.exc.HTTPNotFound(msg) - except exception.Forbidden: - # If it's private and doesn't belong to them, don't let on - # that it exists - msg = _LW("Access denied to image %(id)s but returning" - " 'not found'") % {'id': image_id} - LOG.warn(msg) - raise webob.exc.HTTPNotFound() - - # Can they manipulate the membership? - if not self.is_image_sharable(req.context, image): - msg = (_LW("User lacks permission to share image %(id)s") % - {'id': image_id}) - LOG.warn(msg) - msg = _("No permission to share that image") - raise webob.exc.HTTPForbidden(msg) - - # Get the membership list - try: - memb_list = body['memberships'] - except Exception as e: - # Malformed entity... 
- msg = _LW("Invalid membership association specified for " - "image %(id)s") % {'id': image_id} - LOG.warn(msg) - msg = (_("Invalid membership association: %s") % - encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPBadRequest(explanation=msg) - - add = [] - existing = {} - # Walk through the incoming memberships - for memb in memb_list: - try: - datum = dict(image_id=image['id'], - member=memb['member_id'], - can_share=None) - except Exception as e: - # Malformed entity... - msg = _LW("Invalid membership association specified for " - "image %(id)s") % {'id': image_id} - LOG.warn(msg) - msg = (_("Invalid membership association: %s") % - encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPBadRequest(explanation=msg) - - # Figure out what can_share should be - if 'can_share' in memb: - datum['can_share'] = bool(memb['can_share']) - - # Try to find the corresponding membership - members = self.db_api.image_member_find(req.context, - image_id=datum['image_id'], - member=datum['member'], - include_deleted=True) - try: - member = members[0] - except IndexError: - # Default can_share - datum['can_share'] = bool(datum['can_share']) - add.append(datum) - else: - # Are we overriding can_share? - if datum['can_share'] is None: - datum['can_share'] = members[0]['can_share'] - - existing[member['id']] = { - 'values': datum, - 'membership': member, - } - - # We now have a filtered list of memberships to add and - # memberships to modify. Let's start by walking through all - # the existing image memberships... 
- existing_members = self.db_api.image_member_find(req.context, - image_id=image['id'], - include_deleted=True) - for member in existing_members: - if member['id'] in existing: - # Just update the membership in place - update = existing[member['id']]['values'] - self.db_api.image_member_update(req.context, - member['id'], - update) - else: - if not member['deleted']: - # Outdated one; needs to be deleted - self.db_api.image_member_delete(req.context, member['id']) - - # Now add the non-existent ones - for memb in add: - self.db_api.image_member_create(req.context, memb) - - # Make an appropriate result - LOG.info(_LI("Successfully updated memberships for image %(id)s"), - {'id': image_id}) - return webob.exc.HTTPNoContent() - - @utils.mutating - def update(self, req, image_id, id, body=None): - """ - Adds a membership to the image, or updates an existing one. - If a body is present, it is a dict with the following format:: - - {'member': { - 'can_share': [True|False] - }} - - If `can_share` is provided, the member's ability to share is - set accordingly. If it is not provided, existing memberships - remain unchanged and new memberships default to False. - """ - self._check_can_access_image_members(req.context) - - # Make sure the image exists - try: - image = self.db_api.image_get(req.context, image_id, v1_mode=True) - except exception.NotFound: - msg = _("Image %(id)s not found") % {'id': image_id} - LOG.warn(msg) - raise webob.exc.HTTPNotFound(msg) - except exception.Forbidden: - # If it's private and doesn't belong to them, don't let on - # that it exists - msg = _LW("Access denied to image %(id)s but returning" - " 'not found'") % {'id': image_id} - LOG.warn(msg) - raise webob.exc.HTTPNotFound() - - # Can they manipulate the membership? 
- if not self.is_image_sharable(req.context, image): - msg = (_LW("User lacks permission to share image %(id)s") % - {'id': image_id}) - LOG.warn(msg) - msg = _("No permission to share that image") - raise webob.exc.HTTPForbidden(msg) - - # Determine the applicable can_share value - can_share = None - if body: - try: - can_share = bool(body['member']['can_share']) - except Exception as e: - # Malformed entity... - msg = _LW("Invalid membership association specified for " - "image %(id)s") % {'id': image_id} - LOG.warn(msg) - msg = (_("Invalid membership association: %s") % - encodeutils.exception_to_unicode(e)) - raise webob.exc.HTTPBadRequest(explanation=msg) - - # Look up an existing membership... - members = self.db_api.image_member_find(req.context, - image_id=image_id, - member=id, - include_deleted=True) - if members: - if can_share is not None: - values = dict(can_share=can_share) - self.db_api.image_member_update(req.context, - members[0]['id'], - values) - else: - values = dict(image_id=image['id'], member=id, - can_share=bool(can_share)) - self.db_api.image_member_create(req.context, values) - - LOG.info(_LI("Successfully updated a membership for image %(id)s"), - {'id': image_id}) - return webob.exc.HTTPNoContent() - - @utils.mutating - def delete(self, req, image_id, id): - """ - Removes a membership from the image. - """ - self._check_can_access_image_members(req.context) - - # Make sure the image exists - try: - image = self.db_api.image_get(req.context, image_id, v1_mode=True) - except exception.NotFound: - msg = _("Image %(id)s not found") % {'id': image_id} - LOG.warn(msg) - raise webob.exc.HTTPNotFound(msg) - except exception.Forbidden: - # If it's private and doesn't belong to them, don't let on - # that it exists - msg = _LW("Access denied to image %(id)s but returning" - " 'not found'") % {'id': image_id} - LOG.warn(msg) - raise webob.exc.HTTPNotFound() - - # Can they manipulate the membership? 
- if not self.is_image_sharable(req.context, image): - msg = (_LW("User lacks permission to share image %(id)s") % - {'id': image_id}) - LOG.warn(msg) - msg = _("No permission to share that image") - raise webob.exc.HTTPForbidden(msg) - - # Look up an existing membership - members = self.db_api.image_member_find(req.context, - image_id=image_id, - member=id) - if members: - self.db_api.image_member_delete(req.context, members[0]['id']) - else: - LOG.debug("%(id)s is not a member of image %(image_id)s", - {'id': id, 'image_id': image_id}) - msg = _("Membership could not be found.") - raise webob.exc.HTTPNotFound(explanation=msg) - - # Make an appropriate result - LOG.info(_LI("Successfully deleted a membership from image %(id)s"), - {'id': image_id}) - return webob.exc.HTTPNoContent() - - def default(self, req, *args, **kwargs): - """This will cover the missing 'show' and 'create' actions""" - LOG.debug("The method %s is not allowed for this resource", - req.environ['REQUEST_METHOD']) - raise webob.exc.HTTPMethodNotAllowed( - headers=[('Allow', 'PUT, DELETE')]) - - def index_shared_images(self, req, id): - """ - Retrieves images shared with the given member. - """ - try: - members = self.db_api.image_member_find(req.context, member=id) - except exception.NotFound: - msg = _LW("Member %(id)s not found") % {'id': id} - LOG.warn(msg) - msg = _("Membership could not be found.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - LOG.debug("Returning list of images shared with member %(id)s", - {'id': id}) - return dict(shared_images=make_member_list(members, - image_id='image_id', - can_share='can_share')) - - -def make_member_list(members, **attr_map): - """ - Create a dict representation of a list of members which we can use - to serialize the members list. Keyword arguments map the names of - optional attributes to include to the database attribute. 
- """ - - def _fetch_memb(memb, attr_map): - return {k: memb[v] for k, v in attr_map.items() if v in memb.keys()} - - # Return the list of members with the given attribute mapping - return [_fetch_memb(memb, attr_map) for memb in members] - - -def create_resource(): - """Image members resource factory method.""" - deserializer = wsgi.JSONRequestDeserializer() - serializer = wsgi.JSONResponseSerializer() - return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/glance/registry/api/v2/__init__.py b/glance/registry/api/v2/__init__.py deleted file mode 100644 index bdea2208..00000000 --- a/glance/registry/api/v2/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from glance.common import wsgi -from glance.registry.api.v2 import rpc - - -def init(mapper): - rpc_resource = rpc.create_resource() - mapper.connect("/rpc", controller=rpc_resource, - conditions=dict(method=["POST"]), - action="__call__") - - -class API(wsgi.Router): - """WSGI entry point for all Registry requests.""" - - def __init__(self, mapper): - mapper = mapper or wsgi.APIMapper() - - init(mapper) - - super(API, self).__init__(mapper) diff --git a/glance/registry/api/v2/rpc.py b/glance/registry/api/v2/rpc.py deleted file mode 100644 index 00496843..00000000 --- a/glance/registry/api/v2/rpc.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -RPC Controller -""" - -from oslo_config import cfg - -from glance.common import rpc -from glance.common import wsgi -import glance.db -from glance.i18n import _ - - -CONF = cfg.CONF - - -class Controller(rpc.Controller): - - def __init__(self, raise_exc=False): - super(Controller, self).__init__(raise_exc) - - # NOTE(flaper87): Avoid using registry's db - # driver for the registry service. It would - # end up in an infinite loop. - if CONF.data_api == "glance.db.registry.api": - msg = _("Registry service can't use %s") % CONF.data_api - raise RuntimeError(msg) - - # NOTE(flaper87): Register the - # db_api as a resource to expose. - db_api = glance.db.get_api() - self.register(glance.db.unwrap(db_api)) - - -def create_resource(): - """Images resource factory method.""" - deserializer = rpc.RPCJSONDeserializer() - serializer = rpc.RPCJSONSerializer() - return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/glance/registry/client/__init__.py b/glance/registry/client/__init__.py deleted file mode 100644 index 6dc5fca7..00000000 --- a/glance/registry/client/__init__.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from glance.i18n import _ - - -registry_client_opts = [ - cfg.StrOpt('registry_client_protocol', - default='http', - choices=('http', 'https'), - help=_(""" -Protocol to use for communication with the registry server. - -Provide a string value representing the protocol to use for -communication with the registry server. By default, this option is -set to ``http`` and the connection is not secure. - -This option can be set to ``https`` to establish a secure connection -to the registry server. In this case, provide a key to use for the -SSL connection using the ``registry_client_key_file`` option. Also -include the CA file and cert file using the options -``registry_client_ca_file`` and ``registry_client_cert_file`` -respectively. - -Possible values: - * http - * https - -Related options: - * registry_client_key_file - * registry_client_cert_file - * registry_client_ca_file - -""")), - cfg.StrOpt('registry_client_key_file', - sample_default='/etc/ssl/key/key-file.pem', - help=_(""" -Absolute path to the private key file. - -Provide a string value representing a valid absolute path to the -private key file to use for establishing a secure connection to -the registry server. - -NOTE: This option must be set if ``registry_client_protocol`` is -set to ``https``. Alternatively, the GLANCE_CLIENT_KEY_FILE -environment variable may be set to a filepath of the key file. - -Possible values: - * String value representing a valid absolute path to the key - file. 
- -Related options: - * registry_client_protocol - -""")), - cfg.StrOpt('registry_client_cert_file', - sample_default='/etc/ssl/certs/file.crt', - help=_(""" -Absolute path to the certificate file. - -Provide a string value representing a valid absolute path to the -certificate file to use for establishing a secure connection to -the registry server. - -NOTE: This option must be set if ``registry_client_protocol`` is -set to ``https``. Alternatively, the GLANCE_CLIENT_CERT_FILE -environment variable may be set to a filepath of the certificate -file. - -Possible values: - * String value representing a valid absolute path to the - certificate file. - -Related options: - * registry_client_protocol - -""")), - cfg.StrOpt('registry_client_ca_file', - sample_default='/etc/ssl/cafile/file.ca', - help=_(""" -Absolute path to the Certificate Authority file. - -Provide a string value representing a valid absolute path to the -certificate authority file to use for establishing a secure -connection to the registry server. - -NOTE: This option must be set if ``registry_client_protocol`` is -set to ``https``. Alternatively, the GLANCE_CLIENT_CA_FILE -environment variable may be set to a filepath of the CA file. -This option is ignored if the ``registry_client_insecure`` option -is set to ``True``. - -Possible values: - * String value representing a valid absolute path to the CA - file. - -Related options: - * registry_client_protocol - * registry_client_insecure - -""")), - cfg.BoolOpt('registry_client_insecure', - default=False, - help=_(""" -Set verification of the registry server certificate. - -Provide a boolean value to determine whether or not to validate -SSL connections to the registry server. By default, this option -is set to ``False`` and the SSL connections are validated. - -If set to ``True``, the connection to the registry server is not -validated via a certifying authority and the -``registry_client_ca_file`` option is ignored. 
This is the -registry's equivalent of specifying --insecure on the command line -using glanceclient for the API. - -Possible values: - * True - * False - -Related options: - * registry_client_protocol - * registry_client_ca_file - -""")), - cfg.IntOpt('registry_client_timeout', - default=600, - min=0, - help=_(""" -Timeout value for registry requests. - -Provide an integer value representing the period of time in seconds -that the API server will wait for a registry request to complete. -The default value is 600 seconds. - -A value of 0 implies that a request will never timeout. - -Possible values: - * Zero - * Positive integer - -Related options: - * None - -""")), -] - -_DEPRECATE_USE_USER_TOKEN_MSG = ('This option was considered harmful and ' - 'has been deprecated in M release. It will ' - 'be removed in O release. For more ' - 'information read OSSN-0060. ' - 'Related functionality with uploading big ' - 'images has been implemented with Keystone ' - 'trusts support.') - -registry_client_ctx_opts = [ - cfg.BoolOpt('use_user_token', default=True, deprecated_for_removal=True, - deprecated_reason=_DEPRECATE_USE_USER_TOKEN_MSG, - help=_('Whether to pass through the user token when ' - 'making requests to the registry. To prevent ' - 'failures with token expiration during big ' - 'files upload, it is recommended to set this ' - 'parameter to False.' - 'If "use_user_token" is not in effect, then ' - 'admin credentials can be specified.')), - cfg.StrOpt('admin_user', secret=True, deprecated_for_removal=True, - deprecated_reason=_DEPRECATE_USE_USER_TOKEN_MSG, - help=_('The administrators user name. ' - 'If "use_user_token" is not in effect, then ' - 'admin credentials can be specified.')), - cfg.StrOpt('admin_password', secret=True, deprecated_for_removal=True, - deprecated_reason=_DEPRECATE_USE_USER_TOKEN_MSG, - help=_('The administrators password. 
' - 'If "use_user_token" is not in effect, then ' - 'admin credentials can be specified.')), - cfg.StrOpt('admin_tenant_name', secret=True, deprecated_for_removal=True, - deprecated_reason=_DEPRECATE_USE_USER_TOKEN_MSG, - help=_('The tenant name of the administrative user. ' - 'If "use_user_token" is not in effect, then ' - 'admin tenant name can be specified.')), - cfg.StrOpt('auth_url', deprecated_for_removal=True, - deprecated_reason=_DEPRECATE_USE_USER_TOKEN_MSG, - help=_('The URL to the keystone service. ' - 'If "use_user_token" is not in effect and ' - 'using keystone auth, then URL of keystone ' - 'can be specified.')), - cfg.StrOpt('auth_strategy', default='noauth', deprecated_for_removal=True, - deprecated_reason=_DEPRECATE_USE_USER_TOKEN_MSG, - help=_('The strategy to use for authentication. ' - 'If "use_user_token" is not in effect, then ' - 'auth strategy can be specified.')), - cfg.StrOpt('auth_region', deprecated_for_removal=True, - deprecated_reason=_DEPRECATE_USE_USER_TOKEN_MSG, - help=_('The region for the authentication service. ' - 'If "use_user_token" is not in effect and ' - 'using keystone auth, then region name can ' - 'be specified.')), -] - -CONF = cfg.CONF -CONF.register_opts(registry_client_opts) -CONF.register_opts(registry_client_ctx_opts) diff --git a/glance/registry/client/v1/__init__.py b/glance/registry/client/v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/registry/client/v1/api.py b/glance/registry/client/v1/api.py deleted file mode 100644 index 2c260d2f..00000000 --- a/glance/registry/client/v1/api.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Registry's Client API -""" - -import os - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils - -from glance.common import exception -from glance.i18n import _ -from glance.registry.client.v1 import client - -LOG = logging.getLogger(__name__) - -registry_client_ctx_opts = [ - cfg.BoolOpt('send_identity_headers', - default=False, - help=_(""" -Send headers received from identity when making requests to -registry. - -Typically, Glance registry can be deployed in multiple flavors, -which may or may not include authentication. For example, -``trusted-auth`` is a flavor that does not require the registry -service to authenticate the requests it receives. However, the -registry service may still need a user context to be populated to -serve the requests. This can be achieved by the caller -(the Glance API usually) passing through the headers it received -from authenticating with identity for the same request. The typical -headers sent are ``X-User-Id``, ``X-Tenant-Id``, ``X-Roles``, -``X-Identity-Status`` and ``X-Service-Catalog``. - -Provide a boolean value to determine whether to send the identity -headers to provide tenant and user information along with the -requests to registry service. By default, this option is set to -``False``, which means that user and tenant information is not -available readily. It must be obtained by authenticating. Hence, if -this is set to ``False``, ``flavor`` must be set to value that -either includes authentication or authenticated user context. 
- -Possible values: - * True - * False - -Related options: - * flavor - -""")), -] - -CONF = cfg.CONF -CONF.register_opts(registry_client_ctx_opts) -_registry_client = 'glance.registry.client' -CONF.import_opt('registry_client_protocol', _registry_client) -CONF.import_opt('registry_client_key_file', _registry_client) -CONF.import_opt('registry_client_cert_file', _registry_client) -CONF.import_opt('registry_client_ca_file', _registry_client) -CONF.import_opt('registry_client_insecure', _registry_client) -CONF.import_opt('registry_client_timeout', _registry_client) -CONF.import_opt('use_user_token', _registry_client) -CONF.import_opt('admin_user', _registry_client) -CONF.import_opt('admin_password', _registry_client) -CONF.import_opt('admin_tenant_name', _registry_client) -CONF.import_opt('auth_url', _registry_client) -CONF.import_opt('auth_strategy', _registry_client) -CONF.import_opt('auth_region', _registry_client) -CONF.import_opt('metadata_encryption_key', 'glance.common.config') - -_CLIENT_CREDS = None -_CLIENT_HOST = None -_CLIENT_PORT = None -_CLIENT_KWARGS = {} -# AES key used to encrypt 'location' metadata -_METADATA_ENCRYPTION_KEY = None - - -def configure_registry_client(): - """ - Sets up a registry client for use in registry lookups - """ - global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT, _METADATA_ENCRYPTION_KEY - try: - host, port = CONF.registry_host, CONF.registry_port - except cfg.ConfigFileValueError: - msg = _("Configuration option was not valid") - LOG.error(msg) - raise exception.BadRegistryConnectionConfiguration(reason=msg) - except IndexError: - msg = _("Could not find required configuration option") - LOG.error(msg) - raise exception.BadRegistryConnectionConfiguration(reason=msg) - - _CLIENT_HOST = host - _CLIENT_PORT = port - _METADATA_ENCRYPTION_KEY = CONF.metadata_encryption_key - _CLIENT_KWARGS = { - 'use_ssl': CONF.registry_client_protocol.lower() == 'https', - 'key_file': CONF.registry_client_key_file, - 'cert_file': 
CONF.registry_client_cert_file, - 'ca_file': CONF.registry_client_ca_file, - 'insecure': CONF.registry_client_insecure, - 'timeout': CONF.registry_client_timeout, - } - - if not CONF.use_user_token: - configure_registry_admin_creds() - - -def configure_registry_admin_creds(): - global _CLIENT_CREDS - - if CONF.auth_url or os.getenv('OS_AUTH_URL'): - strategy = 'keystone' - else: - strategy = CONF.auth_strategy - - _CLIENT_CREDS = { - 'user': CONF.admin_user, - 'password': CONF.admin_password, - 'username': CONF.admin_user, - 'tenant': CONF.admin_tenant_name, - 'auth_url': os.getenv('OS_AUTH_URL') or CONF.auth_url, - 'strategy': strategy, - 'region': CONF.auth_region, - } - - -def get_registry_client(cxt): - global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT - global _METADATA_ENCRYPTION_KEY - kwargs = _CLIENT_KWARGS.copy() - if CONF.use_user_token: - kwargs['auth_token'] = cxt.auth_token - if _CLIENT_CREDS: - kwargs['creds'] = _CLIENT_CREDS - - if CONF.send_identity_headers: - identity_headers = { - 'X-User-Id': cxt.user or '', - 'X-Tenant-Id': cxt.tenant or '', - 'X-Roles': ','.join(cxt.roles), - 'X-Identity-Status': 'Confirmed', - 'X-Service-Catalog': jsonutils.dumps(cxt.service_catalog), - } - kwargs['identity_headers'] = identity_headers - - kwargs['request_id'] = cxt.request_id - - return client.RegistryClient(_CLIENT_HOST, _CLIENT_PORT, - _METADATA_ENCRYPTION_KEY, **kwargs) - - -def get_images_list(context, **kwargs): - c = get_registry_client(context) - return c.get_images(**kwargs) - - -def get_images_detail(context, **kwargs): - c = get_registry_client(context) - return c.get_images_detailed(**kwargs) - - -def get_image_metadata(context, image_id): - c = get_registry_client(context) - return c.get_image(image_id) - - -def add_image_metadata(context, image_meta): - LOG.debug("Adding image metadata...") - c = get_registry_client(context) - return c.add_image(image_meta) - - -def update_image_metadata(context, image_id, image_meta, - 
purge_props=False, from_state=None): - LOG.debug("Updating image metadata for image %s...", image_id) - c = get_registry_client(context) - return c.update_image(image_id, image_meta, purge_props=purge_props, - from_state=from_state) - - -def delete_image_metadata(context, image_id): - LOG.debug("Deleting image metadata for image %s...", image_id) - c = get_registry_client(context) - return c.delete_image(image_id) - - -def get_image_members(context, image_id): - c = get_registry_client(context) - return c.get_image_members(image_id) - - -def get_member_images(context, member_id): - c = get_registry_client(context) - return c.get_member_images(member_id) - - -def replace_members(context, image_id, member_data): - c = get_registry_client(context) - return c.replace_members(image_id, member_data) - - -def add_member(context, image_id, member_id, can_share=None): - c = get_registry_client(context) - return c.add_member(image_id, member_id, can_share=can_share) - - -def delete_member(context, image_id, member_id): - c = get_registry_client(context) - return c.delete_member(image_id, member_id) diff --git a/glance/registry/client/v1/client.py b/glance/registry/client/v1/client.py deleted file mode 100644 index 45c2fb2b..00000000 --- a/glance/registry/client/v1/client.py +++ /dev/null @@ -1,276 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Simple client class to speak with any RESTful service that implements -the Glance Registry API -""" - -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import excutils -import six - -from glance.common.client import BaseClient -from glance.common import crypt -from glance.common import exception -from glance.i18n import _LE -from glance.registry.api.v1 import images - -LOG = logging.getLogger(__name__) - - -class RegistryClient(BaseClient): - - """A client for the Registry image metadata service.""" - - DEFAULT_PORT = 9191 - - def __init__(self, host=None, port=None, metadata_encryption_key=None, - identity_headers=None, **kwargs): - """ - :param metadata_encryption_key: Key used to encrypt 'location' metadata - """ - self.metadata_encryption_key = metadata_encryption_key - # NOTE (dprince): by default base client overwrites host and port - # settings when using keystone. configure_via_auth=False disables - # this behaviour to ensure we still send requests to the Registry API - self.identity_headers = identity_headers - # store available passed request id for do_request call - self._passed_request_id = kwargs.pop('request_id', None) - BaseClient.__init__(self, host, port, configure_via_auth=False, - **kwargs) - - def decrypt_metadata(self, image_metadata): - if self.metadata_encryption_key: - if image_metadata.get('location'): - location = crypt.urlsafe_decrypt(self.metadata_encryption_key, - image_metadata['location']) - image_metadata['location'] = location - if image_metadata.get('location_data'): - ld = [] - for loc in image_metadata['location_data']: - url = crypt.urlsafe_decrypt(self.metadata_encryption_key, - loc['url']) - ld.append({'id': loc['id'], 'url': url, - 'metadata': loc['metadata'], - 'status': loc['status']}) - image_metadata['location_data'] = ld - return image_metadata - - def encrypt_metadata(self, image_metadata): - if self.metadata_encryption_key: - location_url = 
image_metadata.get('location') - if location_url: - location = crypt.urlsafe_encrypt(self.metadata_encryption_key, - location_url, - 64) - image_metadata['location'] = location - if image_metadata.get('location_data'): - ld = [] - for loc in image_metadata['location_data']: - if loc['url'] == location_url: - url = location - else: - url = crypt.urlsafe_encrypt( - self.metadata_encryption_key, loc['url'], 64) - ld.append({'url': url, 'metadata': loc['metadata'], - 'status': loc['status'], - # NOTE(zhiyan): New location has no ID field. - 'id': loc.get('id')}) - image_metadata['location_data'] = ld - return image_metadata - - def get_images(self, **kwargs): - """ - Returns a list of image id/name mappings from Registry - - :param filters: dict of keys & expected values to filter results - :param marker: image id after which to start page - :param limit: max number of images to return - :param sort_key: results will be ordered by this image attribute - :param sort_dir: direction in which to order results (asc, desc) - """ - params = self._extract_params(kwargs, images.SUPPORTED_PARAMS) - res = self.do_request("GET", "/images", params=params) - image_list = jsonutils.loads(res.read())['images'] - for image in image_list: - image = self.decrypt_metadata(image) - return image_list - - def do_request(self, method, action, **kwargs): - try: - kwargs['headers'] = kwargs.get('headers', {}) - kwargs['headers'].update(self.identity_headers or {}) - if self._passed_request_id: - request_id = self._passed_request_id - if six.PY3 and isinstance(request_id, bytes): - request_id = request_id.decode('utf-8') - kwargs['headers']['X-Openstack-Request-ID'] = request_id - res = super(RegistryClient, self).do_request(method, - action, - **kwargs) - status = res.status - request_id = res.getheader('x-openstack-request-id') - if six.PY3 and isinstance(request_id, bytes): - request_id = request_id.decode('utf-8') - LOG.debug("Registry request %(method)s %(action)s HTTP %(status)s" - " 
request id %(request_id)s", - {'method': method, 'action': action, - 'status': status, 'request_id': request_id}) - - # a 404 condition is not fatal, we shouldn't log at a fatal - # level for it. - except exception.NotFound: - raise - - # The following exception logging should only really be used - # in extreme and unexpected cases. - except Exception as exc: - with excutils.save_and_reraise_exception(): - exc_name = exc.__class__.__name__ - LOG.exception(_LE("Registry client request %(method)s " - "%(action)s raised %(exc_name)s"), - {'method': method, 'action': action, - 'exc_name': exc_name}) - return res - - def get_images_detailed(self, **kwargs): - """ - Returns a list of detailed image data mappings from Registry - - :param filters: dict of keys & expected values to filter results - :param marker: image id after which to start page - :param limit: max number of images to return - :param sort_key: results will be ordered by this image attribute - :param sort_dir: direction in which to order results (asc, desc) - """ - params = self._extract_params(kwargs, images.SUPPORTED_PARAMS) - res = self.do_request("GET", "/images/detail", params=params) - image_list = jsonutils.loads(res.read())['images'] - for image in image_list: - image = self.decrypt_metadata(image) - return image_list - - def get_image(self, image_id): - """Returns a mapping of image metadata from Registry.""" - res = self.do_request("GET", "/images/%s" % image_id) - data = jsonutils.loads(res.read())['image'] - return self.decrypt_metadata(data) - - def add_image(self, image_metadata): - """ - Tells registry about an image's metadata - """ - headers = { - 'Content-Type': 'application/json', - } - - if 'image' not in image_metadata: - image_metadata = dict(image=image_metadata) - - encrypted_metadata = self.encrypt_metadata(image_metadata['image']) - image_metadata['image'] = encrypted_metadata - body = jsonutils.dump_as_bytes(image_metadata) - - res = self.do_request("POST", "/images", body=body, 
headers=headers) - # Registry returns a JSONified dict(image=image_info) - data = jsonutils.loads(res.read()) - image = data['image'] - return self.decrypt_metadata(image) - - def update_image(self, image_id, image_metadata, purge_props=False, - from_state=None): - """ - Updates Registry's information about an image - """ - if 'image' not in image_metadata: - image_metadata = dict(image=image_metadata) - - encrypted_metadata = self.encrypt_metadata(image_metadata['image']) - image_metadata['image'] = encrypted_metadata - image_metadata['from_state'] = from_state - body = jsonutils.dump_as_bytes(image_metadata) - - headers = { - 'Content-Type': 'application/json', - } - - if purge_props: - headers["X-Glance-Registry-Purge-Props"] = "true" - - res = self.do_request("PUT", "/images/%s" % image_id, body=body, - headers=headers) - data = jsonutils.loads(res.read()) - image = data['image'] - return self.decrypt_metadata(image) - - def delete_image(self, image_id): - """ - Deletes Registry's information about an image - """ - res = self.do_request("DELETE", "/images/%s" % image_id) - data = jsonutils.loads(res.read()) - image = data['image'] - return image - - def get_image_members(self, image_id): - """Return a list of membership associations from Registry.""" - res = self.do_request("GET", "/images/%s/members" % image_id) - data = jsonutils.loads(res.read())['members'] - return data - - def get_member_images(self, member_id): - """Return a list of membership associations from Registry.""" - res = self.do_request("GET", "/shared-images/%s" % member_id) - data = jsonutils.loads(res.read())['shared_images'] - return data - - def replace_members(self, image_id, member_data): - """Replace registry's information about image membership.""" - if isinstance(member_data, (list, tuple)): - member_data = dict(memberships=list(member_data)) - elif (isinstance(member_data, dict) and - 'memberships' not in member_data): - member_data = dict(memberships=[member_data]) - - body = 
jsonutils.dump_as_bytes(member_data) - - headers = {'Content-Type': 'application/json', } - - res = self.do_request("PUT", "/images/%s/members" % image_id, - body=body, headers=headers) - return self.get_status_code(res) == 204 - - def add_member(self, image_id, member_id, can_share=None): - """Add to registry's information about image membership.""" - body = None - headers = {} - # Build up a body if can_share is specified - if can_share is not None: - body = jsonutils.dump_as_bytes( - dict(member=dict(can_share=can_share))) - headers['Content-Type'] = 'application/json' - - url = "/images/%s/members/%s" % (image_id, member_id) - res = self.do_request("PUT", url, body=body, - headers=headers) - return self.get_status_code(res) == 204 - - def delete_member(self, image_id, member_id): - """Delete registry's information about image membership.""" - res = self.do_request("DELETE", "/images/%s/members/%s" % - (image_id, member_id)) - return self.get_status_code(res) == 204 diff --git a/glance/registry/client/v2/__init__.py b/glance/registry/client/v2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/registry/client/v2/api.py b/glance/registry/client/v2/api.py deleted file mode 100644 index 69a80168..00000000 --- a/glance/registry/client/v2/api.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright 2013 Red Hat, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Registry's Client V2 -""" - -import os - -from oslo_config import cfg -from oslo_log import log as logging - -from glance.common import exception -from glance.i18n import _ -from glance.registry.client.v2 import client - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF -_registry_client = 'glance.registry.client' -CONF.import_opt('registry_client_protocol', _registry_client) -CONF.import_opt('registry_client_key_file', _registry_client) -CONF.import_opt('registry_client_cert_file', _registry_client) -CONF.import_opt('registry_client_ca_file', _registry_client) -CONF.import_opt('registry_client_insecure', _registry_client) -CONF.import_opt('registry_client_timeout', _registry_client) -CONF.import_opt('use_user_token', _registry_client) -CONF.import_opt('admin_user', _registry_client) -CONF.import_opt('admin_password', _registry_client) -CONF.import_opt('admin_tenant_name', _registry_client) -CONF.import_opt('auth_url', _registry_client) -CONF.import_opt('auth_strategy', _registry_client) -CONF.import_opt('auth_region', _registry_client) - -_CLIENT_CREDS = None -_CLIENT_HOST = None -_CLIENT_PORT = None -_CLIENT_KWARGS = {} - - -def configure_registry_client(): - """ - Sets up a registry client for use in registry lookups - """ - global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT - try: - host, port = CONF.registry_host, CONF.registry_port - except cfg.ConfigFileValueError: - msg = _("Configuration option was not valid") - LOG.error(msg) - raise exception.BadRegistryConnectionConfiguration(msg) - except IndexError: - msg = _("Could not find required configuration option") - LOG.error(msg) - raise exception.BadRegistryConnectionConfiguration(msg) - - _CLIENT_HOST = host - _CLIENT_PORT = port - _CLIENT_KWARGS = { - 'use_ssl': CONF.registry_client_protocol.lower() == 'https', - 'key_file': CONF.registry_client_key_file, - 'cert_file': CONF.registry_client_cert_file, - 'ca_file': CONF.registry_client_ca_file, - 'insecure': CONF.registry_client_insecure, - 
'timeout': CONF.registry_client_timeout, - } - - if not CONF.use_user_token: - configure_registry_admin_creds() - - -def configure_registry_admin_creds(): - global _CLIENT_CREDS - - if CONF.auth_url or os.getenv('OS_AUTH_URL'): - strategy = 'keystone' - else: - strategy = CONF.auth_strategy - - _CLIENT_CREDS = { - 'user': CONF.admin_user, - 'password': CONF.admin_password, - 'username': CONF.admin_user, - 'tenant': CONF.admin_tenant_name, - 'auth_url': os.getenv('OS_AUTH_URL') or CONF.auth_url, - 'strategy': strategy, - 'region': CONF.auth_region, - } - - -def get_registry_client(cxt): - global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT - kwargs = _CLIENT_KWARGS.copy() - if CONF.use_user_token: - kwargs['auth_token'] = cxt.auth_token - if _CLIENT_CREDS: - kwargs['creds'] = _CLIENT_CREDS - return client.RegistryClient(_CLIENT_HOST, _CLIENT_PORT, **kwargs) diff --git a/glance/registry/client/v2/client.py b/glance/registry/client/v2/client.py deleted file mode 100644 index c6d61a7c..00000000 --- a/glance/registry/client/v2/client.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Simple client class to speak with any RESTful service that implements -the Glance Registry API -""" - -from glance.common import rpc - - -class RegistryClient(rpc.RPCClient): - """Registry's V2 Client.""" - - DEFAULT_PORT = 9191 diff --git a/glance/schema.py b/glance/schema.py deleted file mode 100644 index f1bf4310..00000000 --- a/glance/schema.py +++ /dev/null @@ -1,240 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import jsonschema -from oslo_utils import encodeutils -import six - -from glance.common import exception -from glance.i18n import _ - - -class Schema(object): - - def __init__(self, name, properties=None, links=None, required=None, - definitions=None): - self.name = name - if properties is None: - properties = {} - self.properties = properties - self.links = links - self.required = required - self.definitions = definitions - - def validate(self, obj): - try: - jsonschema.validate(obj, self.raw()) - except jsonschema.ValidationError as e: - reason = encodeutils.exception_to_unicode(e) - raise exception.InvalidObject(schema=self.name, reason=reason) - - def filter(self, obj): - filtered = {} - for key, value in six.iteritems(obj): - if self._filter_func(self.properties, key): - filtered[key] = value - - # NOTE(flaper87): This exists to allow for v1, null properties, - # to be used with the V2 API. 
During Kilo, it was allowed for the - # later to return None values without considering that V1 allowed - # for custom properties to be None, which is something V2 doesn't - # allow for. This small hack here will set V1 custom `None` pro- - # perties to an empty string so that they will be updated along - # with the image (if an update happens). - # - # We could skip the properties that are `None` but that would bring - # back the behavior we moved away from. Note that we can't consider - # doing a schema migration because we don't know which properties - # are "custom" and which came from `schema-image` if those custom - # properties were created with v1. - if key not in self.properties and value is None: - filtered[key] = '' - return filtered - - @staticmethod - def _filter_func(properties, key): - return key in properties - - def merge_properties(self, properties): - # Ensure custom props aren't attempting to override base props - original_keys = set(self.properties.keys()) - new_keys = set(properties.keys()) - intersecting_keys = original_keys.intersection(new_keys) - conflicting_keys = [k for k in intersecting_keys - if self.properties[k] != properties[k]] - if conflicting_keys: - props = ', '.join(conflicting_keys) - reason = _("custom properties (%(props)s) conflict " - "with base properties") - raise exception.SchemaLoadError(reason=reason % {'props': props}) - - self.properties.update(properties) - - def raw(self): - raw = { - 'name': self.name, - 'properties': self.properties, - 'additionalProperties': False, - } - if self.definitions: - raw['definitions'] = self.definitions - if self.required: - raw['required'] = self.required - if self.links: - raw['links'] = self.links - return raw - - def minimal(self): - minimal = { - 'name': self.name, - 'properties': self.properties - } - if self.definitions: - minimal['definitions'] = self.definitions - if self.required: - minimal['required'] = self.required - return minimal - - -class PermissiveSchema(Schema): - 
@staticmethod - def _filter_func(properties, key): - return True - - def raw(self): - raw = super(PermissiveSchema, self).raw() - raw['additionalProperties'] = {'type': 'string'} - return raw - - def minimal(self): - minimal = super(PermissiveSchema, self).raw() - return minimal - - -class CollectionSchema(object): - - def __init__(self, name, item_schema): - self.name = name - self.item_schema = item_schema - - def raw(self): - definitions = None - if self.item_schema.definitions: - definitions = self.item_schema.definitions - self.item_schema.definitions = None - raw = { - 'name': self.name, - 'properties': { - self.name: { - 'type': 'array', - 'items': self.item_schema.raw(), - }, - 'first': {'type': 'string'}, - 'next': {'type': 'string'}, - 'schema': {'type': 'string'}, - }, - 'links': [ - {'rel': 'first', 'href': '{first}'}, - {'rel': 'next', 'href': '{next}'}, - {'rel': 'describedby', 'href': '{schema}'}, - ], - } - if definitions: - raw['definitions'] = definitions - self.item_schema.definitions = definitions - - return raw - - def minimal(self): - definitions = None - if self.item_schema.definitions: - definitions = self.item_schema.definitions - self.item_schema.definitions = None - minimal = { - 'name': self.name, - 'properties': { - self.name: { - 'type': 'array', - 'items': self.item_schema.minimal(), - }, - 'schema': {'type': 'string'}, - }, - 'links': [ - {'rel': 'describedby', 'href': '{schema}'}, - ], - } - if definitions: - minimal['definitions'] = definitions - self.item_schema.definitions = definitions - - return minimal - - -class DictCollectionSchema(Schema): - def __init__(self, name, item_schema): - self.name = name - self.item_schema = item_schema - - def raw(self): - definitions = None - if self.item_schema.definitions: - definitions = self.item_schema.definitions - self.item_schema.definitions = None - raw = { - 'name': self.name, - 'properties': { - self.name: { - 'type': 'object', - 'additionalProperties': self.item_schema.raw(), - }, - 
'first': {'type': 'string'}, - 'next': {'type': 'string'}, - 'schema': {'type': 'string'}, - }, - 'links': [ - {'rel': 'first', 'href': '{first}'}, - {'rel': 'next', 'href': '{next}'}, - {'rel': 'describedby', 'href': '{schema}'}, - ], - } - if definitions: - raw['definitions'] = definitions - self.item_schema.definitions = definitions - - return raw - - def minimal(self): - definitions = None - if self.item_schema.definitions: - definitions = self.item_schema.definitions - self.item_schema.definitions = None - minimal = { - 'name': self.name, - 'properties': { - self.name: { - 'type': 'object', - 'additionalProperties': self.item_schema.minimal(), - }, - 'schema': {'type': 'string'}, - }, - 'links': [ - {'rel': 'describedby', 'href': '{schema}'}, - ], - } - if definitions: - minimal['definitions'] = definitions - self.item_schema.definitions = definitions - - return minimal diff --git a/glance/scrubber.py b/glance/scrubber.py deleted file mode 100644 index 1894c7f2..00000000 --- a/glance/scrubber.py +++ /dev/null @@ -1,479 +0,0 @@ -# Copyright 2010 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import calendar -import time - -import eventlet -from glance_store import exceptions as store_exceptions -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -import six - -from glance.common import crypt -from glance.common import exception -from glance import context -import glance.db as db_api -from glance.i18n import _, _LC, _LE, _LI, _LW -import glance.registry.client.v1.api as registry - -LOG = logging.getLogger(__name__) - -scrubber_opts = [ - cfg.IntOpt('scrub_time', default=0, min=0, - help=_(""" -The amount of time, in seconds, to delay image scrubbing. - -When delayed delete is turned on, an image is put into ``pending_delete`` -state upon deletion until the scrubber deletes its image data. Typically, soon -after the image is put into ``pending_delete`` state, it is available for -scrubbing. However, scrubbing can be delayed until a later point using this -configuration option. This option denotes the time period an image spends in -``pending_delete`` state before it is available for scrubbing. - -It is important to realize that this has storage implications. The larger the -``scrub_time``, the longer the time to reclaim backend storage from deleted -images. - -Possible values: - * Any non-negative integer - -Related options: - * ``delayed_delete`` - -""")), - cfg.IntOpt('scrub_pool_size', default=1, min=1, - help=_(""" -The size of thread pool to be used for scrubbing images. - -When there are a large number of images to scrub, it is beneficial to scrub -images in parallel so that the scrub queue stays in control and the backend -storage is reclaimed in a timely fashion. This configuration option denotes -the maximum number of images to be scrubbed in parallel. The default value is -one, which signifies serial scrubbing. Any value above one indicates parallel -scrubbing. 
- -Possible values: - * Any non-zero positive integer - -Related options: - * ``delayed_delete`` - -""")), - cfg.BoolOpt('delayed_delete', default=False, - help=_(""" -Turn on/off delayed delete. - -Typically when an image is deleted, the ``glance-api`` service puts the image -into ``deleted`` state and deletes its data at the same time. Delayed delete -is a feature in Glance that delays the actual deletion of image data until a -later point in time (as determined by the configuration option ``scrub_time``). -When delayed delete is turned on, the ``glance-api`` service puts the image -into ``pending_delete`` state upon deletion and leaves the image data in the -storage backend for the image scrubber to delete at a later time. The image -scrubber will move the image into ``deleted`` state upon successful deletion -of image data. - -NOTE: When delayed delete is turned on, image scrubber MUST be running as a -periodic task to prevent the backend storage from filling up with undesired -usage. - -Possible values: - * True - * False - -Related options: - * ``scrub_time`` - * ``wakeup_time`` - * ``scrub_pool_size`` - -""")), - - # Note: Though the conf option admin_role is used by other Glance - # service and their usage differs requiring us to have a differing - # help text here, oslo.config generator treats them as the same - # config option and would throw a DuplicateError exception in case - # of differing help texts. Hence we have the same help text for - # admin_role here and in context.py. - - cfg.StrOpt('admin_role', default='admin', - help=_(""" -Role used to identify an authenticated user as administrator. - -Provide a string value representing a Keystone role to identify an -administrative user. Users with this role will be granted -administrative privileges. The default value for this option is -'admin'. 
- -Possible values: - * A string value which is a valid Keystone role - -Related options: - * None - -""")), - cfg.BoolOpt('send_identity_headers', - default=False, - help=_(""" -Send headers received from identity when making requests to -registry. - -Typically, Glance registry can be deployed in multiple flavors, -which may or may not include authentication. For example, -``trusted-auth`` is a flavor that does not require the registry -service to authenticate the requests it receives. However, the -registry service may still need a user context to be populated to -serve the requests. This can be achieved by the caller -(the Glance API usually) passing through the headers it received -from authenticating with identity for the same request. The typical -headers sent are ``X-User-Id``, ``X-Tenant-Id``, ``X-Roles``, -``X-Identity-Status`` and ``X-Service-Catalog``. - -Provide a boolean value to determine whether to send the identity -headers to provide tenant and user information along with the -requests to registry service. By default, this option is set to -``False``, which means that user and tenant information is not -available readily. It must be obtained by authenticating. Hence, if -this is set to ``False``, ``flavor`` must be set to value that -either includes authentication or authenticated user context. - -Possible values: - * True - * False - -Related options: - * flavor - -""")), -] - -scrubber_cmd_opts = [ - cfg.IntOpt('wakeup_time', default=300, min=0, - help=_(""" -Time interval, in seconds, between scrubber runs in daemon mode. - -Scrubber can be run either as a cron job or daemon. When run as a daemon, this -configuration time specifies the time period between two runs. When the -scrubber wakes up, it fetches and scrubs all ``pending_delete`` images that -are available for scrubbing after taking ``scrub_time`` into consideration. - -If the wakeup time is set to a large number, there may be a large number of -images to be scrubbed for each run. 
Also, this impacts how quickly the backend -storage is reclaimed. - -Possible values: - * Any non-negative integer - -Related options: - * ``daemon`` - * ``delayed_delete`` - -""")) -] - -scrubber_cmd_cli_opts = [ - cfg.BoolOpt('daemon', - short='D', - default=False, - help=_(""" -Run scrubber as a daemon. - -This boolean configuration option indicates whether scrubber should -run as a long-running process that wakes up at regular intervals to -scrub images. The wake up interval can be specified using the -configuration option ``wakeup_time``. - -If this configuration option is set to ``False``, which is the -default value, scrubber runs once to scrub images and exits. In this -case, if the operator wishes to implement continuous scrubbing of -images, scrubber needs to be scheduled as a cron job. - -Possible values: - * True - * False - -Related options: - * ``wakeup_time`` - -""")) -] - -CONF = cfg.CONF -CONF.register_opts(scrubber_opts) -CONF.import_opt('metadata_encryption_key', 'glance.common.config') - - -class ScrubDBQueue(object): - """Database-based image scrub queue class.""" - def __init__(self): - self.scrub_time = CONF.scrub_time - self.metadata_encryption_key = CONF.metadata_encryption_key - registry.configure_registry_client() - registry.configure_registry_admin_creds() - admin_user = CONF.admin_user - admin_tenant = CONF.admin_tenant_name - - if CONF.send_identity_headers: - # When registry is operating in trusted-auth mode - roles = [CONF.admin_role] - self.admin_context = context.RequestContext(user=admin_user, - tenant=admin_tenant, - auth_token=None, - roles=roles) - self.registry = registry.get_registry_client(self.admin_context) - else: - ctxt = context.RequestContext() - self.registry = registry.get_registry_client(ctxt) - admin_token = self.registry.auth_token - self.admin_context = context.RequestContext(user=admin_user, - tenant=admin_tenant, - auth_token=admin_token) - - def add_location(self, image_id, location): - """Adding image 
location to scrub queue. - - :param image_id: The opaque image identifier - :param location: The opaque image location - - :returns: A boolean value to indicate success or not - """ - loc_id = location.get('id') - if loc_id: - db_api.get_api().image_location_delete(self.admin_context, - image_id, loc_id, - 'pending_delete') - return True - else: - return False - - def _get_images_page(self, marker): - filters = {'deleted': True, - 'is_public': 'none', - 'status': 'pending_delete'} - - if marker: - return self.registry.get_images_detailed(filters=filters, - marker=marker) - else: - return self.registry.get_images_detailed(filters=filters) - - def _get_all_images(self): - """Generator to fetch all appropriate images, paging as needed.""" - - marker = None - while True: - images = self._get_images_page(marker) - if len(images) == 0: - break - marker = images[-1]['id'] - - for image in images: - yield image - - def get_all_locations(self): - """Returns a list of image id and location tuple from scrub queue. - - :returns: a list of image id, location id and uri tuple from - scrub queue - - """ - ret = [] - - for image in self._get_all_images(): - deleted_at = image.get('deleted_at') - if not deleted_at: - continue - - # NOTE: Strip off microseconds which may occur after the last '.,' - # Example: 2012-07-07T19:14:34.974216 - date_str = deleted_at.rsplit('.', 1)[0].rsplit(',', 1)[0] - delete_time = calendar.timegm(time.strptime(date_str, - "%Y-%m-%dT%H:%M:%S")) - - if delete_time + self.scrub_time > time.time(): - continue - - for loc in image['location_data']: - if loc['status'] != 'pending_delete': - continue - - if self.metadata_encryption_key: - uri = crypt.urlsafe_encrypt(self.metadata_encryption_key, - loc['url'], 64) - else: - uri = loc['url'] - - ret.append((image['id'], loc['id'], uri)) - return ret - - def has_image(self, image_id): - """Returns whether the queue contains an image or not. 
- - :param image_id: The opaque image identifier - - :returns: a boolean value to inform including or not - """ - try: - image = self.registry.get_image(image_id) - return image['status'] == 'pending_delete' - except exception.NotFound: - return False - - -_db_queue = None - - -def get_scrub_queue(): - global _db_queue - if not _db_queue: - _db_queue = ScrubDBQueue() - return _db_queue - - -class Daemon(object): - def __init__(self, wakeup_time=300, threads=100): - LOG.info(_LI("Starting Daemon: wakeup_time=%(wakeup_time)s " - "threads=%(threads)s"), - {'wakeup_time': wakeup_time, 'threads': threads}) - self.wakeup_time = wakeup_time - self.event = eventlet.event.Event() - # This pool is used for periodic instantiation of scrubber - self.daemon_pool = eventlet.greenpool.GreenPool(threads) - - def start(self, application): - self._run(application) - - def wait(self): - try: - self.event.wait() - except KeyboardInterrupt: - msg = _LI("Daemon Shutdown on KeyboardInterrupt") - LOG.info(msg) - - def _run(self, application): - LOG.debug("Running application") - self.daemon_pool.spawn_n(application.run, self.event) - eventlet.spawn_after(self.wakeup_time, self._run, application) - LOG.debug("Next run scheduled in %s seconds", self.wakeup_time) - - -class Scrubber(object): - def __init__(self, store_api): - LOG.info(_LI("Initializing scrubber with configuration: %s"), - six.text_type({'registry_host': CONF.registry_host, - 'registry_port': CONF.registry_port})) - - self.store_api = store_api - - registry.configure_registry_client() - registry.configure_registry_admin_creds() - - # Here we create a request context with credentials to support - # delayed delete when using multi-tenant backend storage - admin_user = CONF.admin_user - admin_tenant = CONF.admin_tenant_name - - if CONF.send_identity_headers: - # When registry is operating in trusted-auth mode - roles = [CONF.admin_role] - self.admin_context = context.RequestContext(user=admin_user, - tenant=admin_tenant, - 
auth_token=None, - roles=roles) - self.registry = registry.get_registry_client(self.admin_context) - else: - ctxt = context.RequestContext() - self.registry = registry.get_registry_client(ctxt) - auth_token = self.registry.auth_token - self.admin_context = context.RequestContext(user=admin_user, - tenant=admin_tenant, - auth_token=auth_token) - - self.db_queue = get_scrub_queue() - self.pool = eventlet.greenpool.GreenPool(CONF.scrub_pool_size) - - def _get_delete_jobs(self): - try: - records = self.db_queue.get_all_locations() - except Exception as err: - # Note(dharinic): spawn_n, in Daemon mode will log the - # exception raised. Otherwise, exit 1 will occur. - msg = (_LC("Can not get scrub jobs from queue: %s") % - encodeutils.exception_to_unicode(err)) - LOG.critical(msg) - raise exception.FailedToGetScrubberJobs() - - delete_jobs = {} - for image_id, loc_id, loc_uri in records: - if image_id not in delete_jobs: - delete_jobs[image_id] = [] - delete_jobs[image_id].append((image_id, loc_id, loc_uri)) - return delete_jobs - - def run(self, event=None): - delete_jobs = self._get_delete_jobs() - - if delete_jobs: - list(self.pool.starmap(self._scrub_image, delete_jobs.items())) - - def _scrub_image(self, image_id, delete_jobs): - if len(delete_jobs) == 0: - return - - LOG.info(_LI("Scrubbing image %(id)s from %(count)d locations."), - {'id': image_id, 'count': len(delete_jobs)}) - - success = True - for img_id, loc_id, uri in delete_jobs: - try: - self._delete_image_location_from_backend(img_id, loc_id, uri) - except Exception: - success = False - - if success: - image = self.registry.get_image(image_id) - if image['status'] == 'pending_delete': - self.registry.update_image(image_id, {'status': 'deleted'}) - LOG.info(_LI("Image %s has been scrubbed successfully"), image_id) - else: - LOG.warn(_LW("One or more image locations couldn't be scrubbed " - "from backend. 
Leaving image '%s' in 'pending_delete'" - " status") % image_id) - - def _delete_image_location_from_backend(self, image_id, loc_id, uri): - if CONF.metadata_encryption_key: - uri = crypt.urlsafe_decrypt(CONF.metadata_encryption_key, uri) - try: - LOG.debug("Scrubbing image %s from a location.", image_id) - try: - self.store_api.delete_from_backend(uri, self.admin_context) - except store_exceptions.NotFound: - LOG.info(_LI("Image location for image '%s' not found in " - "backend; Marking image location deleted in " - "db."), image_id) - - if loc_id != '-': - db_api.get_api().image_location_delete(self.admin_context, - image_id, - int(loc_id), - 'deleted') - LOG.info(_LI("Image %s is scrubbed from a location."), image_id) - except Exception as e: - LOG.error(_LE("Unable to scrub image %(id)s from a location. " - "Reason: %(exc)s ") % - {'id': image_id, - 'exc': encodeutils.exception_to_unicode(e)}) - raise diff --git a/glance/tests/__init__.py b/glance/tests/__init__.py deleted file mode 100644 index f38fdf8b..00000000 --- a/glance/tests/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -# See http://code.google.com/p/python-nose/issues/detail?id=373 -# The code below enables tests to work with i18n _() blocks -import six.moves.builtins as __builtin__ -setattr(__builtin__, '_', lambda x: x) - -# Set up logging to output debugging -import logging -logger = logging.getLogger() -hdlr = logging.FileHandler('run_tests.log', 'w') -formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') -hdlr.setFormatter(formatter) -logger.addHandler(hdlr) -logger.setLevel(logging.DEBUG) - -import eventlet -eventlet.patcher.monkey_patch() diff --git a/glance/tests/etc/glance-swift.conf b/glance/tests/etc/glance-swift.conf deleted file mode 100644 index 956433b2..00000000 --- a/glance/tests/etc/glance-swift.conf +++ /dev/null @@ -1,34 +0,0 @@ -[ref1] -user = tenant:user1 -key = key1 -auth_address = example.com - -[ref2] -user = user2 -key = key2 -auth_address = http://example.com - -[store_2] -user = tenant:user1 -key = key1 -auth_address= https://localhost:8080 - -[store_3] -user= tenant:user2 -key= key2 -auth_address= https://localhost:8080 - -[store_4] -user = tenant:user1 -key = key1 -auth_address = http://localhost:80 - -[store_5] -user = tenant:user1 -key = key1 -auth_address = http://localhost - -[store_6] -user = tenant:user1 -key = key1 -auth_address = https://localhost/v1 diff --git a/glance/tests/etc/policy.json b/glance/tests/etc/policy.json deleted file mode 100644 index fb19f4fa..00000000 --- a/glance/tests/etc/policy.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "context_is_admin": "role:admin", - "default": "", - "glance_creator": "role:admin or role:spl_role", - - "add_image": "", - "delete_image": "", - "get_image": "", - "get_images": "", - "modify_image": "", - "publicize_image": "", - "communitize_image": "", - "copy_from": "", - - "download_image": "", - "upload_image": "", - - "delete_image_location": "", - "get_image_location": "", - "set_image_location": "", - - "add_member": "", - "delete_member": "", - "get_member": "", - 
"get_members": "", - "modify_member": "", - - "manage_image_cache": "", - - "get_task": "role:admin", - "get_tasks": "role:admin", - "add_task": "role:admin", - "modify_task": "role:admin", - - "get_metadef_namespace": "", - "get_metadef_namespaces":"", - "modify_metadef_namespace":"", - "add_metadef_namespace":"", - - "get_metadef_object":"", - "get_metadef_objects":"", - "modify_metadef_object":"", - "add_metadef_object":"", - - "list_metadef_resource_types":"", - "get_metadef_resource_type":"", - "add_metadef_resource_type_association":"", - - "get_metadef_property":"", - "get_metadef_properties":"", - "modify_metadef_property":"", - "add_metadef_property":"", - - "get_metadef_tag":"", - "get_metadef_tags":"", - "modify_metadef_tag":"", - "add_metadef_tag":"", - "add_metadef_tags":"", - - "deactivate": "", - "reactivate": "" -} diff --git a/glance/tests/etc/property-protections-policies.conf b/glance/tests/etc/property-protections-policies.conf deleted file mode 100644 index 4be3656d..00000000 --- a/glance/tests/etc/property-protections-policies.conf +++ /dev/null @@ -1,59 +0,0 @@ -[spl_creator_policy] -create = glance_creator -read = glance_creator -update = context_is_admin -delete = context_is_admin - -[spl_default_policy] -create = context_is_admin -read = default -update = context_is_admin -delete = context_is_admin - -[^x_all_permitted.*] -create = @ -read = @ -update = @ -delete = @ - -[^x_none_permitted.*] -create = ! -read = ! -update = ! -delete = ! - -[x_none_read] -create = context_is_admin -read = ! -update = ! -delete = ! - -[x_none_update] -create = context_is_admin -read = context_is_admin -update = ! -delete = context_is_admin - -[x_none_delete] -create = context_is_admin -read = context_is_admin -update = context_is_admin -delete = ! 
- -[x_foo_matcher] -create = context_is_admin -read = context_is_admin -update = context_is_admin -delete = context_is_admin - -[x_foo_*] -create = @ -read = @ -update = @ -delete = @ - -[.*] -create = context_is_admin -read = context_is_admin -update = context_is_admin -delete = context_is_admin diff --git a/glance/tests/etc/property-protections.conf b/glance/tests/etc/property-protections.conf deleted file mode 100644 index d5c254a6..00000000 --- a/glance/tests/etc/property-protections.conf +++ /dev/null @@ -1,101 +0,0 @@ -[^x_owner_.*] -create = admin,member -read = admin,member -update = admin,member -delete = admin,member - -[spl_create_prop] -create = admin,spl_role -read = admin,spl_role -update = admin -delete = admin - -[spl_read_prop] -create = admin,spl_role -read = admin,spl_role -update = admin -delete = admin - -[spl_read_only_prop] -create = admin -read = admin,spl_role -update = admin -delete = admin - -[spl_update_prop] -create = admin,spl_role -read = admin,spl_role -update = admin,spl_role -delete = admin - -[spl_update_only_prop] -create = admin -read = admin -update = admin,spl_role -delete = admin - -[spl_delete_prop] -create = admin,spl_role -read = admin,spl_role -update = admin -delete = admin,spl_role - -[spl_delete_empty_prop] -create = admin,spl_role -read = admin,spl_role -update = admin -delete = admin,spl_role - -[^x_all_permitted.*] -create = @ -read = @ -update = @ -delete = @ - -[^x_none_permitted.*] -create = ! -read = ! -update = ! -delete = ! - -[x_none_read] -create = admin,member -read = ! -update = ! -delete = ! - -[x_none_update] -create = admin,member -read = admin,member -update = ! -delete = admin,member - -[x_none_delete] -create = admin,member -read = admin,member -update = admin,member -delete = ! 
- -[x_case_insensitive] -create = admin,Member -read = admin,Member -update = admin,Member -delete = admin,Member - -[x_foo_matcher] -create = admin -read = admin -update = admin -delete = admin - -[x_foo_*] -create = @ -read = @ -update = @ -delete = @ - -[.*] -create = admin -read = admin -update = admin -delete = admin diff --git a/glance/tests/etc/schema-image.json b/glance/tests/etc/schema-image.json deleted file mode 100644 index 0967ef42..00000000 --- a/glance/tests/etc/schema-image.json +++ /dev/null @@ -1 +0,0 @@ -{} diff --git a/glance/tests/functional/__init__.py b/glance/tests/functional/__init__.py deleted file mode 100644 index 968d043f..00000000 --- a/glance/tests/functional/__init__.py +++ /dev/null @@ -1,950 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Base test class for running non-stubbed tests (functional tests) - -The FunctionalTest class contains helper methods for starting the API -and Registry server, grabbing the logs of each, cleaning up pidfiles, -and spinning down the servers. 
-""" - -import atexit -import datetime -import errno -import logging -import os -import platform -import shutil -import signal -import socket -import sys -import tempfile -import time - -import fixtures -from oslo_serialization import jsonutils -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range -import six.moves.urllib.parse as urlparse -import testtools - -from glance.common import utils -from glance.db.sqlalchemy import api as db_api -from glance import tests as glance_tests -from glance.tests import utils as test_utils - -execute, get_unused_port = test_utils.execute, test_utils.get_unused_port -tracecmd_osmap = {'Linux': 'strace', 'FreeBSD': 'truss'} - - -class Server(object): - """ - Class used to easily manage starting and stopping - a server during functional test runs. - """ - def __init__(self, test_dir, port, sock=None): - """ - Creates a new Server object. - - :param test_dir: The directory where all test stuff is kept. This is - passed from the FunctionalTestCase. - :param port: The port to start a server up on. - """ - self.debug = True - self.no_venv = False - self.test_dir = test_dir - self.bind_port = port - self.conf_file_name = None - self.conf_base = None - self.paste_conf_base = None - self.exec_env = None - self.deployment_flavor = '' - self.show_image_direct_url = False - self.show_multiple_locations = False - self.property_protection_file = '' - self.enable_v1_api = True - self.enable_v2_api = True - self.enable_v1_registry = True - self.enable_v2_registry = True - self.needs_database = False - self.log_file = None - self.sock = sock - self.fork_socket = True - self.process_pid = None - self.server_module = None - self.stop_kill = False - self.use_user_token = True - self.send_identity_credentials = False - - def write_conf(self, **kwargs): - """ - Writes the configuration file for the server to its intended - destination. 
Returns the name of the configuration file and - the over-ridden config content (may be useful for populating - error messages). - """ - if not self.conf_base: - raise RuntimeError("Subclass did not populate config_base!") - - conf_override = self.__dict__.copy() - if kwargs: - conf_override.update(**kwargs) - - # A config file and paste.ini to use just for this test...we don't want - # to trample on currently-running Glance servers, now do we? - - conf_dir = os.path.join(self.test_dir, 'etc') - conf_filepath = os.path.join(conf_dir, "%s.conf" % self.server_name) - if os.path.exists(conf_filepath): - os.unlink(conf_filepath) - paste_conf_filepath = conf_filepath.replace(".conf", "-paste.ini") - if os.path.exists(paste_conf_filepath): - os.unlink(paste_conf_filepath) - utils.safe_mkdirs(conf_dir) - - def override_conf(filepath, overridden): - with open(filepath, 'w') as conf_file: - conf_file.write(overridden) - conf_file.flush() - return conf_file.name - - overridden_core = self.conf_base % conf_override - self.conf_file_name = override_conf(conf_filepath, overridden_core) - - overridden_paste = '' - if self.paste_conf_base: - overridden_paste = self.paste_conf_base % conf_override - override_conf(paste_conf_filepath, overridden_paste) - - overridden = ('==Core config==\n%s\n==Paste config==\n%s' % - (overridden_core, overridden_paste)) - - return self.conf_file_name, overridden - - def start(self, expect_exit=True, expected_exitcode=0, **kwargs): - """ - Starts the server. - - Any kwargs passed to this method will override the configuration - value in the conf file used in starting the servers. 
- """ - - # Ensure the configuration file is written - self.write_conf(**kwargs) - - self.create_database() - - cmd = ("%(server_module)s --config-file %(conf_file_name)s" - % {"server_module": self.server_module, - "conf_file_name": self.conf_file_name}) - cmd = "%s -m %s" % (sys.executable, cmd) - # close the sock and release the unused port closer to start time - if self.exec_env: - exec_env = self.exec_env.copy() - else: - exec_env = {} - pass_fds = set() - if self.sock: - if not self.fork_socket: - self.sock.close() - self.sock = None - else: - fd = os.dup(self.sock.fileno()) - exec_env[utils.GLANCE_TEST_SOCKET_FD_STR] = str(fd) - pass_fds.add(fd) - self.sock.close() - - self.process_pid = test_utils.fork_exec(cmd, - logfile=os.devnull, - exec_env=exec_env, - pass_fds=pass_fds) - - self.stop_kill = not expect_exit - if self.pid_file: - pf = open(self.pid_file, 'w') - pf.write('%d\n' % self.process_pid) - pf.close() - if not expect_exit: - rc = 0 - try: - os.kill(self.process_pid, 0) - except OSError: - raise RuntimeError("The process did not start") - else: - rc = test_utils.wait_for_fork( - self.process_pid, - expected_exitcode=expected_exitcode) - # avoid an FD leak - if self.sock: - os.close(fd) - self.sock = None - return (rc, '', '') - - def reload(self, expect_exit=True, expected_exitcode=0, **kwargs): - """ - Start and stop the service to reload - - Any kwargs passed to this method will override the configuration - value in the conf file used in starting the servers. 
- """ - self.stop() - return self.start(expect_exit=expect_exit, - expected_exitcode=expected_exitcode, **kwargs) - - def create_database(self): - """Create database if required for this server""" - if self.needs_database: - conf_dir = os.path.join(self.test_dir, 'etc') - utils.safe_mkdirs(conf_dir) - conf_filepath = os.path.join(conf_dir, 'glance-manage.conf') - - with open(conf_filepath, 'w') as conf_file: - conf_file.write('[DEFAULT]\n') - conf_file.write('sql_connection = %s' % self.sql_connection) - conf_file.flush() - - glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE' - if glance_db_env in os.environ: - # use the empty db created and cached as a tempfile - # instead of spending the time creating a new one - db_location = os.environ[glance_db_env] - os.system('cp %s %s/tests.sqlite' - % (db_location, self.test_dir)) - else: - cmd = ('%s -m glance.cmd.manage --config-file %s db sync' % - (sys.executable, conf_filepath)) - execute(cmd, no_venv=self.no_venv, exec_env=self.exec_env, - expect_exit=True) - - # copy the clean db to a temp location so that it - # can be reused for future tests - (osf, db_location) = tempfile.mkstemp() - os.close(osf) - os.system('cp %s/tests.sqlite %s' - % (self.test_dir, db_location)) - os.environ[glance_db_env] = db_location - - # cleanup the temp file when the test suite is - # complete - def _delete_cached_db(): - try: - os.remove(os.environ[glance_db_env]) - except Exception: - glance_tests.logger.exception( - "Error cleaning up the file %s" % - os.environ[glance_db_env]) - atexit.register(_delete_cached_db) - - def stop(self): - """ - Spin down the server. - """ - if not self.process_pid: - raise Exception('why is this being called? 
%s' % self.server_name) - - if self.stop_kill: - os.kill(self.process_pid, signal.SIGTERM) - rc = test_utils.wait_for_fork(self.process_pid, raise_error=False) - return (rc, '', '') - - def dump_log(self, name): - log = logging.getLogger(name) - if not self.log_file or not os.path.exists(self.log_file): - return - with open(self.log_file, 'r') as fptr: - for line in fptr: - log.info(line.strip()) - - -class ApiServer(Server): - - """ - Server object that starts/stops/manages the API server - """ - - def __init__(self, test_dir, port, policy_file, delayed_delete=False, - pid_file=None, sock=None, **kwargs): - super(ApiServer, self).__init__(test_dir, port, sock=sock) - self.server_name = 'api' - self.server_module = 'glance.cmd.%s' % self.server_name - self.default_store = kwargs.get("default_store", "file") - self.bind_host = "127.0.0.1" - self.registry_host = "127.0.0.1" - self.key_file = "" - self.cert_file = "" - self.metadata_encryption_key = "012345678901234567890123456789ab" - self.image_dir = os.path.join(self.test_dir, "images") - self.pid_file = pid_file or os.path.join(self.test_dir, "api.pid") - self.log_file = os.path.join(self.test_dir, "api.log") - self.image_size_cap = 1099511627776 - self.delayed_delete = delayed_delete - self.owner_is_tenant = True - self.workers = 0 - self.scrub_time = 5 - self.image_cache_dir = os.path.join(self.test_dir, - 'cache') - self.image_cache_driver = 'sqlite' - self.policy_file = policy_file - self.policy_default_rule = 'default' - self.property_protection_rule_format = 'roles' - self.image_member_quota = 10 - self.image_property_quota = 10 - self.image_tag_quota = 10 - self.image_location_quota = 2 - self.disable_path = None - - self.needs_database = True - default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir - self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION', - default_sql_connection) - self.data_api = kwargs.get("data_api", - "glance.db.sqlalchemy.api") - self.user_storage_quota = 
'0' - self.lock_path = self.test_dir - - self.location_strategy = 'location_order' - self.store_type_location_strategy_preference = "" - - self.send_identity_headers = False - - self.conf_base = """[DEFAULT] -debug = %(debug)s -default_log_levels = eventlet.wsgi.server=DEBUG -bind_host = %(bind_host)s -bind_port = %(bind_port)s -key_file = %(key_file)s -cert_file = %(cert_file)s -metadata_encryption_key = %(metadata_encryption_key)s -registry_host = %(registry_host)s -registry_port = %(registry_port)s -use_user_token = %(use_user_token)s -send_identity_credentials = %(send_identity_credentials)s -log_file = %(log_file)s -image_size_cap = %(image_size_cap)d -delayed_delete = %(delayed_delete)s -owner_is_tenant = %(owner_is_tenant)s -workers = %(workers)s -scrub_time = %(scrub_time)s -send_identity_headers = %(send_identity_headers)s -image_cache_dir = %(image_cache_dir)s -image_cache_driver = %(image_cache_driver)s -data_api = %(data_api)s -sql_connection = %(sql_connection)s -show_image_direct_url = %(show_image_direct_url)s -show_multiple_locations = %(show_multiple_locations)s -user_storage_quota = %(user_storage_quota)s -enable_v1_api = %(enable_v1_api)s -enable_v2_api = %(enable_v2_api)s -lock_path = %(lock_path)s -property_protection_file = %(property_protection_file)s -property_protection_rule_format = %(property_protection_rule_format)s -image_member_quota=%(image_member_quota)s -image_property_quota=%(image_property_quota)s -image_tag_quota=%(image_tag_quota)s -image_location_quota=%(image_location_quota)s -location_strategy=%(location_strategy)s -allow_additional_image_properties = True -[oslo_policy] -policy_file = %(policy_file)s -policy_default_rule = %(policy_default_rule)s -[paste_deploy] -flavor = %(deployment_flavor)s -[store_type_location_strategy] -store_type_preference = %(store_type_location_strategy_preference)s -[glance_store] -filesystem_store_datadir=%(image_dir)s -default_store = %(default_store)s -""" - self.paste_conf_base = 
"""[pipeline:glance-api] -pipeline = - cors - healthcheck - versionnegotiation - gzip - unauthenticated-context - rootapp - -[pipeline:glance-api-caching] -pipeline = cors healthcheck versionnegotiation gzip unauthenticated-context - cache rootapp - -[pipeline:glance-api-cachemanagement] -pipeline = - cors - healthcheck - versionnegotiation - gzip - unauthenticated-context - cache - cache_manage - rootapp - -[pipeline:glance-api-fakeauth] -pipeline = cors healthcheck versionnegotiation gzip fakeauth context rootapp - -[pipeline:glance-api-noauth] -pipeline = cors healthcheck versionnegotiation gzip context rootapp - -[composite:rootapp] -paste.composite_factory = glance.api:root_app_factory -/: apiversions -/v1: apiv1app -/v2: apiv2app - -[app:apiversions] -paste.app_factory = glance.api.versions:create_resource - -[app:apiv1app] -paste.app_factory = glance.api.v1.router:API.factory - -[app:apiv2app] -paste.app_factory = glance.api.v2.router:API.factory - -[filter:healthcheck] -paste.filter_factory = oslo_middleware:Healthcheck.factory -backends = disable_by_file -disable_by_file_path = %(disable_path)s - -[filter:versionnegotiation] -paste.filter_factory = - glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory - -[filter:gzip] -paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory - -[filter:cache] -paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory - -[filter:cache_manage] -paste.filter_factory = - glance.api.middleware.cache_manage:CacheManageFilter.factory - -[filter:context] -paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory - -[filter:unauthenticated-context] -paste.filter_factory = - glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - -[filter:fakeauth] -paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory - -[filter:cors] -paste.filter_factory = oslo_middleware.cors:filter_factory -allowed_origin=http://valid.example.com -""" - - 
-class RegistryServer(Server): - - """ - Server object that starts/stops/manages the Registry server - """ - - def __init__(self, test_dir, port, policy_file, sock=None): - super(RegistryServer, self).__init__(test_dir, port, sock=sock) - self.server_name = 'registry' - self.server_module = 'glance.cmd.%s' % self.server_name - - self.needs_database = True - default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir - self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION', - default_sql_connection) - - self.bind_host = "127.0.0.1" - self.pid_file = os.path.join(self.test_dir, "registry.pid") - self.log_file = os.path.join(self.test_dir, "registry.log") - self.owner_is_tenant = True - self.workers = 0 - self.api_version = 1 - self.user_storage_quota = '0' - self.metadata_encryption_key = "012345678901234567890123456789ab" - self.policy_file = policy_file - self.policy_default_rule = 'default' - self.disable_path = None - - self.conf_base = """[DEFAULT] -debug = %(debug)s -bind_host = %(bind_host)s -bind_port = %(bind_port)s -log_file = %(log_file)s -sql_connection = %(sql_connection)s -sql_idle_timeout = 3600 -api_limit_max = 1000 -limit_param_default = 25 -owner_is_tenant = %(owner_is_tenant)s -enable_v2_registry = %(enable_v2_registry)s -workers = %(workers)s -user_storage_quota = %(user_storage_quota)s -metadata_encryption_key = %(metadata_encryption_key)s -[oslo_policy] -policy_file = %(policy_file)s -policy_default_rule = %(policy_default_rule)s -[paste_deploy] -flavor = %(deployment_flavor)s -""" - self.paste_conf_base = """[pipeline:glance-registry] -pipeline = healthcheck unauthenticated-context registryapp - -[pipeline:glance-registry-fakeauth] -pipeline = healthcheck fakeauth context registryapp - -[pipeline:glance-registry-trusted-auth] -pipeline = healthcheck context registryapp - -[app:registryapp] -paste.app_factory = glance.registry.api:API.factory - -[filter:healthcheck] -paste.filter_factory = oslo_middleware:Healthcheck.factory 
-backends = disable_by_file -disable_by_file_path = %(disable_path)s - -[filter:context] -paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory - -[filter:unauthenticated-context] -paste.filter_factory = - glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - -[filter:fakeauth] -paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory -""" - - -class ScrubberDaemon(Server): - """ - Server object that starts/stops/manages the Scrubber server - """ - - def __init__(self, test_dir, policy_file, daemon=False, **kwargs): - # NOTE(jkoelker): Set the port to 0 since we actually don't listen - super(ScrubberDaemon, self).__init__(test_dir, 0) - self.server_name = 'scrubber' - self.server_module = 'glance.cmd.%s' % self.server_name - self.daemon = daemon - - self.registry_host = "127.0.0.1" - - self.image_dir = os.path.join(self.test_dir, "images") - self.scrub_time = 5 - self.pid_file = os.path.join(self.test_dir, "scrubber.pid") - self.log_file = os.path.join(self.test_dir, "scrubber.log") - self.metadata_encryption_key = "012345678901234567890123456789ab" - self.lock_path = self.test_dir - - default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir - self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION', - default_sql_connection) - self.policy_file = policy_file - self.policy_default_rule = 'default' - - self.send_identity_headers = False - self.admin_role = 'admin' - - self.conf_base = """[DEFAULT] -debug = %(debug)s -log_file = %(log_file)s -daemon = %(daemon)s -wakeup_time = 2 -scrub_time = %(scrub_time)s -registry_host = %(registry_host)s -registry_port = %(registry_port)s -metadata_encryption_key = %(metadata_encryption_key)s -lock_path = %(lock_path)s -sql_connection = %(sql_connection)s -sql_idle_timeout = 3600 -send_identity_headers = %(send_identity_headers)s -admin_role = %(admin_role)s -[glance_store] -filesystem_store_datadir=%(image_dir)s -[oslo_policy] -policy_file = 
%(policy_file)s -policy_default_rule = %(policy_default_rule)s -""" - - def start(self, expect_exit=True, expected_exitcode=0, **kwargs): - if 'daemon' in kwargs: - expect_exit = False - return super(ScrubberDaemon, self).start( - expect_exit=expect_exit, - expected_exitcode=expected_exitcode, - **kwargs) - - -class FunctionalTest(test_utils.BaseTestCase): - - """ - Base test class for any test that wants to test the actual - servers and clients and not just the stubbed out interfaces - """ - - inited = False - disabled = False - launched_servers = [] - - def setUp(self): - super(FunctionalTest, self).setUp() - self.test_dir = self.useFixture(fixtures.TempDir()).path - - self.api_protocol = 'http' - self.api_port, api_sock = test_utils.get_unused_port_and_socket() - self.registry_port, reg_sock = test_utils.get_unused_port_and_socket() - # NOTE: Scrubber is enabled by default for the functional tests. - # Please disbale it by explicitly setting 'self.include_scrubber' to - # False in the test SetUps that do not require Scrubber to run. 
- self.include_scrubber = True - - self.tracecmd = tracecmd_osmap.get(platform.system()) - - conf_dir = os.path.join(self.test_dir, 'etc') - utils.safe_mkdirs(conf_dir) - self.copy_data_file('schema-image.json', conf_dir) - self.copy_data_file('policy.json', conf_dir) - self.copy_data_file('property-protections.conf', conf_dir) - self.copy_data_file('property-protections-policies.conf', conf_dir) - self.property_file_roles = os.path.join(conf_dir, - 'property-protections.conf') - property_policies = 'property-protections-policies.conf' - self.property_file_policies = os.path.join(conf_dir, - property_policies) - self.policy_file = os.path.join(conf_dir, 'policy.json') - - self.api_server = ApiServer(self.test_dir, - self.api_port, - self.policy_file, - sock=api_sock) - - self.registry_server = RegistryServer(self.test_dir, - self.registry_port, - self.policy_file, - sock=reg_sock) - - self.scrubber_daemon = ScrubberDaemon(self.test_dir, self.policy_file) - - self.pid_files = [self.api_server.pid_file, - self.registry_server.pid_file, - self.scrubber_daemon.pid_file] - self.files_to_destroy = [] - self.launched_servers = [] - - def tearDown(self): - if not self.disabled: - self.cleanup() - # We destroy the test data store between each test case, - # and recreate it, which ensures that we have no side-effects - # from the tests - self._reset_database(self.registry_server.sql_connection) - self._reset_database(self.api_server.sql_connection) - super(FunctionalTest, self).tearDown() - - self.api_server.dump_log('api_server') - self.registry_server.dump_log('registry_server') - self.scrubber_daemon.dump_log('scrubber_daemon') - - def set_policy_rules(self, rules): - fap = open(self.policy_file, 'w') - fap.write(jsonutils.dumps(rules)) - fap.close() - - def _reset_database(self, conn_string): - conn_pieces = urlparse.urlparse(conn_string) - if conn_string.startswith('sqlite'): - # We leave behind the sqlite DB for failing tests to aid - # in diagnosis, as the file size 
is relatively small and - # won't interfere with subsequent tests as it's in a per- - # test directory (which is blown-away if the test is green) - pass - elif conn_string.startswith('mysql'): - # We can execute the MySQL client to destroy and re-create - # the MYSQL database, which is easier and less error-prone - # than using SQLAlchemy to do this via MetaData...trust me. - database = conn_pieces.path.strip('/') - loc_pieces = conn_pieces.netloc.split('@') - host = loc_pieces[1] - auth_pieces = loc_pieces[0].split(':') - user = auth_pieces[0] - password = "" - if len(auth_pieces) > 1: - if auth_pieces[1].strip(): - password = "-p%s" % auth_pieces[1] - sql = ("drop database if exists %(database)s; " - "create database %(database)s;") % {'database': database} - cmd = ("mysql -u%(user)s %(password)s -h%(host)s " - "-e\"%(sql)s\"") % {'user': user, 'password': password, - 'host': host, 'sql': sql} - exitcode, out, err = execute(cmd) - self.assertEqual(0, exitcode) - - def cleanup(self): - """ - Makes sure anything we created or started up in the - tests are destroyed or spun down - """ - - # NOTE(jbresnah) call stop on each of the servers instead of - # checking the pid file. stop() will wait until the child - # server is dead. This eliminates the possibility of a race - # between a child process listening on a port actually dying - # and a new process being started - servers = [self.api_server, - self.registry_server, - self.scrubber_daemon] - for s in servers: - try: - s.stop() - except Exception: - pass - - for f in self.files_to_destroy: - if os.path.exists(f): - os.unlink(f) - - def start_server(self, - server, - expect_launch, - expect_exit=True, - expected_exitcode=0, - **kwargs): - """ - Starts a server on an unused port. - - Any kwargs passed to this method will override the configuration - value in the conf file used in starting the server. 
- - :param server: the server to launch - :param expect_launch: true iff the server is expected to - successfully start - :param expect_exit: true iff the launched process is expected - to exit in a timely fashion - :param expected_exitcode: expected exitcode from the launcher - """ - self.cleanup() - - # Start up the requested server - exitcode, out, err = server.start(expect_exit=expect_exit, - expected_exitcode=expected_exitcode, - **kwargs) - if expect_exit: - self.assertEqual(expected_exitcode, exitcode, - "Failed to spin up the requested server. " - "Got: %s" % err) - - self.launched_servers.append(server) - - launch_msg = self.wait_for_servers([server], expect_launch) - self.assertTrue(launch_msg is None, launch_msg) - - def start_with_retry(self, server, port_name, max_retries, - expect_launch=True, - **kwargs): - """ - Starts a server, with retries if the server launches but - fails to start listening on the expected port. - - :param server: the server to launch - :param port_name: the name of the port attribute - :param max_retries: the maximum number of attempts - :param expect_launch: true iff the server is expected to - successfully start - :param expect_exit: true iff the launched process is expected - to exit in a timely fashion - """ - launch_msg = None - for i in range(max_retries): - exitcode, out, err = server.start(expect_exit=not expect_launch, - **kwargs) - name = server.server_name - self.assertEqual(0, exitcode, - "Failed to spin up the %s server. " - "Got: %s" % (name, err)) - launch_msg = self.wait_for_servers([server], expect_launch) - if launch_msg: - server.stop() - server.bind_port = get_unused_port() - setattr(self, port_name, server.bind_port) - else: - self.launched_servers.append(server) - break - self.assertTrue(launch_msg is None, launch_msg) - - def start_servers(self, **kwargs): - """ - Starts the API and Registry servers (glance-control api start - & glance-control registry start) on unused ports. 
glance-control - should be installed into the python path - - Any kwargs passed to this method will override the configuration - value in the conf file used in starting the servers. - """ - self.cleanup() - - # Start up the API and default registry server - - # We start the registry server first, as the API server config - # depends on the registry port - this ordering allows for - # retrying the launch on a port clash - self.start_with_retry(self.registry_server, 'registry_port', 3, - **kwargs) - kwargs['registry_port'] = self.registry_server.bind_port - - self.start_with_retry(self.api_server, 'api_port', 3, **kwargs) - - if self.include_scrubber: - exitcode, out, err = self.scrubber_daemon.start(**kwargs) - self.assertEqual(0, exitcode, - "Failed to spin up the Scrubber daemon. " - "Got: %s" % err) - - def ping_server(self, port): - """ - Simple ping on the port. If responsive, return True, else - return False. - - :note We use raw sockets, not ping here, since ping uses ICMP and - has no concept of ports... - """ - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - s.connect(("127.0.0.1", port)) - return True - except socket.error: - return False - finally: - s.close() - - def ping_server_ipv6(self, port): - """ - Simple ping on the port. If responsive, return True, else - return False. - - :note We use raw sockets, not ping here, since ping uses ICMP and - has no concept of ports... - - The function uses IPv6 (therefore AF_INET6 and ::1). - """ - s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) - try: - s.connect(("::1", port)) - return True - except socket.error: - return False - finally: - s.close() - - def wait_for_servers(self, servers, expect_launch=True, timeout=30): - """ - Tight loop, waiting for the given server port(s) to be available. - Returns when all are pingable. There is a timeout on waiting - for the servers to come up. 
- - :param servers: Glance server ports to ping - :param expect_launch: Optional, true iff the server(s) are - expected to successfully start - :param timeout: Optional, defaults to 30 seconds - :returns: None if launch expectation is met, otherwise an - assertion message - """ - now = datetime.datetime.now() - timeout_time = now + datetime.timedelta(seconds=timeout) - replied = [] - while (timeout_time > now): - pinged = 0 - for server in servers: - if self.ping_server(server.bind_port): - pinged += 1 - if server not in replied: - replied.append(server) - if pinged == len(servers): - msg = 'Unexpected server launch status' - return None if expect_launch else msg - now = datetime.datetime.now() - time.sleep(0.05) - - failed = list(set(servers) - set(replied)) - msg = 'Unexpected server launch status for: ' - for f in failed: - msg += ('%s, ' % f.server_name) - if os.path.exists(f.pid_file): - pid = f.process_pid - trace = f.pid_file.replace('.pid', '.trace') - if self.tracecmd: - cmd = '%s -p %d -o %s' % (self.tracecmd, pid, trace) - try: - execute(cmd, raise_error=False, expect_exit=False) - except OSError as e: - if e.errno == errno.ENOENT: - raise RuntimeError('No executable found for "%s" ' - 'command.' % self.tracecmd) - else: - raise - time.sleep(0.5) - if os.path.exists(trace): - msg += ('\n%s:\n%s\n' % (self.tracecmd, - open(trace).read())) - - self.add_log_details(failed) - - return msg if expect_launch else None - - def stop_server(self, server): - """ - Called to stop a single server in a normal fashion using the - glance-control stop method to gracefully shut the server down. - - :param server: the server to stop - """ - # Spin down the requested server - server.stop() - - def stop_servers(self): - """ - Called to stop the started servers in a normal fashion. Note - that cleanup() will stop the servers using a fairly draconian - method of sending a SIGTERM signal to the servers. 
Here, we use - the glance-control stop method to gracefully shut the server down. - This method also asserts that the shutdown was clean, and so it - is meant to be called during a normal test case sequence. - """ - - # Spin down the API and default registry server - self.stop_server(self.api_server) - self.stop_server(self.registry_server) - if self.include_scrubber: - self.stop_server(self.scrubber_daemon) - - self._reset_database(self.registry_server.sql_connection) - - def run_sql_cmd(self, sql): - """ - Provides a crude mechanism to run manual SQL commands for backend - DB verification within the functional tests. - The raw result set is returned. - """ - engine = db_api.get_engine() - return engine.execute(sql) - - def copy_data_file(self, file_name, dst_dir): - src_file_name = os.path.join('glance/tests/etc', file_name) - shutil.copy(src_file_name, dst_dir) - dst_file_name = os.path.join(dst_dir, file_name) - return dst_file_name - - def add_log_details(self, servers=None): - logs = [s.log_file for s in (servers or self.launched_servers)] - for log in logs: - if os.path.exists(log): - testtools.content.attach_file(self, log) diff --git a/glance/tests/functional/db/__init__.py b/glance/tests/functional/db/__init__.py deleted file mode 100644 index b4a1f155..00000000 --- a/glance/tests/functional/db/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -# NOTE(markwash): These functions are used in the base tests cases to -# set up the db api implementation under test. Rather than accessing them -# directly, test modules should use the load and reset functions below. -get_db = None -reset_db = None - - -def load(get_db_fn, reset_db_fn): - global get_db, reset_db - get_db = get_db_fn - reset_db = reset_db_fn - - -def reset(): - global get_db, reset_db - get_db = None - reset_db = None diff --git a/glance/tests/functional/db/base.py b/glance/tests/functional/db/base.py deleted file mode 100644 index 80302527..00000000 --- a/glance/tests/functional/db/base.py +++ /dev/null @@ -1,2561 +0,0 @@ -# Copyright 2010-2012 OpenStack Foundation -# Copyright 2012 Justin Santa Barbara -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy -import datetime -import uuid - -import mock -from oslo_db import exception as db_exception -from oslo_db.sqlalchemy import utils as sqlalchemyutils -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range -from six.moves import reduce -from sqlalchemy.dialects import sqlite - -from glance.common import exception -from glance.common import timeutils -from glance import context -from glance.db.sqlalchemy import api as db_api -from glance.tests import functional -import glance.tests.functional.db as db_tests -from glance.tests import utils as test_utils - - -# The default sort order of results is whatever sort key is specified, -# plus created_at and id for ties. When we're not specifying a sort_key, -# we get the default (created_at). Some tests below expect the fixtures to be -# returned in array-order, so if the created_at timestamps are the same, -# these tests rely on the UUID* values being in order -UUID1, UUID2, UUID3 = sorted([str(uuid.uuid4()) for x in range(3)]) - - -def build_image_fixture(**kwargs): - default_datetime = timeutils.utcnow() - image = { - 'id': str(uuid.uuid4()), - 'name': 'fake image #2', - 'status': 'active', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'is_public': True, - 'created_at': default_datetime, - 'updated_at': default_datetime, - 'deleted_at': None, - 'deleted': False, - 'checksum': None, - 'min_disk': 5, - 'min_ram': 256, - 'size': 19, - 'locations': [{'url': "file:///tmp/glance-tests/2", - 'metadata': {}, 'status': 'active'}], - 'properties': {}, - } - if 'visibility' in kwargs: - image.pop('is_public') - image.update(kwargs) - return image - - -def build_task_fixture(**kwargs): - default_datetime = timeutils.utcnow() - task = { - 'id': str(uuid.uuid4()), - 'type': 'import', - 'status': 'pending', - 'input': {'ping': 'pong'}, - 'owner': str(uuid.uuid4()), - 'message': None, - 'expires_at': None, - 'created_at': default_datetime, - 'updated_at': 
default_datetime, - } - task.update(kwargs) - return task - - -class FunctionalInitWrapper(functional.FunctionalTest): - - def setUp(self): - super(FunctionalInitWrapper, self).setUp() - self.config(policy_file=self.policy_file, group='oslo_policy') - - -class TestDriver(test_utils.BaseTestCase): - - def setUp(self): - super(TestDriver, self).setUp() - context_cls = context.RequestContext - self.adm_context = context_cls(is_admin=True, - auth_token='user:user:admin') - self.context = context_cls(is_admin=False, - auth_token='user:user:user') - self.db_api = db_tests.get_db(self.config) - db_tests.reset_db(self.db_api) - self.fixtures = self.build_image_fixtures() - self.create_images(self.fixtures) - - def build_image_fixtures(self): - dt1 = timeutils.utcnow() - dt2 = dt1 + datetime.timedelta(microseconds=5) - fixtures = [ - { - 'id': UUID1, - 'created_at': dt1, - 'updated_at': dt1, - 'properties': {'foo': 'bar', 'far': 'boo'}, - 'protected': True, - 'size': 13, - }, - { - 'id': UUID2, - 'created_at': dt1, - 'updated_at': dt2, - 'size': 17, - }, - { - 'id': UUID3, - 'created_at': dt2, - 'updated_at': dt2, - }, - ] - return [build_image_fixture(**fixture) for fixture in fixtures] - - def create_images(self, images): - for fixture in images: - self.db_api.image_create(self.adm_context, fixture) - - -class DriverTests(object): - - def test_image_create_requires_status(self): - fixture = {'name': 'mark', 'size': 12} - self.assertRaises(exception.Invalid, - self.db_api.image_create, self.context, fixture) - fixture = {'name': 'mark', 'size': 12, 'status': 'queued'} - self.db_api.image_create(self.context, fixture) - - @mock.patch.object(timeutils, 'utcnow') - def test_image_create_defaults(self, mock_utcnow): - mock_utcnow.return_value = datetime.datetime.utcnow() - create_time = timeutils.utcnow() - values = {'status': 'queued', - 'created_at': create_time, - 'updated_at': create_time} - image = self.db_api.image_create(self.context, values) - - 
self.assertIsNone(image['name']) - self.assertIsNone(image['container_format']) - self.assertEqual(0, image['min_ram']) - self.assertEqual(0, image['min_disk']) - self.assertIsNone(image['owner']) - self.assertEqual('shared', image['visibility']) - self.assertIsNone(image['size']) - self.assertIsNone(image['checksum']) - self.assertIsNone(image['disk_format']) - self.assertEqual([], image['locations']) - self.assertFalse(image['protected']) - self.assertFalse(image['deleted']) - self.assertIsNone(image['deleted_at']) - self.assertEqual([], image['properties']) - self.assertEqual(create_time, image['created_at']) - self.assertEqual(create_time, image['updated_at']) - - # Image IDs aren't predictable, but they should be populated - self.assertTrue(uuid.UUID(image['id'])) - - # NOTE(bcwaldon): the tags attribute should not be returned as a part - # of a core image entity - self.assertNotIn('tags', image) - - def test_image_create_duplicate_id(self): - self.assertRaises(exception.Duplicate, - self.db_api.image_create, - self.context, {'id': UUID1, 'status': 'queued'}) - - def test_image_create_with_locations(self): - locations = [{'url': 'a', 'metadata': {}, 'status': 'active'}, - {'url': 'b', 'metadata': {}, 'status': 'active'}] - - fixture = {'status': 'queued', - 'locations': locations} - image = self.db_api.image_create(self.context, fixture) - actual = [{'url': l['url'], 'metadata': l['metadata'], - 'status': l['status']} - for l in image['locations']] - self.assertEqual(locations, actual) - - def test_image_create_without_locations(self): - locations = [] - fixture = {'status': 'queued', - 'locations': locations} - self.db_api.image_create(self.context, fixture) - - def test_image_create_with_location_data(self): - location_data = [{'url': 'a', 'metadata': {'key': 'value'}, - 'status': 'active'}, - {'url': 'b', 'metadata': {}, - 'status': 'active'}] - fixture = {'status': 'queued', 'locations': location_data} - image = self.db_api.image_create(self.context, 
fixture) - actual = [{'url': l['url'], 'metadata': l['metadata'], - 'status': l['status']} - for l in image['locations']] - self.assertEqual(location_data, actual) - - def test_image_create_properties(self): - fixture = {'status': 'queued', 'properties': {'ping': 'pong'}} - image = self.db_api.image_create(self.context, fixture) - expected = [{'name': 'ping', 'value': 'pong'}] - actual = [{'name': p['name'], 'value': p['value']} - for p in image['properties']] - self.assertEqual(expected, actual) - - def test_image_create_unknown_attributes(self): - fixture = {'ping': 'pong'} - self.assertRaises(exception.Invalid, - self.db_api.image_create, self.context, fixture) - - def test_image_create_bad_name(self): - bad_name = u'A name with forbidden symbol \U0001f62a' - fixture = {'name': bad_name, 'size': 12, 'status': 'queued'} - self.assertRaises(exception.Invalid, self.db_api.image_create, - self.context, fixture) - - def test_image_create_bad_checksum(self): - # checksum should be no longer than 32 characters - bad_checksum = "42" * 42 - fixture = {'checksum': bad_checksum} - self.assertRaises(exception.Invalid, self.db_api.image_create, - self.context, fixture) - # if checksum is not longer than 32 characters but non-ascii -> - # still raise 400 - fixture = {'checksum': u'\u042f' * 32} - self.assertRaises(exception.Invalid, self.db_api.image_create, - self.context, fixture) - - def test_image_create_bad_int_params(self): - int_too_long = 2 ** 31 + 42 - for param in ['min_disk', 'min_ram']: - fixture = {param: int_too_long} - self.assertRaises(exception.Invalid, self.db_api.image_create, - self.context, fixture) - - def test_image_create_bad_property(self): - # bad value - fixture = {'status': 'queued', - 'properties': {'bad': u'Bad \U0001f62a'}} - self.assertRaises(exception.Invalid, self.db_api.image_create, - self.context, fixture) - # bad property names are also not allowed - fixture = {'status': 'queued', 'properties': {u'Bad \U0001f62a': 'ok'}} - 
self.assertRaises(exception.Invalid, self.db_api.image_create, - self.context, fixture) - - def test_image_create_bad_location(self): - location_data = [{'url': 'a', 'metadata': {'key': 'value'}, - 'status': 'active'}, - {'url': u'Bad \U0001f60a', 'metadata': {}, - 'status': 'active'}] - fixture = {'status': 'queued', 'locations': location_data} - self.assertRaises(exception.Invalid, self.db_api.image_create, - self.context, fixture) - - def test_image_update_core_attribute(self): - fixture = {'status': 'queued'} - image = self.db_api.image_update(self.adm_context, UUID3, fixture) - self.assertEqual('queued', image['status']) - self.assertNotEqual(image['created_at'], image['updated_at']) - - def test_image_update_with_locations(self): - locations = [{'url': 'a', 'metadata': {}, 'status': 'active'}, - {'url': 'b', 'metadata': {}, 'status': 'active'}] - fixture = {'locations': locations} - image = self.db_api.image_update(self.adm_context, UUID3, fixture) - self.assertEqual(2, len(image['locations'])) - self.assertIn('id', image['locations'][0]) - self.assertIn('id', image['locations'][1]) - image['locations'][0].pop('id') - image['locations'][1].pop('id') - self.assertEqual(locations, image['locations']) - - def test_image_update_with_location_data(self): - location_data = [{'url': 'a', 'metadata': {'key': 'value'}, - 'status': 'active'}, - {'url': 'b', 'metadata': {}, 'status': 'active'}] - fixture = {'locations': location_data} - image = self.db_api.image_update(self.adm_context, UUID3, fixture) - self.assertEqual(2, len(image['locations'])) - self.assertIn('id', image['locations'][0]) - self.assertIn('id', image['locations'][1]) - image['locations'][0].pop('id') - image['locations'][1].pop('id') - self.assertEqual(location_data, image['locations']) - - def test_image_update(self): - fixture = {'status': 'queued', 'properties': {'ping': 'pong'}} - image = self.db_api.image_update(self.adm_context, UUID3, fixture) - expected = [{'name': 'ping', 'value': 'pong'}] - 
actual = [{'name': p['name'], 'value': p['value']} - for p in image['properties']] - self.assertEqual(expected, actual) - self.assertEqual('queued', image['status']) - self.assertNotEqual(image['created_at'], image['updated_at']) - - def test_image_update_properties(self): - fixture = {'properties': {'ping': 'pong'}} - image = self.db_api.image_update(self.adm_context, UUID1, fixture) - expected = {'ping': 'pong', 'foo': 'bar', 'far': 'boo'} - actual = {p['name']: p['value'] for p in image['properties']} - self.assertEqual(expected, actual) - self.assertNotEqual(image['created_at'], image['updated_at']) - - def test_image_update_purge_properties(self): - fixture = {'properties': {'ping': 'pong'}} - image = self.db_api.image_update(self.adm_context, UUID1, - fixture, purge_props=True) - properties = {p['name']: p for p in image['properties']} - - # New properties are set - self.assertIn('ping', properties) - self.assertEqual('pong', properties['ping']['value']) - self.assertFalse(properties['ping']['deleted']) - - # Original properties still show up, but with deleted=True - # TODO(markwash): db api should not return deleted properties - self.assertIn('foo', properties) - self.assertEqual('bar', properties['foo']['value']) - self.assertTrue(properties['foo']['deleted']) - - def test_image_update_bad_name(self): - fixture = {'name': u'A new name with forbidden symbol \U0001f62a'} - self.assertRaises(exception.Invalid, self.db_api.image_update, - self.adm_context, UUID1, fixture) - - def test_image_update_bad_property(self): - # bad value - fixture = {'status': 'queued', - 'properties': {'bad': u'Bad \U0001f62a'}} - self.assertRaises(exception.Invalid, self.db_api.image_update, - self.adm_context, UUID1, fixture) - # bad property names are also not allowed - fixture = {'status': 'queued', 'properties': {u'Bad \U0001f62a': 'ok'}} - self.assertRaises(exception.Invalid, self.db_api.image_update, - self.adm_context, UUID1, fixture) - - def 
test_image_update_bad_location(self): - location_data = [{'url': 'a', 'metadata': {'key': 'value'}, - 'status': 'active'}, - {'url': u'Bad \U0001f60a', 'metadata': {}, - 'status': 'active'}] - fixture = {'status': 'queued', 'locations': location_data} - self.assertRaises(exception.Invalid, self.db_api.image_update, - self.adm_context, UUID1, fixture) - - def test_update_locations_direct(self): - """ - For some reasons update_locations can be called directly - (not via image_update), so better check that everything is ok if passed - 4 byte unicode characters - """ - # update locations correctly first to retrieve existing location id - location_data = [{'url': 'a', 'metadata': {'key': 'value'}, - 'status': 'active'}] - fixture = {'locations': location_data} - image = self.db_api.image_update(self.adm_context, UUID1, fixture) - self.assertEqual(1, len(image['locations'])) - self.assertIn('id', image['locations'][0]) - loc_id = image['locations'][0].pop('id') - bad_location = {'url': u'Bad \U0001f60a', 'metadata': {}, - 'status': 'active', 'id': loc_id} - self.assertRaises(exception.Invalid, - self.db_api.image_location_update, - self.adm_context, UUID1, bad_location) - - def test_image_property_delete(self): - fixture = {'name': 'ping', 'value': 'pong', 'image_id': UUID1} - prop = self.db_api.image_property_create(self.context, fixture) - prop = self.db_api.image_property_delete(self.context, - prop['name'], UUID1) - self.assertIsNotNone(prop['deleted_at']) - self.assertTrue(prop['deleted']) - - def test_image_get(self): - image = self.db_api.image_get(self.context, UUID1) - self.assertEqual(self.fixtures[0]['id'], image['id']) - - def test_image_get_disallow_deleted(self): - self.db_api.image_destroy(self.adm_context, UUID1) - self.assertRaises(exception.NotFound, self.db_api.image_get, - self.context, UUID1) - - def test_image_get_allow_deleted(self): - self.db_api.image_destroy(self.adm_context, UUID1) - image = self.db_api.image_get(self.adm_context, UUID1) - 
self.assertEqual(self.fixtures[0]['id'], image['id']) - self.assertTrue(image['deleted']) - - def test_image_get_force_allow_deleted(self): - self.db_api.image_destroy(self.adm_context, UUID1) - image = self.db_api.image_get(self.context, UUID1, - force_show_deleted=True) - self.assertEqual(self.fixtures[0]['id'], image['id']) - - def test_image_get_not_owned(self): - TENANT1 = str(uuid.uuid4()) - TENANT2 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, - auth_token='user:%s:user' % TENANT1) - ctxt2 = context.RequestContext(is_admin=False, tenant=TENANT2, - auth_token='user:%s:user' % TENANT2) - image = self.db_api.image_create( - ctxt1, {'status': 'queued', 'owner': TENANT1}) - self.assertRaises(exception.Forbidden, - self.db_api.image_get, ctxt2, image['id']) - - def test_image_get_not_found(self): - UUID = str(uuid.uuid4()) - self.assertRaises(exception.NotFound, - self.db_api.image_get, self.context, UUID) - - def test_image_get_all(self): - images = self.db_api.image_get_all(self.context) - self.assertEqual(3, len(images)) - - def test_image_get_all_with_filter(self): - images = self.db_api.image_get_all(self.context, - filters={ - 'id': self.fixtures[0]['id'], - }) - self.assertEqual(1, len(images)) - self.assertEqual(self.fixtures[0]['id'], images[0]['id']) - - def test_image_get_all_with_filter_user_defined_property(self): - images = self.db_api.image_get_all(self.context, - filters={'foo': 'bar'}) - self.assertEqual(1, len(images)) - self.assertEqual(self.fixtures[0]['id'], images[0]['id']) - - def test_image_get_all_with_filter_nonexistent_userdef_property(self): - images = self.db_api.image_get_all(self.context, - filters={'faz': 'boo'}) - self.assertEqual(0, len(images)) - - def test_image_get_all_with_filter_userdef_prop_nonexistent_value(self): - images = self.db_api.image_get_all(self.context, - filters={'foo': 'baz'}) - self.assertEqual(0, len(images)) - - def 
test_image_get_all_with_filter_multiple_user_defined_properties(self): - images = self.db_api.image_get_all(self.context, - filters={'foo': 'bar', - 'far': 'boo'}) - self.assertEqual(1, len(images)) - self.assertEqual(images[0]['id'], self.fixtures[0]['id']) - - def test_image_get_all_with_filter_nonexistent_user_defined_property(self): - images = self.db_api.image_get_all(self.context, - filters={'foo': 'bar', - 'faz': 'boo'}) - self.assertEqual(0, len(images)) - - def test_image_get_all_with_filter_user_deleted_property(self): - fixture = {'name': 'poo', 'value': 'bear', 'image_id': UUID1} - prop = self.db_api.image_property_create(self.context, - fixture) - - images = self.db_api.image_get_all(self.context, - filters={ - 'properties': {'poo': 'bear'}, - }) - self.assertEqual(1, len(images)) - self.db_api.image_property_delete(self.context, - prop['name'], images[0]['id']) - images = self.db_api.image_get_all(self.context, - filters={ - 'properties': {'poo': 'bear'}, - }) - self.assertEqual(0, len(images)) - - def test_image_get_all_with_filter_undefined_property(self): - images = self.db_api.image_get_all(self.context, - filters={'poo': 'bear'}) - self.assertEqual(0, len(images)) - - def test_image_get_all_with_filter_protected(self): - images = self.db_api.image_get_all(self.context, - filters={'protected': - True}) - self.assertEqual(1, len(images)) - images = self.db_api.image_get_all(self.context, - filters={'protected': - False}) - self.assertEqual(2, len(images)) - - def test_image_get_all_with_filter_comparative_created_at(self): - anchor = timeutils.isotime(self.fixtures[0]['created_at']) - time_expr = 'lt:' + anchor - - images = self.db_api.image_get_all(self.context, - filters={'created_at': time_expr}) - self.assertEqual(0, len(images)) - - def test_image_get_all_with_filter_comparative_updated_at(self): - anchor = timeutils.isotime(self.fixtures[0]['updated_at']) - time_expr = 'lt:' + anchor - - images = self.db_api.image_get_all(self.context, - 
filters={'updated_at': time_expr}) - self.assertEqual(0, len(images)) - - def test_filter_image_by_invalid_operator(self): - self.assertRaises(exception.InvalidFilterOperatorValue, - self.db_api.image_get_all, - self.context, filters={'status': 'lala:active'}) - - def test_image_get_all_with_filter_in_status(self): - images = self.db_api.image_get_all(self.context, - filters={'status': 'in:active'}) - self.assertEqual(3, len(images)) - - def test_image_get_all_with_filter_in_name(self): - data = 'in:%s' % self.fixtures[0]['name'] - images = self.db_api.image_get_all(self.context, - filters={'name': data}) - self.assertEqual(3, len(images)) - - def test_image_get_all_with_filter_in_container_format(self): - images = self.db_api.image_get_all(self.context, - filters={'container_format': - 'in:ami,bare,ovf'}) - self.assertEqual(3, len(images)) - - def test_image_get_all_with_filter_in_disk_format(self): - images = self.db_api.image_get_all(self.context, - filters={'disk_format': - 'in:vhd'}) - self.assertEqual(3, len(images)) - - def test_image_get_all_with_filter_in_id(self): - data = 'in:%s,%s' % (UUID1, UUID2) - images = self.db_api.image_get_all(self.context, - filters={'id': data}) - self.assertEqual(2, len(images)) - - def test_image_get_all_with_quotes(self): - fixture = {'name': 'fake\\\"name'} - self.db_api.image_update(self.adm_context, UUID3, fixture) - - fixture = {'name': 'fake,name'} - self.db_api.image_update(self.adm_context, UUID2, fixture) - - fixture = {'name': 'fakename'} - self.db_api.image_update(self.adm_context, UUID1, fixture) - - data = 'in:\"fake\\\"name\",fakename,\"fake,name\"' - - images = self.db_api.image_get_all(self.context, - filters={'name': data}) - self.assertEqual(3, len(images)) - - def test_image_get_all_with_invalid_quotes(self): - invalid_expr = ['in:\"name', 'in:\"name\"name', 'in:name\"dd\"', - 'in:na\"me', 'in:\"name\"\"name\"'] - for expr in invalid_expr: - self.assertRaises(exception.InvalidParameterValue, - 
self.db_api.image_get_all, - self.context, filters={'name': expr}) - - def test_image_get_all_size_min_max(self): - images = self.db_api.image_get_all(self.context, - filters={ - 'size_min': 10, - 'size_max': 15, - }) - self.assertEqual(1, len(images)) - self.assertEqual(self.fixtures[0]['id'], images[0]['id']) - - def test_image_get_all_size_min(self): - images = self.db_api.image_get_all(self.context, - filters={'size_min': 15}) - self.assertEqual(2, len(images)) - self.assertEqual(self.fixtures[2]['id'], images[0]['id']) - self.assertEqual(self.fixtures[1]['id'], images[1]['id']) - - def test_image_get_all_size_range(self): - images = self.db_api.image_get_all(self.context, - filters={'size_max': 15, - 'size_min': 20}) - self.assertEqual(0, len(images)) - - def test_image_get_all_size_max(self): - images = self.db_api.image_get_all(self.context, - filters={'size_max': 15}) - self.assertEqual(1, len(images)) - self.assertEqual(self.fixtures[0]['id'], images[0]['id']) - - def test_image_get_all_with_filter_min_range_bad_value(self): - self.assertRaises(exception.InvalidFilterRangeValue, - self.db_api.image_get_all, - self.context, filters={'size_min': 'blah'}) - - def test_image_get_all_with_filter_max_range_bad_value(self): - self.assertRaises(exception.InvalidFilterRangeValue, - self.db_api.image_get_all, - self.context, filters={'size_max': 'blah'}) - - def test_image_get_all_marker(self): - images = self.db_api.image_get_all(self.context, marker=UUID3) - self.assertEqual(2, len(images)) - - def test_image_get_all_marker_with_size(self): - # Use sort_key=size to test BigInteger - images = self.db_api.image_get_all(self.context, sort_key=['size'], - marker=UUID3) - self.assertEqual(2, len(images)) - self.assertEqual(17, images[0]['size']) - self.assertEqual(13, images[1]['size']) - - def test_image_get_all_marker_deleted(self): - """Cannot specify a deleted image as a marker.""" - self.db_api.image_destroy(self.adm_context, UUID1) - filters = {'deleted': False} 
- self.assertRaises(exception.NotFound, self.db_api.image_get_all, - self.context, marker=UUID1, filters=filters) - - def test_image_get_all_marker_deleted_showing_deleted_as_admin(self): - """Specify a deleted image as a marker if showing deleted images.""" - self.db_api.image_destroy(self.adm_context, UUID3) - images = self.db_api.image_get_all(self.adm_context, marker=UUID3) - # NOTE(bcwaldon): an admin should see all images (deleted or not) - self.assertEqual(2, len(images)) - - def test_image_get_all_marker_deleted_showing_deleted(self): - """Specify a deleted image as a marker if showing deleted images. - - A non-admin user has to explicitly ask for deleted - images, and should only see deleted images in the results - """ - self.db_api.image_destroy(self.adm_context, UUID3) - self.db_api.image_destroy(self.adm_context, UUID1) - filters = {'deleted': True} - images = self.db_api.image_get_all(self.context, marker=UUID3, - filters=filters) - self.assertEqual(1, len(images)) - - def test_image_get_all_marker_null_name_desc(self): - """Check an image with name null is handled - - Check an image with name null is handled - marker is specified and order is descending - """ - TENANT1 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, - auth_token='user:%s:user' % TENANT1) - UUIDX = str(uuid.uuid4()) - self.db_api.image_create(ctxt1, {'id': UUIDX, - 'status': 'queued', - 'name': None, - 'owner': TENANT1}) - - images = self.db_api.image_get_all(ctxt1, marker=UUIDX, - sort_key=['name'], - sort_dir=['desc']) - image_ids = [image['id'] for image in images] - expected = [] - self.assertEqual(sorted(expected), sorted(image_ids)) - - def test_image_get_all_marker_null_disk_format_desc(self): - """Check an image with disk_format null is handled - - Check an image with disk_format null is handled when - marker is specified and order is descending - """ - TENANT1 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, 
tenant=TENANT1, - auth_token='user:%s:user' % TENANT1) - UUIDX = str(uuid.uuid4()) - self.db_api.image_create(ctxt1, {'id': UUIDX, - 'status': 'queued', - 'disk_format': None, - 'owner': TENANT1}) - - images = self.db_api.image_get_all(ctxt1, marker=UUIDX, - sort_key=['disk_format'], - sort_dir=['desc']) - image_ids = [image['id'] for image in images] - expected = [] - self.assertEqual(sorted(expected), sorted(image_ids)) - - def test_image_get_all_marker_null_container_format_desc(self): - """Check an image with container_format null is handled - - Check an image with container_format null is handled when - marker is specified and order is descending - """ - TENANT1 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, - auth_token='user:%s:user' % TENANT1) - UUIDX = str(uuid.uuid4()) - self.db_api.image_create(ctxt1, {'id': UUIDX, - 'status': 'queued', - 'container_format': None, - 'owner': TENANT1}) - - images = self.db_api.image_get_all(ctxt1, marker=UUIDX, - sort_key=['container_format'], - sort_dir=['desc']) - image_ids = [image['id'] for image in images] - expected = [] - self.assertEqual(sorted(expected), sorted(image_ids)) - - def test_image_get_all_marker_null_name_asc(self): - """Check an image with name null is handled - - Check an image with name null is handled when - marker is specified and order is ascending - """ - TENANT1 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, - auth_token='user:%s:user' % TENANT1) - UUIDX = str(uuid.uuid4()) - self.db_api.image_create(ctxt1, {'id': UUIDX, - 'status': 'queued', - 'name': None, - 'owner': TENANT1}) - - images = self.db_api.image_get_all(ctxt1, marker=UUIDX, - sort_key=['name'], - sort_dir=['asc']) - image_ids = [image['id'] for image in images] - expected = [UUID3, UUID2, UUID1] - self.assertEqual(sorted(expected), sorted(image_ids)) - - def test_image_get_all_marker_null_disk_format_asc(self): - """Check an image with disk_format null is 
handled - - Check an image with disk_format null is handled when - marker is specified and order is ascending - """ - TENANT1 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, - auth_token='user:%s:user' % TENANT1) - UUIDX = str(uuid.uuid4()) - self.db_api.image_create(ctxt1, {'id': UUIDX, - 'status': 'queued', - 'disk_format': None, - 'owner': TENANT1}) - - images = self.db_api.image_get_all(ctxt1, marker=UUIDX, - sort_key=['disk_format'], - sort_dir=['asc']) - image_ids = [image['id'] for image in images] - expected = [UUID3, UUID2, UUID1] - self.assertEqual(sorted(expected), sorted(image_ids)) - - def test_image_get_all_marker_null_container_format_asc(self): - """Check an image with container_format null is handled - - Check an image with container_format null is handled when - marker is specified and order is ascending - """ - TENANT1 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, - auth_token='user:%s:user' % TENANT1) - UUIDX = str(uuid.uuid4()) - self.db_api.image_create(ctxt1, {'id': UUIDX, - 'status': 'queued', - 'container_format': None, - 'owner': TENANT1}) - - images = self.db_api.image_get_all(ctxt1, marker=UUIDX, - sort_key=['container_format'], - sort_dir=['asc']) - image_ids = [image['id'] for image in images] - expected = [UUID3, UUID2, UUID1] - self.assertEqual(sorted(expected), sorted(image_ids)) - - def test_image_get_all_limit(self): - images = self.db_api.image_get_all(self.context, limit=2) - self.assertEqual(2, len(images)) - - # A limit of None should not equate to zero - images = self.db_api.image_get_all(self.context, limit=None) - self.assertEqual(3, len(images)) - - # A limit of zero should actually mean zero - images = self.db_api.image_get_all(self.context, limit=0) - self.assertEqual(0, len(images)) - - def test_image_get_all_owned(self): - TENANT1 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, - tenant=TENANT1, - auth_token='user:%s:user' 
% TENANT1) - UUIDX = str(uuid.uuid4()) - image_meta_data = {'id': UUIDX, 'status': 'queued', 'owner': TENANT1} - self.db_api.image_create(ctxt1, image_meta_data) - - TENANT2 = str(uuid.uuid4()) - ctxt2 = context.RequestContext(is_admin=False, - tenant=TENANT2, - auth_token='user:%s:user' % TENANT2) - UUIDY = str(uuid.uuid4()) - image_meta_data = {'id': UUIDY, 'status': 'queued', 'owner': TENANT2} - self.db_api.image_create(ctxt2, image_meta_data) - - images = self.db_api.image_get_all(ctxt1) - - image_ids = [image['id'] for image in images] - expected = [UUIDX, UUID3, UUID2, UUID1] - self.assertEqual(sorted(expected), sorted(image_ids)) - - def test_image_get_all_owned_checksum(self): - TENANT1 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, - tenant=TENANT1, - auth_token='user:%s:user' % TENANT1) - UUIDX = str(uuid.uuid4()) - CHECKSUM1 = '91264c3edf5972c9f1cb309543d38a5c' - image_meta_data = { - 'id': UUIDX, - 'status': 'queued', - 'checksum': CHECKSUM1, - 'owner': TENANT1 - } - self.db_api.image_create(ctxt1, image_meta_data) - image_member_data = { - 'image_id': UUIDX, - 'member': TENANT1, - 'can_share': False, - "status": "accepted", - } - self.db_api.image_member_create(ctxt1, image_member_data) - - TENANT2 = str(uuid.uuid4()) - ctxt2 = context.RequestContext(is_admin=False, - tenant=TENANT2, - auth_token='user:%s:user' % TENANT2) - UUIDY = str(uuid.uuid4()) - CHECKSUM2 = '92264c3edf5972c9f1cb309543d38a5c' - image_meta_data = { - 'id': UUIDY, - 'status': 'queued', - 'checksum': CHECKSUM2, - 'owner': TENANT2 - } - self.db_api.image_create(ctxt2, image_meta_data) - image_member_data = { - 'image_id': UUIDY, - 'member': TENANT2, - 'can_share': False, - "status": "accepted", - } - self.db_api.image_member_create(ctxt2, image_member_data) - - filters = {'visibility': 'shared', 'checksum': CHECKSUM2} - images = self.db_api.image_get_all(ctxt2, filters) - - self.assertEqual(1, len(images)) - self.assertEqual(UUIDY, images[0]['id']) - - def 
test_image_get_all_with_filter_tags(self): - self.db_api.image_tag_create(self.context, UUID1, 'x86') - self.db_api.image_tag_create(self.context, UUID1, '64bit') - self.db_api.image_tag_create(self.context, UUID2, 'power') - self.db_api.image_tag_create(self.context, UUID2, '64bit') - images = self.db_api.image_get_all(self.context, - filters={'tags': ['64bit']}) - self.assertEqual(2, len(images)) - image_ids = [image['id'] for image in images] - expected = [UUID1, UUID2] - self.assertEqual(sorted(expected), sorted(image_ids)) - - def test_image_get_all_with_filter_multi_tags(self): - self.db_api.image_tag_create(self.context, UUID1, 'x86') - self.db_api.image_tag_create(self.context, UUID1, '64bit') - self.db_api.image_tag_create(self.context, UUID2, 'power') - self.db_api.image_tag_create(self.context, UUID2, '64bit') - images = self.db_api.image_get_all(self.context, - filters={'tags': ['64bit', 'power'] - }) - self.assertEqual(1, len(images)) - self.assertEqual(UUID2, images[0]['id']) - - def test_image_get_all_with_filter_tags_and_nonexistent(self): - self.db_api.image_tag_create(self.context, UUID1, 'x86') - images = self.db_api.image_get_all(self.context, - filters={'tags': ['x86', 'fake'] - }) - self.assertEqual(0, len(images)) - - def test_image_get_all_with_filter_deleted_tags(self): - tag = self.db_api.image_tag_create(self.context, UUID1, 'AIX') - images = self.db_api.image_get_all(self.context, - filters={ - 'tags': [tag], - }) - self.assertEqual(1, len(images)) - self.db_api.image_tag_delete(self.context, UUID1, tag) - images = self.db_api.image_get_all(self.context, - filters={ - 'tags': [tag], - }) - self.assertEqual(0, len(images)) - - def test_image_get_all_with_filter_undefined_tags(self): - images = self.db_api.image_get_all(self.context, - filters={'tags': ['fake']}) - self.assertEqual(0, len(images)) - - def test_image_paginate(self): - """Paginate through a list of images using limit and marker""" - now = timeutils.utcnow() - extra_uuids = 
[(str(uuid.uuid4()), - now + datetime.timedelta(seconds=i * 5)) - for i in range(2)] - extra_images = [build_image_fixture(id=_id, - created_at=_dt, - updated_at=_dt) - for _id, _dt in extra_uuids] - self.create_images(extra_images) - - # Reverse uuids to match default sort of created_at - extra_uuids.reverse() - - page = self.db_api.image_get_all(self.context, limit=2) - self.assertEqual([i[0] for i in extra_uuids], [i['id'] for i in page]) - last = page[-1]['id'] - - page = self.db_api.image_get_all(self.context, limit=2, marker=last) - self.assertEqual([UUID3, UUID2], [i['id'] for i in page]) - - page = self.db_api.image_get_all(self.context, limit=2, marker=UUID2) - self.assertEqual([UUID1], [i['id'] for i in page]) - - def test_image_get_all_invalid_sort_key(self): - self.assertRaises(exception.InvalidSortKey, self.db_api.image_get_all, - self.context, sort_key=['blah']) - - def test_image_get_all_limit_marker(self): - images = self.db_api.image_get_all(self.context, limit=2) - self.assertEqual(2, len(images)) - - def test_image_get_all_with_tag_returning(self): - expected_tags = {UUID1: ['foo'], UUID2: ['bar'], UUID3: ['baz']} - - self.db_api.image_tag_create(self.context, UUID1, - expected_tags[UUID1][0]) - self.db_api.image_tag_create(self.context, UUID2, - expected_tags[UUID2][0]) - self.db_api.image_tag_create(self.context, UUID3, - expected_tags[UUID3][0]) - - images = self.db_api.image_get_all(self.context, return_tag=True) - self.assertEqual(3, len(images)) - - for image in images: - self.assertIn('tags', image) - self.assertEqual(expected_tags[image['id']], image['tags']) - - self.db_api.image_tag_delete(self.context, UUID1, - expected_tags[UUID1][0]) - expected_tags[UUID1] = [] - - images = self.db_api.image_get_all(self.context, return_tag=True) - self.assertEqual(3, len(images)) - - for image in images: - self.assertIn('tags', image) - self.assertEqual(expected_tags[image['id']], image['tags']) - - def test_image_destroy(self): - location_data = 
[{'url': 'a', 'metadata': {'key': 'value'}, - 'status': 'active'}, - {'url': 'b', 'metadata': {}, - 'status': 'active'}] - fixture = {'status': 'queued', 'locations': location_data} - image = self.db_api.image_create(self.context, fixture) - IMG_ID = image['id'] - - fixture = {'name': 'ping', 'value': 'pong', 'image_id': IMG_ID} - prop = self.db_api.image_property_create(self.context, fixture) - TENANT2 = str(uuid.uuid4()) - fixture = {'image_id': IMG_ID, 'member': TENANT2, 'can_share': False} - member = self.db_api.image_member_create(self.context, fixture) - self.db_api.image_tag_create(self.context, IMG_ID, 'snarf') - - self.assertEqual(2, len(image['locations'])) - self.assertIn('id', image['locations'][0]) - self.assertIn('id', image['locations'][1]) - image['locations'][0].pop('id') - image['locations'][1].pop('id') - self.assertEqual(location_data, image['locations']) - self.assertEqual(('ping', 'pong', IMG_ID, False), - (prop['name'], prop['value'], - prop['image_id'], prop['deleted'])) - self.assertEqual((TENANT2, IMG_ID, False), - (member['member'], member['image_id'], - member['can_share'])) - self.assertEqual(['snarf'], - self.db_api.image_tag_get_all(self.context, IMG_ID)) - - image = self.db_api.image_destroy(self.adm_context, IMG_ID) - self.assertTrue(image['deleted']) - self.assertTrue(image['deleted_at']) - self.assertRaises(exception.NotFound, self.db_api.image_get, - self.context, IMG_ID) - - self.assertEqual([], image['locations']) - prop = image['properties'][0] - self.assertEqual(('ping', IMG_ID, True), - (prop['name'], prop['image_id'], prop['deleted'])) - self.context.auth_token = 'user:%s:user' % TENANT2 - members = self.db_api.image_member_find(self.context, IMG_ID) - self.assertEqual([], members) - tags = self.db_api.image_tag_get_all(self.context, IMG_ID) - self.assertEqual([], tags) - - def test_image_destroy_with_delete_all(self): - """Check the image child element's _image_delete_all methods. 
- - checks if all the image_delete_all methods deletes only the child - elements of the image to be deleted. - """ - TENANT2 = str(uuid.uuid4()) - location_data = [{'url': 'a', 'metadata': {'key': 'value'}, - 'status': 'active'}, - {'url': 'b', 'metadata': {}, 'status': 'active'}] - - def _create_image_with_child_entries(): - fixture = {'status': 'queued', 'locations': location_data} - - image_id = self.db_api.image_create(self.context, fixture)['id'] - - fixture = {'name': 'ping', 'value': 'pong', 'image_id': image_id} - self.db_api.image_property_create(self.context, fixture) - fixture = {'image_id': image_id, 'member': TENANT2, - 'can_share': False} - self.db_api.image_member_create(self.context, fixture) - self.db_api.image_tag_create(self.context, image_id, 'snarf') - return image_id - - ACTIVE_IMG_ID = _create_image_with_child_entries() - DEL_IMG_ID = _create_image_with_child_entries() - - deleted_image = self.db_api.image_destroy(self.adm_context, DEL_IMG_ID) - self.assertTrue(deleted_image['deleted']) - self.assertTrue(deleted_image['deleted_at']) - self.assertRaises(exception.NotFound, self.db_api.image_get, - self.context, DEL_IMG_ID) - - active_image = self.db_api.image_get(self.context, ACTIVE_IMG_ID) - self.assertFalse(active_image['deleted']) - self.assertFalse(active_image['deleted_at']) - - self.assertEqual(2, len(active_image['locations'])) - self.assertIn('id', active_image['locations'][0]) - self.assertIn('id', active_image['locations'][1]) - active_image['locations'][0].pop('id') - active_image['locations'][1].pop('id') - self.assertEqual(location_data, active_image['locations']) - self.assertEqual(1, len(active_image['properties'])) - prop = active_image['properties'][0] - self.assertEqual(('ping', 'pong', ACTIVE_IMG_ID), - (prop['name'], prop['value'], - prop['image_id'])) - self.assertEqual((False, None), - (prop['deleted'], prop['deleted_at'])) - self.context.auth_token = 'user:%s:user' % TENANT2 - members = 
self.db_api.image_member_find(self.context, ACTIVE_IMG_ID) - self.assertEqual(1, len(members)) - member = members[0] - self.assertEqual((TENANT2, ACTIVE_IMG_ID, False), - (member['member'], member['image_id'], - member['can_share'])) - tags = self.db_api.image_tag_get_all(self.context, ACTIVE_IMG_ID) - self.assertEqual(['snarf'], tags) - - def test_image_get_multiple_members(self): - TENANT1 = str(uuid.uuid4()) - TENANT2 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, - auth_token='user:%s:user' % TENANT1, - owner_is_tenant=True) - ctxt2 = context.RequestContext(is_admin=False, user=TENANT2, - auth_token='user:%s:user' % TENANT2, - owner_is_tenant=False) - UUIDX = str(uuid.uuid4()) - # We need a shared image and context.owner should not match image - # owner - self.db_api.image_create(ctxt1, {'id': UUIDX, - 'status': 'queued', - 'is_public': False, - 'owner': TENANT1}) - values = {'image_id': UUIDX, 'member': TENANT2, 'can_share': False} - self.db_api.image_member_create(ctxt1, values) - - image = self.db_api.image_get(ctxt2, UUIDX) - self.assertEqual(UUIDX, image['id']) - - # by default get_all displays only images with status 'accepted' - images = self.db_api.image_get_all(ctxt2) - self.assertEqual(3, len(images)) - - # filter by rejected - images = self.db_api.image_get_all(ctxt2, member_status='rejected') - self.assertEqual(3, len(images)) - - # filter by visibility - images = self.db_api.image_get_all(ctxt2, - filters={'visibility': 'shared'}) - self.assertEqual(0, len(images)) - - # filter by visibility - images = self.db_api.image_get_all(ctxt2, member_status='pending', - filters={'visibility': 'shared'}) - self.assertEqual(1, len(images)) - - # filter by visibility - images = self.db_api.image_get_all(ctxt2, member_status='all', - filters={'visibility': 'shared'}) - self.assertEqual(1, len(images)) - - # filter by status pending - images = self.db_api.image_get_all(ctxt2, member_status='pending') - self.assertEqual(4, 
len(images)) - - # filter by status all - images = self.db_api.image_get_all(ctxt2, member_status='all') - self.assertEqual(4, len(images)) - - def test_is_image_visible(self): - TENANT1 = str(uuid.uuid4()) - TENANT2 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, - auth_token='user:%s:user' % TENANT1, - owner_is_tenant=True) - ctxt2 = context.RequestContext(is_admin=False, user=TENANT2, - auth_token='user:%s:user' % TENANT2, - owner_is_tenant=False) - UUIDX = str(uuid.uuid4()) - # We need a shared image and context.owner should not match image - # owner - image = self.db_api.image_create(ctxt1, {'id': UUIDX, - 'status': 'queued', - 'is_public': False, - 'owner': TENANT1}) - - values = {'image_id': UUIDX, 'member': TENANT2, 'can_share': False} - self.db_api.image_member_create(ctxt1, values) - - result = self.db_api.is_image_visible(ctxt2, image) - self.assertTrue(result) - - # image should not be visible for a deleted member - members = self.db_api.image_member_find(ctxt1, image_id=UUIDX) - self.db_api.image_member_delete(ctxt1, members[0]['id']) - - result = self.db_api.is_image_visible(ctxt2, image) - self.assertFalse(result) - - def test_is_community_image_visible(self): - TENANT1 = str(uuid.uuid4()) - TENANT2 = str(uuid.uuid4()) - owners_ctxt = context.RequestContext(is_admin=False, tenant=TENANT1, - auth_token='user:%s:user' - % TENANT1, owner_is_tenant=True) - viewing_ctxt = context.RequestContext(is_admin=False, user=TENANT2, - auth_token='user:%s:user' - % TENANT2, owner_is_tenant=False) - UUIDX = str(uuid.uuid4()) - # We need a community image and context.owner should not match image - # owner - image = self.db_api.image_create(owners_ctxt, - {'id': UUIDX, - 'status': 'queued', - 'visibility': 'community', - 'owner': TENANT1}) - - # image should be visible in every context - result = self.db_api.is_image_visible(owners_ctxt, image) - self.assertTrue(result) - - result = self.db_api.is_image_visible(viewing_ctxt, image) 
- self.assertTrue(result) - - def test_image_tag_create(self): - tag = self.db_api.image_tag_create(self.context, UUID1, 'snap') - self.assertEqual('snap', tag) - - def test_image_tag_create_bad_value(self): - self.assertRaises(exception.Invalid, - self.db_api.image_tag_create, self.context, - UUID1, u'Bad \U0001f62a') - - def test_image_tag_set_all(self): - tags = self.db_api.image_tag_get_all(self.context, UUID1) - self.assertEqual([], tags) - - self.db_api.image_tag_set_all(self.context, UUID1, ['ping', 'pong']) - - tags = self.db_api.image_tag_get_all(self.context, UUID1) - # NOTE(bcwaldon): tag ordering should match exactly what was provided - self.assertEqual(['ping', 'pong'], tags) - - def test_image_tag_get_all(self): - self.db_api.image_tag_create(self.context, UUID1, 'snap') - self.db_api.image_tag_create(self.context, UUID1, 'snarf') - self.db_api.image_tag_create(self.context, UUID2, 'snarf') - - # Check the tags for the first image - tags = self.db_api.image_tag_get_all(self.context, UUID1) - expected = ['snap', 'snarf'] - self.assertEqual(expected, tags) - - # Check the tags for the second image - tags = self.db_api.image_tag_get_all(self.context, UUID2) - expected = ['snarf'] - self.assertEqual(expected, tags) - - def test_image_tag_get_all_no_tags(self): - actual = self.db_api.image_tag_get_all(self.context, UUID1) - self.assertEqual([], actual) - - def test_image_tag_get_all_non_existent_image(self): - bad_image_id = str(uuid.uuid4()) - actual = self.db_api.image_tag_get_all(self.context, bad_image_id) - self.assertEqual([], actual) - - def test_image_tag_delete(self): - self.db_api.image_tag_create(self.context, UUID1, 'snap') - self.db_api.image_tag_delete(self.context, UUID1, 'snap') - self.assertRaises(exception.NotFound, self.db_api.image_tag_delete, - self.context, UUID1, 'snap') - - @mock.patch.object(timeutils, 'utcnow') - def test_image_member_create(self, mock_utcnow): - mock_utcnow.return_value = datetime.datetime.utcnow() - memberships 
= self.db_api.image_member_find(self.context) - self.assertEqual([], memberships) - - TENANT1 = str(uuid.uuid4()) - # NOTE(flaper87): Update auth token, otherwise - # non visible members won't be returned. - self.context.auth_token = 'user:%s:user' % TENANT1 - self.db_api.image_member_create(self.context, - {'member': TENANT1, 'image_id': UUID1}) - - memberships = self.db_api.image_member_find(self.context) - self.assertEqual(1, len(memberships)) - actual = memberships[0] - self.assertIsNotNone(actual['created_at']) - self.assertIsNotNone(actual['updated_at']) - actual.pop('id') - actual.pop('created_at') - actual.pop('updated_at') - expected = { - 'member': TENANT1, - 'image_id': UUID1, - 'can_share': False, - 'status': 'pending', - 'deleted': False, - } - self.assertEqual(expected, actual) - - def test_image_member_update(self): - TENANT1 = str(uuid.uuid4()) - - # NOTE(flaper87): Update auth token, otherwise - # non visible members won't be returned. - self.context.auth_token = 'user:%s:user' % TENANT1 - member = self.db_api.image_member_create(self.context, - {'member': TENANT1, - 'image_id': UUID1}) - member_id = member.pop('id') - member.pop('created_at') - member.pop('updated_at') - - expected = {'member': TENANT1, - 'image_id': UUID1, - 'status': 'pending', - 'can_share': False, - 'deleted': False} - self.assertEqual(expected, member) - - member = self.db_api.image_member_update(self.context, - member_id, - {'can_share': True}) - - self.assertNotEqual(member['created_at'], member['updated_at']) - member.pop('id') - member.pop('created_at') - member.pop('updated_at') - expected = {'member': TENANT1, - 'image_id': UUID1, - 'status': 'pending', - 'can_share': True, - 'deleted': False} - self.assertEqual(expected, member) - - members = self.db_api.image_member_find(self.context, - member=TENANT1, - image_id=UUID1) - member = members[0] - member.pop('id') - member.pop('created_at') - member.pop('updated_at') - self.assertEqual(expected, member) - - def 
test_image_member_update_status(self): - TENANT1 = str(uuid.uuid4()) - # NOTE(flaper87): Update auth token, otherwise - # non visible members won't be returned. - self.context.auth_token = 'user:%s:user' % TENANT1 - member = self.db_api.image_member_create(self.context, - {'member': TENANT1, - 'image_id': UUID1}) - member_id = member.pop('id') - member.pop('created_at') - member.pop('updated_at') - - expected = {'member': TENANT1, - 'image_id': UUID1, - 'status': 'pending', - 'can_share': False, - 'deleted': False} - self.assertEqual(expected, member) - - member = self.db_api.image_member_update(self.context, - member_id, - {'status': 'accepted'}) - - self.assertNotEqual(member['created_at'], member['updated_at']) - member.pop('id') - member.pop('created_at') - member.pop('updated_at') - expected = {'member': TENANT1, - 'image_id': UUID1, - 'status': 'accepted', - 'can_share': False, - 'deleted': False} - self.assertEqual(expected, member) - - members = self.db_api.image_member_find(self.context, - member=TENANT1, - image_id=UUID1) - member = members[0] - member.pop('id') - member.pop('created_at') - member.pop('updated_at') - self.assertEqual(expected, member) - - def test_image_member_find(self): - TENANT1 = str(uuid.uuid4()) - TENANT2 = str(uuid.uuid4()) - fixtures = [ - {'member': TENANT1, 'image_id': UUID1}, - {'member': TENANT1, 'image_id': UUID2, 'status': 'rejected'}, - {'member': TENANT2, 'image_id': UUID1, 'status': 'accepted'}, - ] - for f in fixtures: - self.db_api.image_member_create(self.context, copy.deepcopy(f)) - - def _simplify(output): - return - - def _assertMemberListMatch(list1, list2): - _simple = lambda x: set([(o['member'], o['image_id']) for o in x]) - self.assertEqual(_simple(list1), _simple(list2)) - - # NOTE(flaper87): Update auth token, otherwise - # non visible members won't be returned. 
- self.context.auth_token = 'user:%s:user' % TENANT1 - output = self.db_api.image_member_find(self.context, member=TENANT1) - _assertMemberListMatch([fixtures[0], fixtures[1]], output) - - output = self.db_api.image_member_find(self.adm_context, - image_id=UUID1) - _assertMemberListMatch([fixtures[0], fixtures[2]], output) - - # NOTE(flaper87): Update auth token, otherwise - # non visible members won't be returned. - self.context.auth_token = 'user:%s:user' % TENANT2 - output = self.db_api.image_member_find(self.context, - member=TENANT2, - image_id=UUID1) - _assertMemberListMatch([fixtures[2]], output) - - output = self.db_api.image_member_find(self.context, - status='accepted') - _assertMemberListMatch([fixtures[2]], output) - - # NOTE(flaper87): Update auth token, otherwise - # non visible members won't be returned. - self.context.auth_token = 'user:%s:user' % TENANT1 - output = self.db_api.image_member_find(self.context, - status='rejected') - _assertMemberListMatch([fixtures[1]], output) - - output = self.db_api.image_member_find(self.context, - status='pending') - _assertMemberListMatch([fixtures[0]], output) - - output = self.db_api.image_member_find(self.context, - status='pending', - image_id=UUID2) - _assertMemberListMatch([], output) - - image_id = str(uuid.uuid4()) - output = self.db_api.image_member_find(self.context, - member=TENANT2, - image_id=image_id) - _assertMemberListMatch([], output) - - def test_image_member_count(self): - TENANT1 = str(uuid.uuid4()) - self.db_api.image_member_create(self.context, - {'member': TENANT1, - 'image_id': UUID1}) - - actual = self.db_api.image_member_count(self.context, UUID1) - - self.assertEqual(1, actual) - - def test_image_member_count_invalid_image_id(self): - TENANT1 = str(uuid.uuid4()) - self.db_api.image_member_create(self.context, - {'member': TENANT1, - 'image_id': UUID1}) - - self.assertRaises(exception.Invalid, self.db_api.image_member_count, - self.context, None) - - def 
test_image_member_count_empty_image_id(self): - TENANT1 = str(uuid.uuid4()) - self.db_api.image_member_create(self.context, - {'member': TENANT1, - 'image_id': UUID1}) - - self.assertRaises(exception.Invalid, self.db_api.image_member_count, - self.context, "") - - def test_image_member_delete(self): - TENANT1 = str(uuid.uuid4()) - # NOTE(flaper87): Update auth token, otherwise - # non visible members won't be returned. - self.context.auth_token = 'user:%s:user' % TENANT1 - fixture = {'member': TENANT1, 'image_id': UUID1, 'can_share': True} - member = self.db_api.image_member_create(self.context, fixture) - self.assertEqual(1, len(self.db_api.image_member_find(self.context))) - member = self.db_api.image_member_delete(self.context, member['id']) - self.assertEqual(0, len(self.db_api.image_member_find(self.context))) - - -class DriverQuotaTests(test_utils.BaseTestCase): - - def setUp(self): - super(DriverQuotaTests, self).setUp() - self.owner_id1 = str(uuid.uuid4()) - self.context1 = context.RequestContext( - is_admin=False, user=self.owner_id1, tenant=self.owner_id1, - auth_token='%s:%s:user' % (self.owner_id1, self.owner_id1)) - self.db_api = db_tests.get_db(self.config) - db_tests.reset_db(self.db_api) - dt1 = timeutils.utcnow() - dt2 = dt1 + datetime.timedelta(microseconds=5) - fixtures = [ - { - 'id': UUID1, - 'created_at': dt1, - 'updated_at': dt1, - 'size': 13, - 'owner': self.owner_id1, - }, - { - 'id': UUID2, - 'created_at': dt1, - 'updated_at': dt2, - 'size': 17, - 'owner': self.owner_id1, - }, - { - 'id': UUID3, - 'created_at': dt2, - 'updated_at': dt2, - 'size': 7, - 'owner': self.owner_id1, - }, - ] - self.owner1_fixtures = [ - build_image_fixture(**fixture) for fixture in fixtures] - - for fixture in self.owner1_fixtures: - self.db_api.image_create(self.context1, fixture) - - def test_storage_quota(self): - total = reduce(lambda x, y: x + y, - [f['size'] for f in self.owner1_fixtures]) - x = self.db_api.user_get_storage_usage(self.context1, 
self.owner_id1) - self.assertEqual(total, x) - - def test_storage_quota_without_image_id(self): - total = reduce(lambda x, y: x + y, - [f['size'] for f in self.owner1_fixtures]) - total = total - self.owner1_fixtures[0]['size'] - x = self.db_api.user_get_storage_usage( - self.context1, self.owner_id1, - image_id=self.owner1_fixtures[0]['id']) - self.assertEqual(total, x) - - def test_storage_quota_multiple_locations(self): - dt1 = timeutils.utcnow() - sz = 53 - new_fixture_dict = {'id': str(uuid.uuid4()), 'created_at': dt1, - 'updated_at': dt1, 'size': sz, - 'owner': self.owner_id1} - new_fixture = build_image_fixture(**new_fixture_dict) - new_fixture['locations'].append({'url': 'file:///some/path/file', - 'metadata': {}, - 'status': 'active'}) - self.db_api.image_create(self.context1, new_fixture) - - total = reduce(lambda x, y: x + y, - [f['size'] for f in self.owner1_fixtures]) + (sz * 2) - x = self.db_api.user_get_storage_usage(self.context1, self.owner_id1) - self.assertEqual(total, x) - - def test_storage_quota_deleted_image(self): - # NOTE(flaper87): This needs to be tested for - # soft deleted images as well. Currently there's no - # good way to delete locations. 
- dt1 = timeutils.utcnow() - sz = 53 - image_id = str(uuid.uuid4()) - new_fixture_dict = {'id': image_id, 'created_at': dt1, - 'updated_at': dt1, 'size': sz, - 'owner': self.owner_id1} - new_fixture = build_image_fixture(**new_fixture_dict) - new_fixture['locations'].append({'url': 'file:///some/path/file', - 'metadata': {}, - 'status': 'active'}) - self.db_api.image_create(self.context1, new_fixture) - - total = reduce(lambda x, y: x + y, - [f['size'] for f in self.owner1_fixtures]) - x = self.db_api.user_get_storage_usage(self.context1, self.owner_id1) - self.assertEqual(total + (sz * 2), x) - - self.db_api.image_destroy(self.context1, image_id) - x = self.db_api.user_get_storage_usage(self.context1, self.owner_id1) - self.assertEqual(total, x) - - -class TaskTests(test_utils.BaseTestCase): - - def setUp(self): - super(TaskTests, self).setUp() - self.admin_id = 'admin' - self.owner_id = 'user' - self.adm_context = context.RequestContext( - is_admin=True, auth_token='user:admin:admin', tenant=self.admin_id) - self.context = context.RequestContext( - is_admin=False, auth_token='user:user:user', user=self.owner_id) - self.db_api = db_tests.get_db(self.config) - self.fixtures = self.build_task_fixtures() - db_tests.reset_db(self.db_api) - - def build_task_fixtures(self): - self.context.tenant = str(uuid.uuid4()) - fixtures = [ - { - 'owner': self.context.owner, - 'type': 'import', - 'input': {'import_from': 'file:///a.img', - 'import_from_format': 'qcow2', - 'image_properties': { - "name": "GreatStack 1.22", - "tags": ["lamp", "custom"] - }}, - }, - { - 'owner': self.context.owner, - 'type': 'import', - 'input': {'import_from': 'file:///b.img', - 'import_from_format': 'qcow2', - 'image_properties': { - "name": "GreatStack 1.23", - "tags": ["lamp", "good"] - }}, - }, - { - 'owner': self.context.owner, - "type": "export", - "input": { - "export_uuid": "deadbeef-dead-dead-dead-beefbeefbeef", - "export_to": - "swift://cloud.foo/myaccount/mycontainer/path", - 
"export_format": "qcow2" - } - }, - ] - return [build_task_fixture(**fixture) for fixture in fixtures] - - def test_task_get_all_with_filter(self): - for fixture in self.fixtures: - self.db_api.task_create(self.adm_context, - build_task_fixture(**fixture)) - - import_tasks = self.db_api.task_get_all(self.adm_context, - filters={'type': 'import'}) - - self.assertTrue(import_tasks) - self.assertEqual(2, len(import_tasks)) - for task in import_tasks: - self.assertEqual('import', task['type']) - self.assertEqual(self.context.owner, task['owner']) - - def test_task_get_all_as_admin(self): - tasks = [] - for fixture in self.fixtures: - task = self.db_api.task_create(self.adm_context, - build_task_fixture(**fixture)) - tasks.append(task) - import_tasks = self.db_api.task_get_all(self.adm_context) - self.assertTrue(import_tasks) - self.assertEqual(3, len(import_tasks)) - - def test_task_get_all_marker(self): - for fixture in self.fixtures: - self.db_api.task_create(self.adm_context, - build_task_fixture(**fixture)) - tasks = self.db_api.task_get_all(self.adm_context, sort_key='id') - task_ids = [t['id'] for t in tasks] - tasks = self.db_api.task_get_all(self.adm_context, sort_key='id', - marker=task_ids[0]) - self.assertEqual(2, len(tasks)) - - def test_task_get_all_limit(self): - for fixture in self.fixtures: - self.db_api.task_create(self.adm_context, - build_task_fixture(**fixture)) - - tasks = self.db_api.task_get_all(self.adm_context, limit=2) - self.assertEqual(2, len(tasks)) - - # A limit of None should not equate to zero - tasks = self.db_api.task_get_all(self.adm_context, limit=None) - self.assertEqual(3, len(tasks)) - - # A limit of zero should actually mean zero - tasks = self.db_api.task_get_all(self.adm_context, limit=0) - self.assertEqual(0, len(tasks)) - - def test_task_get_all_owned(self): - then = timeutils.utcnow() + datetime.timedelta(days=365) - TENANT1 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, - tenant=TENANT1, - 
auth_token='user:%s:user' % TENANT1) - - task_values = {'type': 'import', 'status': 'pending', - 'input': '{"loc": "fake"}', 'owner': TENANT1, - 'expires_at': then} - self.db_api.task_create(ctxt1, task_values) - - TENANT2 = str(uuid.uuid4()) - ctxt2 = context.RequestContext(is_admin=False, - tenant=TENANT2, - auth_token='user:%s:user' % TENANT2) - - task_values = {'type': 'export', 'status': 'pending', - 'input': '{"loc": "fake"}', 'owner': TENANT2, - 'expires_at': then} - self.db_api.task_create(ctxt2, task_values) - - tasks = self.db_api.task_get_all(ctxt1) - - task_owners = set([task['owner'] for task in tasks]) - expected = set([TENANT1]) - self.assertEqual(sorted(expected), sorted(task_owners)) - - def test_task_get(self): - expires_at = timeutils.utcnow() - image_id = str(uuid.uuid4()) - fixture = { - 'owner': self.context.owner, - 'type': 'import', - 'status': 'pending', - 'input': '{"loc": "fake"}', - 'result': "{'image_id': %s}" % image_id, - 'message': 'blah', - 'expires_at': expires_at - } - - task = self.db_api.task_create(self.adm_context, fixture) - - self.assertIsNotNone(task) - self.assertIsNotNone(task['id']) - - task_id = task['id'] - task = self.db_api.task_get(self.adm_context, task_id) - - self.assertIsNotNone(task) - self.assertEqual(task_id, task['id']) - self.assertEqual(self.context.owner, task['owner']) - self.assertEqual('import', task['type']) - self.assertEqual('pending', task['status']) - self.assertEqual(fixture['input'], task['input']) - self.assertEqual(fixture['result'], task['result']) - self.assertEqual(fixture['message'], task['message']) - self.assertEqual(expires_at, task['expires_at']) - - def test_task_get_all(self): - now = timeutils.utcnow() - then = now + datetime.timedelta(days=365) - image_id = str(uuid.uuid4()) - fixture1 = { - 'owner': self.context.owner, - 'type': 'import', - 'status': 'pending', - 'input': '{"loc": "fake_1"}', - 'result': "{'image_id': %s}" % image_id, - 'message': 'blah_1', - 'expires_at': then, - 
'created_at': now, - 'updated_at': now - } - - fixture2 = { - 'owner': self.context.owner, - 'type': 'import', - 'status': 'pending', - 'input': '{"loc": "fake_2"}', - 'result': "{'image_id': %s}" % image_id, - 'message': 'blah_2', - 'expires_at': then, - 'created_at': now, - 'updated_at': now - } - - task1 = self.db_api.task_create(self.adm_context, fixture1) - task2 = self.db_api.task_create(self.adm_context, fixture2) - - self.assertIsNotNone(task1) - self.assertIsNotNone(task2) - - task1_id = task1['id'] - task2_id = task2['id'] - task_fixtures = {task1_id: fixture1, task2_id: fixture2} - tasks = self.db_api.task_get_all(self.adm_context) - - self.assertEqual(2, len(tasks)) - self.assertEqual(set((tasks[0]['id'], tasks[1]['id'])), - set((task1_id, task2_id))) - for task in tasks: - fixture = task_fixtures[task['id']] - - self.assertEqual(self.context.owner, task['owner']) - self.assertEqual(fixture['type'], task['type']) - self.assertEqual(fixture['status'], task['status']) - self.assertEqual(fixture['expires_at'], task['expires_at']) - self.assertFalse(task['deleted']) - self.assertIsNone(task['deleted_at']) - self.assertEqual(fixture['created_at'], task['created_at']) - self.assertEqual(fixture['updated_at'], task['updated_at']) - task_details_keys = ['input', 'message', 'result'] - for key in task_details_keys: - self.assertNotIn(key, task) - - def test_task_soft_delete(self): - now = timeutils.utcnow() - then = now + datetime.timedelta(days=365) - - fixture1 = build_task_fixture(id='1', expires_at=now, - owner=self.adm_context.owner) - fixture2 = build_task_fixture(id='2', expires_at=now, - owner=self.adm_context.owner) - fixture3 = build_task_fixture(id='3', expires_at=then, - owner=self.adm_context.owner) - fixture4 = build_task_fixture(id='4', expires_at=then, - owner=self.adm_context.owner) - - task1 = self.db_api.task_create(self.adm_context, fixture1) - task2 = self.db_api.task_create(self.adm_context, fixture2) - task3 = 
self.db_api.task_create(self.adm_context, fixture3) - task4 = self.db_api.task_create(self.adm_context, fixture4) - - self.assertIsNotNone(task1) - self.assertIsNotNone(task2) - self.assertIsNotNone(task3) - self.assertIsNotNone(task4) - - tasks = self.db_api.task_get_all( - self.adm_context, sort_key='id', sort_dir='asc') - - self.assertEqual(4, len(tasks)) - - self.assertTrue(tasks[0]['deleted']) - self.assertTrue(tasks[1]['deleted']) - self.assertFalse(tasks[2]['deleted']) - self.assertFalse(tasks[3]['deleted']) - - def test_task_create(self): - task_id = str(uuid.uuid4()) - self.context.tenant = self.context.owner - values = { - 'id': task_id, - 'owner': self.context.owner, - 'type': 'export', - 'status': 'pending', - } - task_values = build_task_fixture(**values) - task = self.db_api.task_create(self.adm_context, task_values) - self.assertIsNotNone(task) - self.assertEqual(task_id, task['id']) - self.assertEqual(self.context.owner, task['owner']) - self.assertEqual('export', task['type']) - self.assertEqual('pending', task['status']) - self.assertEqual({'ping': 'pong'}, task['input']) - - def test_task_create_with_all_task_info_null(self): - task_id = str(uuid.uuid4()) - self.context.tenant = str(uuid.uuid4()) - values = { - 'id': task_id, - 'owner': self.context.owner, - 'type': 'export', - 'status': 'pending', - 'input': None, - 'result': None, - 'message': None, - } - task_values = build_task_fixture(**values) - task = self.db_api.task_create(self.adm_context, task_values) - self.assertIsNotNone(task) - self.assertEqual(task_id, task['id']) - self.assertEqual(self.context.owner, task['owner']) - self.assertEqual('export', task['type']) - self.assertEqual('pending', task['status']) - self.assertIsNone(task['input']) - self.assertIsNone(task['result']) - self.assertIsNone(task['message']) - - def test_task_update(self): - self.context.tenant = str(uuid.uuid4()) - result = {'foo': 'bar'} - task_values = build_task_fixture(owner=self.context.owner, - 
result=result) - task = self.db_api.task_create(self.adm_context, task_values) - - task_id = task['id'] - fixture = { - 'status': 'processing', - 'message': 'This is a error string', - } - task = self.db_api.task_update(self.adm_context, task_id, fixture) - - self.assertEqual(task_id, task['id']) - self.assertEqual(self.context.owner, task['owner']) - self.assertEqual('import', task['type']) - self.assertEqual('processing', task['status']) - self.assertEqual({'ping': 'pong'}, task['input']) - self.assertEqual(result, task['result']) - self.assertEqual('This is a error string', task['message']) - self.assertFalse(task['deleted']) - self.assertIsNone(task['deleted_at']) - self.assertIsNone(task['expires_at']) - self.assertEqual(task_values['created_at'], task['created_at']) - self.assertGreater(task['updated_at'], task['created_at']) - - def test_task_update_with_all_task_info_null(self): - self.context.tenant = str(uuid.uuid4()) - task_values = build_task_fixture(owner=self.context.owner, - input=None, - result=None, - message=None) - task = self.db_api.task_create(self.adm_context, task_values) - - task_id = task['id'] - fixture = {'status': 'processing'} - task = self.db_api.task_update(self.adm_context, task_id, fixture) - - self.assertEqual(task_id, task['id']) - self.assertEqual(self.context.owner, task['owner']) - self.assertEqual('import', task['type']) - self.assertEqual('processing', task['status']) - self.assertIsNone(task['input']) - self.assertIsNone(task['result']) - self.assertIsNone(task['message']) - self.assertFalse(task['deleted']) - self.assertIsNone(task['deleted_at']) - self.assertIsNone(task['expires_at']) - self.assertEqual(task_values['created_at'], task['created_at']) - self.assertGreater(task['updated_at'], task['created_at']) - - def test_task_delete(self): - task_values = build_task_fixture(owner=self.context.owner) - task = self.db_api.task_create(self.adm_context, task_values) - - self.assertIsNotNone(task) - 
self.assertFalse(task['deleted']) - self.assertIsNone(task['deleted_at']) - - task_id = task['id'] - self.db_api.task_delete(self.adm_context, task_id) - self.assertRaises(exception.TaskNotFound, self.db_api.task_get, - self.context, task_id) - - def test_task_delete_as_admin(self): - task_values = build_task_fixture(owner=self.context.owner) - task = self.db_api.task_create(self.adm_context, task_values) - - self.assertIsNotNone(task) - self.assertFalse(task['deleted']) - self.assertIsNone(task['deleted_at']) - - task_id = task['id'] - self.db_api.task_delete(self.adm_context, task_id) - del_task = self.db_api.task_get(self.adm_context, - task_id, - force_show_deleted=True) - self.assertIsNotNone(del_task) - self.assertEqual(task_id, del_task['id']) - self.assertTrue(del_task['deleted']) - self.assertIsNotNone(del_task['deleted_at']) - - -class DBPurgeTests(test_utils.BaseTestCase): - - def setUp(self): - super(DBPurgeTests, self).setUp() - self.adm_context = context.get_admin_context(show_deleted=True) - self.db_api = db_tests.get_db(self.config) - db_tests.reset_db(self.db_api) - self.image_fixtures, self.task_fixtures = self.build_fixtures() - self.create_tasks(self.task_fixtures) - self.create_images(self.image_fixtures) - - def build_fixtures(self): - dt1 = timeutils.utcnow() - datetime.timedelta(days=5) - dt2 = dt1 + datetime.timedelta(days=1) - dt3 = dt2 + datetime.timedelta(days=1) - fixtures = [ - { - 'created_at': dt1, - 'updated_at': dt1, - 'deleted_at': dt3, - 'deleted': True, - }, - { - 'created_at': dt1, - 'updated_at': dt2, - 'deleted_at': timeutils.utcnow(), - 'deleted': True, - }, - { - 'created_at': dt2, - 'updated_at': dt2, - 'deleted_at': None, - 'deleted': False, - }, - ] - return ( - [build_image_fixture(**fixture) for fixture in fixtures], - [build_task_fixture(**fixture) for fixture in fixtures], - ) - - def create_images(self, images): - for fixture in images: - self.db_api.image_create(self.adm_context, fixture) - - def create_tasks(self, 
tasks): - for fixture in tasks: - self.db_api.task_create(self.adm_context, fixture) - - def test_db_purge(self): - self.db_api.purge_deleted_rows(self.adm_context, 1, 5) - images = self.db_api.image_get_all(self.adm_context) - self.assertEqual(len(images), 2) - tasks = self.db_api.task_get_all(self.adm_context) - self.assertEqual(len(tasks), 2) - - def test_purge_fk_constraint_failure(self): - """Test foreign key constraint failure - - Test whether foreign key constraint failure during purge - operation is raising DBReferenceError or not. - """ - session = db_api.get_session() - engine = db_api.get_engine() - connection = engine.connect() - - dialect = engine.url.get_dialect() - if dialect == sqlite.dialect: - # We're seeing issues with foreign key support in SQLite 3.6.20 - # SQLAlchemy doesn't support it at all with SQLite < 3.6.19 - # It works fine in SQLite 3.7. - # So return early to skip this test if running SQLite < 3.7 - if test_utils.is_sqlite_version_prior_to(3, 7): - self.skipTest( - 'sqlite version too old for reliable SQLA foreign_keys') - # This is required for enforcing Foreign Key Constraint - # in SQLite 3.x - connection.execute("PRAGMA foreign_keys = ON") - - images = sqlalchemyutils.get_table( - engine, "images") - image_tags = sqlalchemyutils.get_table( - engine, "image_tags") - - # Add a 4th row in images table and set it deleted 15 days ago - uuidstr = uuid.uuid4().hex - created_time = timeutils.utcnow() - datetime.timedelta(days=20) - deleted_time = created_time + datetime.timedelta(days=5) - images_row_fixture = { - 'id': uuidstr, - 'status': 'status', - 'created_at': created_time, - 'deleted_at': deleted_time, - 'deleted': 1, - 'visibility': 'public', - 'min_disk': 1, - 'min_ram': 1, - 'protected': 0 - } - ins_stmt = images.insert().values(**images_row_fixture) - connection.execute(ins_stmt) - - # Add a record in image_tags referencing the above images record - # but do not set it as deleted - image_tags_row_fixture = { - 'image_id': 
uuidstr, - 'value': 'tag_value', - 'created_at': created_time, - 'deleted': 0 - } - ins_stmt = image_tags.insert().values(**image_tags_row_fixture) - connection.execute(ins_stmt) - - # Purge all records deleted at least 10 days ago - self.assertRaises(db_exception.DBReferenceError, - db_api.purge_deleted_rows, - self.adm_context, - age_in_days=10, - max_rows=50) - - # Verify that no records from images have been deleted - # due to DBReferenceError being raised - images_rows = session.query(images).count() - self.assertEqual(4, images_rows) - - -class TestVisibility(test_utils.BaseTestCase): - def setUp(self): - super(TestVisibility, self).setUp() - self.db_api = db_tests.get_db(self.config) - db_tests.reset_db(self.db_api) - self.setup_tenants() - self.setup_contexts() - self.fixtures = self.build_image_fixtures() - self.create_images(self.fixtures) - - def setup_tenants(self): - self.admin_tenant = str(uuid.uuid4()) - self.tenant1 = str(uuid.uuid4()) - self.tenant2 = str(uuid.uuid4()) - - def setup_contexts(self): - self.admin_context = context.RequestContext( - is_admin=True, tenant=self.admin_tenant) - self.admin_none_context = context.RequestContext( - is_admin=True, tenant=None) - self.tenant1_context = context.RequestContext(tenant=self.tenant1) - self.tenant2_context = context.RequestContext(tenant=self.tenant2) - self.none_context = context.RequestContext(tenant=None) - - def build_image_fixtures(self): - fixtures = [] - owners = { - 'Unowned': None, - 'Admin Tenant': self.admin_tenant, - 'Tenant 1': self.tenant1, - 'Tenant 2': self.tenant2, - } - visibilities = ['community', 'private', 'public', 'shared'] - for owner_label, owner in owners.items(): - for visibility in visibilities: - fixture = { - 'name': '%s, %s' % (owner_label, visibility), - 'owner': owner, - 'visibility': visibility, - } - fixtures.append(fixture) - return [build_image_fixture(**f) for f in fixtures] - - def create_images(self, images): - for fixture in images: - 
self.db_api.image_create(self.admin_context, fixture) - - -class VisibilityTests(object): - - def test_unknown_admin_sees_all_but_community(self): - images = self.db_api.image_get_all(self.admin_none_context) - self.assertEqual(12, len(images)) - - def test_unknown_admin_is_public_true(self): - images = self.db_api.image_get_all(self.admin_none_context, - is_public=True) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('public', i['visibility']) - - def test_unknown_admin_is_public_false(self): - images = self.db_api.image_get_all(self.admin_none_context, - is_public=False) - self.assertEqual(8, len(images)) - for i in images: - self.assertTrue(i['visibility'] in ['shared', 'private']) - - def test_unknown_admin_is_public_none(self): - images = self.db_api.image_get_all(self.admin_none_context) - self.assertEqual(12, len(images)) - - def test_unknown_admin_visibility_public(self): - images = self.db_api.image_get_all(self.admin_none_context, - filters={'visibility': 'public'}) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('public', i['visibility']) - - def test_unknown_admin_visibility_shared(self): - images = self.db_api.image_get_all(self.admin_none_context, - filters={'visibility': 'shared'}) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('shared', i['visibility']) - - def test_unknown_admin_visibility_private(self): - images = self.db_api.image_get_all(self.admin_none_context, - filters={'visibility': 'private'}) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('private', i['visibility']) - - def test_unknown_admin_visibility_community(self): - images = self.db_api.image_get_all(self.admin_none_context, - filters={'visibility': 'community'}) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('community', i['visibility']) - - def test_known_admin_sees_all_but_others_community_images(self): - images = 
self.db_api.image_get_all(self.admin_context) - self.assertEqual(13, len(images)) - - def test_known_admin_is_public_true(self): - images = self.db_api.image_get_all(self.admin_context, is_public=True) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('public', i['visibility']) - - def test_known_admin_is_public_false(self): - images = self.db_api.image_get_all(self.admin_context, - is_public=False) - self.assertEqual(9, len(images)) - for i in images: - self.assertTrue(i['visibility'] - in ['shared', 'private', 'community']) - - def test_known_admin_is_public_none(self): - images = self.db_api.image_get_all(self.admin_context) - self.assertEqual(13, len(images)) - - def test_admin_as_user_true(self): - images = self.db_api.image_get_all(self.admin_context, - admin_as_user=True) - self.assertEqual(7, len(images)) - for i in images: - self.assertTrue(('public' == i['visibility']) - or i['owner'] == self.admin_tenant) - - def test_known_admin_visibility_public(self): - images = self.db_api.image_get_all(self.admin_context, - filters={'visibility': 'public'}) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('public', i['visibility']) - - def test_known_admin_visibility_shared(self): - images = self.db_api.image_get_all(self.admin_context, - filters={'visibility': 'shared'}) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('shared', i['visibility']) - - def test_known_admin_visibility_private(self): - images = self.db_api.image_get_all(self.admin_context, - filters={'visibility': 'private'}) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('private', i['visibility']) - - def test_known_admin_visibility_community(self): - images = self.db_api.image_get_all(self.admin_context, - filters={'visibility': 'community'}) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('community', i['visibility']) - - def test_what_unknown_user_sees(self): - images = 
self.db_api.image_get_all(self.none_context) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('public', i['visibility']) - - def test_unknown_user_is_public_true(self): - images = self.db_api.image_get_all(self.none_context, is_public=True) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('public', i['visibility']) - - def test_unknown_user_is_public_false(self): - images = self.db_api.image_get_all(self.none_context, is_public=False) - self.assertEqual(0, len(images)) - - def test_unknown_user_is_public_none(self): - images = self.db_api.image_get_all(self.none_context) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('public', i['visibility']) - - def test_unknown_user_visibility_public(self): - images = self.db_api.image_get_all(self.none_context, - filters={'visibility': 'public'}) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('public', i['visibility']) - - def test_unknown_user_visibility_shared(self): - images = self.db_api.image_get_all(self.none_context, - filters={'visibility': 'shared'}) - self.assertEqual(0, len(images)) - - def test_unknown_user_visibility_private(self): - images = self.db_api.image_get_all(self.none_context, - filters={'visibility': 'private'}) - self.assertEqual(0, len(images)) - - def test_unknown_user_visibility_community(self): - images = self.db_api.image_get_all(self.none_context, - filters={'visibility': 'community'}) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('community', i['visibility']) - - def test_what_tenant1_sees(self): - images = self.db_api.image_get_all(self.tenant1_context) - self.assertEqual(7, len(images)) - for i in images: - if not ('public' == i['visibility']): - self.assertEqual(i['owner'], self.tenant1) - - def test_tenant1_is_public_true(self): - images = self.db_api.image_get_all(self.tenant1_context, - is_public=True) - self.assertEqual(4, len(images)) - for i in images: - 
self.assertEqual('public', i['visibility']) - - def test_tenant1_is_public_false(self): - images = self.db_api.image_get_all(self.tenant1_context, - is_public=False) - self.assertEqual(3, len(images)) - for i in images: - self.assertEqual(i['owner'], self.tenant1) - self.assertTrue(i['visibility'] - in ['private', 'shared', 'community']) - - def test_tenant1_is_public_none(self): - images = self.db_api.image_get_all(self.tenant1_context) - self.assertEqual(7, len(images)) - for i in images: - if not ('public' == i['visibility']): - self.assertEqual(self.tenant1, i['owner']) - - def test_tenant1_visibility_public(self): - images = self.db_api.image_get_all(self.tenant1_context, - filters={'visibility': 'public'}) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('public', i['visibility']) - - def test_tenant1_visibility_shared(self): - images = self.db_api.image_get_all(self.tenant1_context, - filters={'visibility': 'shared'}) - self.assertEqual(1, len(images)) - self.assertEqual('shared', images[0]['visibility']) - self.assertEqual(self.tenant1, images[0]['owner']) - - def test_tenant1_visibility_private(self): - images = self.db_api.image_get_all(self.tenant1_context, - filters={'visibility': 'private'}) - self.assertEqual(1, len(images)) - self.assertEqual('private', images[0]['visibility']) - self.assertEqual(self.tenant1, images[0]['owner']) - - def test_tenant1_visibility_community(self): - images = self.db_api.image_get_all(self.tenant1_context, - filters={'visibility': 'community'}) - self.assertEqual(4, len(images)) - for i in images: - self.assertEqual('community', i['visibility']) - - def _setup_is_public_red_herring(self): - values = { - 'name': 'Red Herring', - 'owner': self.tenant1, - 'visibility': 'shared', - 'properties': {'is_public': 'silly'} - } - fixture = build_image_fixture(**values) - self.db_api.image_create(self.admin_context, fixture) - - def test_is_public_is_a_normal_filter_for_admin(self): - 
self._setup_is_public_red_herring() - images = self.db_api.image_get_all(self.admin_context, - filters={'is_public': 'silly'}) - self.assertEqual(1, len(images)) - self.assertEqual('Red Herring', images[0]['name']) - - def test_is_public_is_a_normal_filter_for_user(self): - self._setup_is_public_red_herring() - images = self.db_api.image_get_all(self.tenant1_context, - filters={'is_public': 'silly'}) - self.assertEqual(1, len(images)) - self.assertEqual('Red Herring', images[0]['name']) - - # NOTE(markwash): the following tests are sanity checks to make sure - # visibility filtering and is_public=(True|False) do not interact in - # unexpected ways. However, using both of the filtering techniques - # simultaneously is not an anticipated use case. - - def test_admin_is_public_true_and_visibility_public(self): - images = self.db_api.image_get_all(self.admin_context, is_public=True, - filters={'visibility': 'public'}) - self.assertEqual(4, len(images)) - - def test_admin_is_public_false_and_visibility_public(self): - images = self.db_api.image_get_all(self.admin_context, is_public=False, - filters={'visibility': 'public'}) - self.assertEqual(0, len(images)) - - def test_admin_is_public_true_and_visibility_shared(self): - images = self.db_api.image_get_all(self.admin_context, is_public=True, - filters={'visibility': 'shared'}) - self.assertEqual(0, len(images)) - - def test_admin_is_public_false_and_visibility_shared(self): - images = self.db_api.image_get_all(self.admin_context, is_public=False, - filters={'visibility': 'shared'}) - self.assertEqual(4, len(images)) - - def test_admin_is_public_true_and_visibility_private(self): - images = self.db_api.image_get_all(self.admin_context, is_public=True, - filters={'visibility': 'private'}) - self.assertEqual(0, len(images)) - - def test_admin_is_public_false_and_visibility_private(self): - images = self.db_api.image_get_all(self.admin_context, is_public=False, - filters={'visibility': 'private'}) - self.assertEqual(4, 
len(images)) - - def test_admin_is_public_true_and_visibility_community(self): - images = self.db_api.image_get_all(self.admin_context, is_public=True, - filters={'visibility': 'community'}) - self.assertEqual(0, len(images)) - - def test_admin_is_public_false_and_visibility_community(self): - images = self.db_api.image_get_all(self.admin_context, is_public=False, - filters={'visibility': 'community'}) - self.assertEqual(4, len(images)) - - def test_tenant1_is_public_true_and_visibility_public(self): - images = self.db_api.image_get_all(self.tenant1_context, - is_public=True, - filters={'visibility': 'public'}) - self.assertEqual(4, len(images)) - - def test_tenant1_is_public_false_and_visibility_public(self): - images = self.db_api.image_get_all(self.tenant1_context, - is_public=False, - filters={'visibility': 'public'}) - self.assertEqual(0, len(images)) - - def test_tenant1_is_public_true_and_visibility_shared(self): - images = self.db_api.image_get_all(self.tenant1_context, - is_public=True, - filters={'visibility': 'shared'}) - self.assertEqual(0, len(images)) - - def test_tenant1_is_public_false_and_visibility_shared(self): - images = self.db_api.image_get_all(self.tenant1_context, - is_public=False, - filters={'visibility': 'shared'}) - self.assertEqual(1, len(images)) - - def test_tenant1_is_public_true_and_visibility_private(self): - images = self.db_api.image_get_all(self.tenant1_context, - is_public=True, - filters={'visibility': 'private'}) - self.assertEqual(0, len(images)) - - def test_tenant1_is_public_false_and_visibility_private(self): - images = self.db_api.image_get_all(self.tenant1_context, - is_public=False, - filters={'visibility': 'private'}) - self.assertEqual(1, len(images)) - - def test_tenant1_is_public_true_and_visibility_community(self): - images = self.db_api.image_get_all(self.tenant1_context, - is_public=True, - filters={'visibility': 'community'}) - self.assertEqual(0, len(images)) - - def 
test_tenant1_is_public_false_and_visibility_community(self): - images = self.db_api.image_get_all(self.tenant1_context, - is_public=False, - filters={'visibility': 'community'}) - self.assertEqual(4, len(images)) - - -class TestMembershipVisibility(test_utils.BaseTestCase): - def setUp(self): - super(TestMembershipVisibility, self).setUp() - self.db_api = db_tests.get_db(self.config) - db_tests.reset_db(self.db_api) - self._create_contexts() - self._create_images() - - def _create_contexts(self): - self.owner1, self.owner1_ctx = self._user_fixture() - self.owner2, self.owner2_ctx = self._user_fixture() - self.tenant1, self.user1_ctx = self._user_fixture() - self.tenant2, self.user2_ctx = self._user_fixture() - self.tenant3, self.user3_ctx = self._user_fixture() - self.admin_tenant, self.admin_ctx = self._user_fixture(admin=True) - - def _user_fixture(self, admin=False): - tenant_id = str(uuid.uuid4()) - ctx = context.RequestContext(tenant=tenant_id, is_admin=admin) - return tenant_id, ctx - - def _create_images(self): - self.image_ids = {} - for owner in [self.owner1, self.owner2]: - self._create_image('not_shared', owner) - self._create_image('shared-with-1', owner, members=[self.tenant1]) - self._create_image('shared-with-2', owner, members=[self.tenant2]) - self._create_image('shared-with-both', owner, - members=[self.tenant1, self.tenant2]) - - def _create_image(self, name, owner, members=None): - image = build_image_fixture(name=name, owner=owner, - visibility='shared') - self.image_ids[(owner, name)] = image['id'] - self.db_api.image_create(self.admin_ctx, image) - for member in members or []: - member = {'image_id': image['id'], 'member': member} - self.db_api.image_member_create(self.admin_ctx, member) - - -class MembershipVisibilityTests(object): - def _check_by_member(self, ctx, member_id, expected): - members = self.db_api.image_member_find(ctx, member=member_id) - images = [self.db_api.image_get(self.admin_ctx, member['image_id']) - for member in 
members] - facets = [(image['owner'], image['name']) for image in images] - self.assertEqual(set(expected), set(facets)) - - def test_owner1_finding_user1_memberships(self): - """Owner1 should see images it owns that are shared with User1.""" - expected = [ - (self.owner1, 'shared-with-1'), - (self.owner1, 'shared-with-both'), - ] - self._check_by_member(self.owner1_ctx, self.tenant1, expected) - - def test_user1_finding_user1_memberships(self): - """User1 should see all images shared with User1 """ - expected = [ - (self.owner1, 'shared-with-1'), - (self.owner1, 'shared-with-both'), - (self.owner2, 'shared-with-1'), - (self.owner2, 'shared-with-both'), - ] - self._check_by_member(self.user1_ctx, self.tenant1, expected) - - def test_user2_finding_user1_memberships(self): - """User2 should see no images shared with User1 """ - expected = [] - self._check_by_member(self.user2_ctx, self.tenant1, expected) - - def test_admin_finding_user1_memberships(self): - """Admin should see all images shared with User1 """ - expected = [ - (self.owner1, 'shared-with-1'), - (self.owner1, 'shared-with-both'), - (self.owner2, 'shared-with-1'), - (self.owner2, 'shared-with-both'), - ] - self._check_by_member(self.admin_ctx, self.tenant1, expected) - - def _check_by_image(self, context, image_id, expected): - members = self.db_api.image_member_find(context, image_id=image_id) - member_ids = [member['member'] for member in members] - self.assertEqual(set(expected), set(member_ids)) - - def test_owner1_finding_owner1s_image_members(self): - """Owner1 should see all memberships of its image """ - expected = [self.tenant1, self.tenant2] - image_id = self.image_ids[(self.owner1, 'shared-with-both')] - self._check_by_image(self.owner1_ctx, image_id, expected) - - def test_admin_finding_owner1s_image_members(self): - """Admin should see all memberships of owner1's image """ - expected = [self.tenant1, self.tenant2] - image_id = self.image_ids[(self.owner1, 'shared-with-both')] - 
self._check_by_image(self.admin_ctx, image_id, expected) - - def test_user1_finding_owner1s_image_members(self): - """User1 should see its own membership of owner1's image """ - expected = [self.tenant1] - image_id = self.image_ids[(self.owner1, 'shared-with-both')] - self._check_by_image(self.user1_ctx, image_id, expected) - - def test_user2_finding_owner1s_image_members(self): - """User2 should see its own membership of owner1's image """ - expected = [self.tenant2] - image_id = self.image_ids[(self.owner1, 'shared-with-both')] - self._check_by_image(self.user2_ctx, image_id, expected) - - def test_user3_finding_owner1s_image_members(self): - """User3 should see no memberships of owner1's image """ - expected = [] - image_id = self.image_ids[(self.owner1, 'shared-with-both')] - self._check_by_image(self.user3_ctx, image_id, expected) diff --git a/glance/tests/functional/db/base_metadef.py b/glance/tests/functional/db/base_metadef.py deleted file mode 100644 index a2af0980..00000000 --- a/glance/tests/functional/db/base_metadef.py +++ /dev/null @@ -1,707 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -from glance.common import config -from glance.common import exception -from glance import context -import glance.tests.functional.db as db_tests -from glance.tests import utils as test_utils - - -def build_namespace_fixture(**kwargs): - namespace = { - 'namespace': u'MyTestNamespace', - 'display_name': u'test-display-name', - 'description': u'test-description', - 'visibility': u'public', - 'protected': 0, - 'owner': u'test-owner' - } - namespace.update(kwargs) - return namespace - - -def build_resource_type_fixture(**kwargs): - resource_type = { - 'name': u'MyTestResourceType', - 'protected': 0 - } - resource_type.update(kwargs) - return resource_type - - -def build_association_fixture(**kwargs): - association = { - 'name': u'MyTestResourceType', - 'properties_target': 'test-properties-target', - 'prefix': 'test-prefix' - } - association.update(kwargs) - return association - - -def build_object_fixture(**kwargs): - # Full testing of required and schema done via rest api tests - object = { - 'namespace_id': 1, - 'name': u'test-object-name', - 'description': u'test-object-description', - 'required': u'fake-required-properties-list', - 'json_schema': u'{fake-schema}' - } - object.update(kwargs) - return object - - -def build_property_fixture(**kwargs): - # Full testing of required and schema done via rest api tests - property = { - 'namespace_id': 1, - 'name': u'test-property-name', - 'json_schema': u'{fake-schema}' - } - property.update(kwargs) - return property - - -def build_tag_fixture(**kwargs): - # Full testing of required and schema done via rest api tests - tag = { - 'namespace_id': 1, - 'name': u'test-tag-name', - } - tag.update(kwargs) - return tag - - -def build_tags_fixture(tag_name_list): - tag_list = [] - for tag_name in tag_name_list: - tag_list.append({'name': tag_name}) - return tag_list - - -class TestMetadefDriver(test_utils.BaseTestCase): - - """Test Driver class for Metadef tests.""" - - def setUp(self): - """Run before each test 
method to initialize test environment.""" - super(TestMetadefDriver, self).setUp() - config.parse_args(args=[]) - context_cls = context.RequestContext - self.adm_context = context_cls(is_admin=True, - auth_token='user:user:admin') - self.context = context_cls(is_admin=False, - auth_token='user:user:user') - self.db_api = db_tests.get_db(self.config) - db_tests.reset_db(self.db_api) - - def _assert_saved_fields(self, expected, actual): - for k in expected.keys(): - self.assertEqual(expected[k], actual[k]) - - -class MetadefNamespaceTests(object): - - def test_namespace_create(self): - fixture = build_namespace_fixture() - created = self.db_api.metadef_namespace_create(self.context, fixture) - self.assertIsNotNone(created) - self._assert_saved_fields(fixture, created) - - def test_namespace_create_duplicate(self): - fixture = build_namespace_fixture() - created = self.db_api.metadef_namespace_create(self.context, fixture) - self.assertIsNotNone(created) - self._assert_saved_fields(fixture, created) - self.assertRaises(exception.Duplicate, - self.db_api.metadef_namespace_create, - self.context, fixture) - - def test_namespace_get(self): - fixture = build_namespace_fixture() - created = self.db_api.metadef_namespace_create(self.context, fixture) - self.assertIsNotNone(created) - self._assert_saved_fields(fixture, created) - - found = self.db_api.metadef_namespace_get( - self.context, created['namespace']) - self.assertIsNotNone(found, "Namespace not found.") - - def test_namespace_get_all_with_resource_types_filter(self): - ns_fixture = build_namespace_fixture() - ns_created = self.db_api.metadef_namespace_create( - self.context, ns_fixture) - self.assertIsNotNone(ns_created, "Could not create a namespace.") - self._assert_saved_fields(ns_fixture, ns_created) - - fixture = build_association_fixture() - created = self.db_api.metadef_resource_type_association_create( - self.context, ns_created['namespace'], fixture) - self.assertIsNotNone(created, "Could not create an 
association.") - - rt_filters = {'resource_types': fixture['name']} - found = self.db_api.metadef_namespace_get_all( - self.context, filters=rt_filters, sort_key='created_at') - self.assertEqual(1, len(found)) - for item in found: - self._assert_saved_fields(ns_fixture, item) - - def test_namespace_update(self): - delta = {'owner': u'New Owner'} - fixture = build_namespace_fixture() - - created = self.db_api.metadef_namespace_create(self.context, fixture) - self.assertIsNotNone(created['namespace']) - self.assertEqual(fixture['namespace'], created['namespace']) - delta_dict = copy.deepcopy(created) - delta_dict.update(delta.copy()) - - updated = self.db_api.metadef_namespace_update( - self.context, created['id'], delta_dict) - self.assertEqual(delta['owner'], updated['owner']) - - def test_namespace_delete(self): - fixture = build_namespace_fixture() - created = self.db_api.metadef_namespace_create(self.context, fixture) - self.assertIsNotNone(created, "Could not create a Namespace.") - self.db_api.metadef_namespace_delete( - self.context, created['namespace']) - self.assertRaises(exception.NotFound, - self.db_api.metadef_namespace_get, - self.context, created['namespace']) - - def test_namespace_delete_with_content(self): - fixture_ns = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create( - self.context, fixture_ns) - self._assert_saved_fields(fixture_ns, created_ns) - - # Create object content for the namespace - fixture_obj = build_object_fixture() - created_obj = self.db_api.metadef_object_create( - self.context, created_ns['namespace'], fixture_obj) - self.assertIsNotNone(created_obj) - - # Create property content for the namespace - fixture_prop = build_property_fixture(namespace_id=created_ns['id']) - created_prop = self.db_api.metadef_property_create( - self.context, created_ns['namespace'], fixture_prop) - self.assertIsNotNone(created_prop) - - # Create associations - fixture_assn = build_association_fixture() - created_assn = 
self.db_api.metadef_resource_type_association_create( - self.context, created_ns['namespace'], fixture_assn) - self.assertIsNotNone(created_assn) - - deleted_ns = self.db_api.metadef_namespace_delete( - self.context, created_ns['namespace']) - - self.assertRaises(exception.NotFound, - self.db_api.metadef_namespace_get, - self.context, deleted_ns['namespace']) - - -class MetadefPropertyTests(object): - - def test_property_create(self): - fixture = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create( - self.context, fixture) - self.assertIsNotNone(created_ns) - self._assert_saved_fields(fixture, created_ns) - - fixture_prop = build_property_fixture(namespace_id=created_ns['id']) - created_prop = self.db_api.metadef_property_create( - self.context, created_ns['namespace'], fixture_prop) - self._assert_saved_fields(fixture_prop, created_prop) - - def test_property_create_duplicate(self): - fixture = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create( - self.context, fixture) - self.assertIsNotNone(created_ns) - self._assert_saved_fields(fixture, created_ns) - - fixture_prop = build_property_fixture(namespace_id=created_ns['id']) - created_prop = self.db_api.metadef_property_create( - self.context, created_ns['namespace'], fixture_prop) - self._assert_saved_fields(fixture_prop, created_prop) - - self.assertRaises(exception.Duplicate, - self.db_api.metadef_property_create, - self.context, created_ns['namespace'], fixture_prop) - - def test_property_get(self): - fixture_ns = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create( - self.context, fixture_ns) - self.assertIsNotNone(created_ns) - self._assert_saved_fields(fixture_ns, created_ns) - - fixture_prop = build_property_fixture(namespace_id=created_ns['id']) - created_prop = self.db_api.metadef_property_create( - self.context, created_ns['namespace'], fixture_prop) - - found_prop = self.db_api.metadef_property_get( - self.context, 
created_ns['namespace'], created_prop['name']) - self._assert_saved_fields(fixture_prop, found_prop) - - def test_property_get_all(self): - ns_fixture = build_namespace_fixture() - ns_created = self.db_api.metadef_namespace_create( - self.context, ns_fixture) - self.assertIsNotNone(ns_created, "Could not create a namespace.") - self._assert_saved_fields(ns_fixture, ns_created) - - fixture1 = build_property_fixture(namespace_id=ns_created['id']) - created_p1 = self.db_api.metadef_property_create( - self.context, ns_created['namespace'], fixture1) - self.assertIsNotNone(created_p1, "Could not create a property.") - - fixture2 = build_property_fixture(namespace_id=ns_created['id'], - name='test-prop-2') - created_p2 = self.db_api.metadef_property_create( - self.context, ns_created['namespace'], fixture2) - self.assertIsNotNone(created_p2, "Could not create a property.") - - found = self.db_api.metadef_property_get_all( - self.context, ns_created['namespace']) - self.assertEqual(2, len(found)) - - def test_property_update(self): - delta = {'name': u'New-name', 'json_schema': u'new-schema'} - - fixture_ns = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create( - self.context, fixture_ns) - self.assertIsNotNone(created_ns['namespace']) - - prop_fixture = build_property_fixture(namespace_id=created_ns['id']) - created_prop = self.db_api.metadef_property_create( - self.context, created_ns['namespace'], prop_fixture) - self.assertIsNotNone(created_prop, "Could not create a property.") - - delta_dict = copy.deepcopy(created_prop) - delta_dict.update(delta.copy()) - - updated = self.db_api.metadef_property_update( - self.context, created_ns['namespace'], - created_prop['id'], delta_dict) - self.assertEqual(delta['name'], updated['name']) - self.assertEqual(delta['json_schema'], updated['json_schema']) - - def test_property_delete(self): - fixture_ns = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create( - self.context, 
fixture_ns) - self.assertIsNotNone(created_ns['namespace']) - - prop_fixture = build_property_fixture(namespace_id=created_ns['id']) - created_prop = self.db_api.metadef_property_create( - self.context, created_ns['namespace'], prop_fixture) - self.assertIsNotNone(created_prop, "Could not create a property.") - - self.db_api.metadef_property_delete( - self.context, created_ns['namespace'], created_prop['name']) - self.assertRaises(exception.NotFound, - self.db_api.metadef_property_get, - self.context, created_ns['namespace'], - created_prop['name']) - - def test_property_delete_namespace_content(self): - fixture_ns = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create( - self.context, fixture_ns) - self.assertIsNotNone(created_ns['namespace']) - - prop_fixture = build_property_fixture(namespace_id=created_ns['id']) - created_prop = self.db_api.metadef_property_create( - self.context, created_ns['namespace'], prop_fixture) - self.assertIsNotNone(created_prop, "Could not create a property.") - - self.db_api.metadef_property_delete_namespace_content( - self.context, created_ns['namespace']) - self.assertRaises(exception.NotFound, - self.db_api.metadef_property_get, - self.context, created_ns['namespace'], - created_prop['name']) - - -class MetadefObjectTests(object): - - def test_object_create(self): - fixture = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create(self.context, - fixture) - self.assertIsNotNone(created_ns) - self._assert_saved_fields(fixture, created_ns) - - fixture_object = build_object_fixture(namespace_id=created_ns['id']) - created_object = self.db_api.metadef_object_create( - self.context, created_ns['namespace'], fixture_object) - self._assert_saved_fields(fixture_object, created_object) - - def test_object_create_duplicate(self): - fixture = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create(self.context, - fixture) - self.assertIsNotNone(created_ns) - 
self._assert_saved_fields(fixture, created_ns) - - fixture_object = build_object_fixture(namespace_id=created_ns['id']) - created_object = self.db_api.metadef_object_create( - self.context, created_ns['namespace'], fixture_object) - self._assert_saved_fields(fixture_object, created_object) - - self.assertRaises(exception.Duplicate, - self.db_api.metadef_object_create, - self.context, created_ns['namespace'], - fixture_object) - - def test_object_get(self): - fixture_ns = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create(self.context, - fixture_ns) - self.assertIsNotNone(created_ns) - self._assert_saved_fields(fixture_ns, created_ns) - - fixture_object = build_object_fixture(namespace_id=created_ns['id']) - created_object = self.db_api.metadef_object_create( - self.context, created_ns['namespace'], fixture_object) - - found_object = self.db_api.metadef_object_get( - self.context, created_ns['namespace'], created_object['name']) - self._assert_saved_fields(fixture_object, found_object) - - def test_object_get_all(self): - ns_fixture = build_namespace_fixture() - ns_created = self.db_api.metadef_namespace_create(self.context, - ns_fixture) - self.assertIsNotNone(ns_created, "Could not create a namespace.") - self._assert_saved_fields(ns_fixture, ns_created) - - fixture1 = build_object_fixture(namespace_id=ns_created['id']) - created_o1 = self.db_api.metadef_object_create( - self.context, ns_created['namespace'], fixture1) - self.assertIsNotNone(created_o1, "Could not create an object.") - - fixture2 = build_object_fixture(namespace_id=ns_created['id'], - name='test-object-2') - created_o2 = self.db_api.metadef_object_create( - self.context, ns_created['namespace'], fixture2) - self.assertIsNotNone(created_o2, "Could not create an object.") - - found = self.db_api.metadef_object_get_all( - self.context, ns_created['namespace']) - self.assertEqual(2, len(found)) - - def test_object_update(self): - delta = {'name': u'New-name', 'json_schema': 
u'new-schema', - 'required': u'new-required'} - - fixture_ns = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create(self.context, - fixture_ns) - self.assertIsNotNone(created_ns['namespace']) - - object_fixture = build_object_fixture(namespace_id=created_ns['id']) - created_object = self.db_api.metadef_object_create( - self.context, created_ns['namespace'], object_fixture) - self.assertIsNotNone(created_object, "Could not create an object.") - - delta_dict = {} - delta_dict.update(delta.copy()) - - updated = self.db_api.metadef_object_update( - self.context, created_ns['namespace'], - created_object['id'], delta_dict) - self.assertEqual(delta['name'], updated['name']) - self.assertEqual(delta['json_schema'], updated['json_schema']) - - def test_object_delete(self): - fixture_ns = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create( - self.context, fixture_ns) - self.assertIsNotNone(created_ns['namespace']) - - object_fixture = build_object_fixture(namespace_id=created_ns['id']) - created_object = self.db_api.metadef_object_create( - self.context, created_ns['namespace'], object_fixture) - self.assertIsNotNone(created_object, "Could not create an object.") - - self.db_api.metadef_object_delete( - self.context, created_ns['namespace'], created_object['name']) - self.assertRaises(exception.NotFound, - self.db_api.metadef_object_get, - self.context, created_ns['namespace'], - created_object['name']) - - -class MetadefResourceTypeTests(object): - - def test_resource_type_get_all(self): - resource_types_orig = self.db_api.metadef_resource_type_get_all( - self.context) - - fixture = build_resource_type_fixture() - self.db_api.metadef_resource_type_create(self.context, fixture) - - resource_types = self.db_api.metadef_resource_type_get_all( - self.context) - - test_len = len(resource_types_orig) + 1 - self.assertEqual(test_len, len(resource_types)) - - -class MetadefResourceTypeAssociationTests(object): - - def 
test_association_create(self): - ns_fixture = build_namespace_fixture() - ns_created = self.db_api.metadef_namespace_create( - self.context, ns_fixture) - self.assertIsNotNone(ns_created) - self._assert_saved_fields(ns_fixture, ns_created) - - assn_fixture = build_association_fixture() - assn_created = self.db_api.metadef_resource_type_association_create( - self.context, ns_created['namespace'], assn_fixture) - self.assertIsNotNone(assn_created) - self._assert_saved_fields(assn_fixture, assn_created) - - def test_association_create_duplicate(self): - ns_fixture = build_namespace_fixture() - ns_created = self.db_api.metadef_namespace_create( - self.context, ns_fixture) - self.assertIsNotNone(ns_created) - self._assert_saved_fields(ns_fixture, ns_created) - - assn_fixture = build_association_fixture() - assn_created = self.db_api.metadef_resource_type_association_create( - self.context, ns_created['namespace'], assn_fixture) - self.assertIsNotNone(assn_created) - self._assert_saved_fields(assn_fixture, assn_created) - - self.assertRaises(exception.Duplicate, - self.db_api. 
- metadef_resource_type_association_create, - self.context, ns_created['namespace'], assn_fixture) - - def test_association_delete(self): - ns_fixture = build_namespace_fixture() - ns_created = self.db_api.metadef_namespace_create( - self.context, ns_fixture) - self.assertIsNotNone(ns_created, "Could not create a namespace.") - self._assert_saved_fields(ns_fixture, ns_created) - - fixture = build_association_fixture() - created = self.db_api.metadef_resource_type_association_create( - self.context, ns_created['namespace'], fixture) - self.assertIsNotNone(created, "Could not create an association.") - - created_resource = self.db_api.metadef_resource_type_get( - self.context, fixture['name']) - self.assertIsNotNone(created_resource, "resource_type not created") - - self.db_api.metadef_resource_type_association_delete( - self.context, ns_created['namespace'], created_resource['name']) - self.assertRaises(exception.NotFound, - self.db_api.metadef_resource_type_association_get, - self.context, ns_created['namespace'], - created_resource['name']) - - def test_association_get_all_by_namespace(self): - ns_fixture = build_namespace_fixture() - ns_created = self.db_api.metadef_namespace_create( - self.context, ns_fixture) - self.assertIsNotNone(ns_created, "Could not create a namespace.") - self._assert_saved_fields(ns_fixture, ns_created) - - fixture = build_association_fixture() - created = self.db_api.metadef_resource_type_association_create( - self.context, ns_created['namespace'], fixture) - self.assertIsNotNone(created, "Could not create an association.") - - found = ( - self.db_api.metadef_resource_type_association_get_all_by_namespace( - self.context, ns_created['namespace'])) - self.assertEqual(1, len(found)) - for item in found: - self._assert_saved_fields(fixture, item) - - -class MetadefTagTests(object): - - def test_tag_create(self): - fixture = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create(self.context, - fixture) - 
self.assertIsNotNone(created_ns) - self._assert_saved_fields(fixture, created_ns) - - fixture_tag = build_tag_fixture(namespace_id=created_ns['id']) - created_tag = self.db_api.metadef_tag_create( - self.context, created_ns['namespace'], fixture_tag) - self._assert_saved_fields(fixture_tag, created_tag) - - def test_tag_create_duplicate(self): - fixture = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create(self.context, - fixture) - self.assertIsNotNone(created_ns) - self._assert_saved_fields(fixture, created_ns) - - fixture_tag = build_tag_fixture(namespace_id=created_ns['id']) - created_tag = self.db_api.metadef_tag_create( - self.context, created_ns['namespace'], fixture_tag) - self._assert_saved_fields(fixture_tag, created_tag) - - self.assertRaises(exception.Duplicate, - self.db_api.metadef_tag_create, - self.context, created_ns['namespace'], - fixture_tag) - - def test_tag_create_tags(self): - fixture = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create(self.context, - fixture) - self.assertIsNotNone(created_ns) - self._assert_saved_fields(fixture, created_ns) - - tags = build_tags_fixture(['Tag1', 'Tag2', 'Tag3']) - created_tags = self.db_api.metadef_tag_create_tags( - self.context, created_ns['namespace'], tags) - actual = set([tag['name'] for tag in created_tags]) - expected = set(['Tag1', 'Tag2', 'Tag3']) - self.assertEqual(expected, actual) - - def test_tag_create_duplicate_tags_1(self): - fixture = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create(self.context, - fixture) - self.assertIsNotNone(created_ns) - self._assert_saved_fields(fixture, created_ns) - - tags = build_tags_fixture(['Tag1', 'Tag2', 'Tag3', 'Tag2']) - self.assertRaises(exception.Duplicate, - self.db_api.metadef_tag_create_tags, - self.context, created_ns['namespace'], - tags) - - def test_tag_create_duplicate_tags_2(self): - fixture = build_namespace_fixture() - created_ns = 
self.db_api.metadef_namespace_create(self.context, - fixture) - self.assertIsNotNone(created_ns) - self._assert_saved_fields(fixture, created_ns) - - tags = build_tags_fixture(['Tag1', 'Tag2', 'Tag3']) - self.db_api.metadef_tag_create_tags(self.context, - created_ns['namespace'], tags) - dup_tag = build_tag_fixture(namespace_id=created_ns['id'], - name='Tag3') - self.assertRaises(exception.Duplicate, - self.db_api.metadef_tag_create, - self.context, created_ns['namespace'], dup_tag) - - def test_tag_get(self): - fixture_ns = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create(self.context, - fixture_ns) - self.assertIsNotNone(created_ns) - self._assert_saved_fields(fixture_ns, created_ns) - - fixture_tag = build_tag_fixture(namespace_id=created_ns['id']) - created_tag = self.db_api.metadef_tag_create( - self.context, created_ns['namespace'], fixture_tag) - - found_tag = self.db_api.metadef_tag_get( - self.context, created_ns['namespace'], created_tag['name']) - self._assert_saved_fields(fixture_tag, found_tag) - - def test_tag_get_all(self): - ns_fixture = build_namespace_fixture() - ns_created = self.db_api.metadef_namespace_create(self.context, - ns_fixture) - self.assertIsNotNone(ns_created, "Could not create a namespace.") - self._assert_saved_fields(ns_fixture, ns_created) - - fixture1 = build_tag_fixture(namespace_id=ns_created['id']) - created_tag1 = self.db_api.metadef_tag_create( - self.context, ns_created['namespace'], fixture1) - self.assertIsNotNone(created_tag1, "Could not create tag 1.") - - fixture2 = build_tag_fixture(namespace_id=ns_created['id'], - name='test-tag-2') - created_tag2 = self.db_api.metadef_tag_create( - self.context, ns_created['namespace'], fixture2) - self.assertIsNotNone(created_tag2, "Could not create tag 2.") - - found = self.db_api.metadef_tag_get_all( - self.context, ns_created['namespace'], sort_key='created_at') - self.assertEqual(2, len(found)) - - def test_tag_update(self): - delta = {'name': 
u'New-name'} - - fixture_ns = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create(self.context, - fixture_ns) - self.assertIsNotNone(created_ns['namespace']) - - tag_fixture = build_tag_fixture(namespace_id=created_ns['id']) - created_tag = self.db_api.metadef_tag_create( - self.context, created_ns['namespace'], tag_fixture) - self.assertIsNotNone(created_tag, "Could not create a tag.") - - delta_dict = {} - delta_dict.update(delta.copy()) - - updated = self.db_api.metadef_tag_update( - self.context, created_ns['namespace'], - created_tag['id'], delta_dict) - self.assertEqual(delta['name'], updated['name']) - - def test_tag_delete(self): - fixture_ns = build_namespace_fixture() - created_ns = self.db_api.metadef_namespace_create( - self.context, fixture_ns) - self.assertIsNotNone(created_ns['namespace']) - - tag_fixture = build_tag_fixture(namespace_id=created_ns['id']) - created_tag = self.db_api.metadef_tag_create( - self.context, created_ns['namespace'], tag_fixture) - self.assertIsNotNone(created_tag, "Could not create a tag.") - - self.db_api.metadef_tag_delete( - self.context, created_ns['namespace'], created_tag['name']) - - self.assertRaises(exception.NotFound, - self.db_api.metadef_tag_get, - self.context, created_ns['namespace'], - created_tag['name']) - - -class MetadefDriverTests(MetadefNamespaceTests, - MetadefResourceTypeTests, - MetadefResourceTypeAssociationTests, - MetadefPropertyTests, - MetadefObjectTests, - MetadefTagTests): - # collection class - pass diff --git a/glance/tests/functional/db/migrations/__init__.py b/glance/tests/functional/db/migrations/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/functional/db/migrations/test_mitaka01.py b/glance/tests/functional/db/migrations/test_mitaka01.py deleted file mode 100644 index c2223139..00000000 --- a/glance/tests/functional/db/migrations/test_mitaka01.py +++ /dev/null @@ -1,48 +0,0 @@ -# Licensed under the Apache License, 
Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_db.sqlalchemy import test_base -import sqlalchemy - -from glance.tests.functional.db import test_migrations - - -def get_indexes(table, engine): - inspector = sqlalchemy.inspect(engine) - return [idx['name'] for idx in inspector.get_indexes(table)] - - -class TestMitaka01Mixin(test_migrations.AlembicMigrationsMixin): - - def _pre_upgrade_mitaka01(self, engine): - indexes = get_indexes('images', engine) - self.assertNotIn('created_at_image_idx', indexes) - self.assertNotIn('updated_at_image_idx', indexes) - - def _check_mitaka01(self, engine, data): - indexes = get_indexes('images', engine) - self.assertIn('created_at_image_idx', indexes) - self.assertIn('updated_at_image_idx', indexes) - - -class TestMitaka01MySQL(TestMitaka01Mixin, - test_base.MySQLOpportunisticTestCase): - pass - - -class TestMitaka01PostgresSQL(TestMitaka01Mixin, - test_base.PostgreSQLOpportunisticTestCase): - pass - - -class TestMitaka01Sqlite(TestMitaka01Mixin, test_base.DbTestCase): - pass diff --git a/glance/tests/functional/db/migrations/test_mitaka02.py b/glance/tests/functional/db/migrations/test_mitaka02.py deleted file mode 100644 index 48eb858d..00000000 --- a/glance/tests/functional/db/migrations/test_mitaka02.py +++ /dev/null @@ -1,65 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -from oslo_db.sqlalchemy import test_base -from oslo_db.sqlalchemy import utils as db_utils - -from glance.tests.functional.db import test_migrations - - -class TestMitaka02Mixin(test_migrations.AlembicMigrationsMixin): - - def _pre_upgrade_mitaka02(self, engine): - metadef_resource_types = db_utils.get_table(engine, - 'metadef_resource_types') - now = datetime.datetime.now() - db_rec1 = dict(id='9580', - name='OS::Nova::Instance', - protected=False, - created_at=now, - updated_at=now,) - db_rec2 = dict(id='9581', - name='OS::Nova::Blah', - protected=False, - created_at=now, - updated_at=now,) - db_values = (db_rec1, db_rec2) - metadef_resource_types.insert().values(db_values).execute() - - def _check_mitaka02(self, engine, data): - metadef_resource_types = db_utils.get_table(engine, - 'metadef_resource_types') - result = (metadef_resource_types.select() - .where(metadef_resource_types.c.name == 'OS::Nova::Instance') - .execute().fetchall()) - self.assertEqual(0, len(result)) - - result = (metadef_resource_types.select() - .where(metadef_resource_types.c.name == 'OS::Nova::Server') - .execute().fetchall()) - self.assertEqual(1, len(result)) - - -class TestMitaka02MySQL(TestMitaka02Mixin, - test_base.MySQLOpportunisticTestCase): - pass - - -class TestMitaka02PostgresSQL(TestMitaka02Mixin, - test_base.PostgreSQLOpportunisticTestCase): - pass - - -class TestMitaka02Sqlite(TestMitaka02Mixin, test_base.DbTestCase): - pass diff --git a/glance/tests/functional/db/migrations/test_ocata01.py 
b/glance/tests/functional/db/migrations/test_ocata01.py deleted file mode 100644 index 323fee31..00000000 --- a/glance/tests/functional/db/migrations/test_ocata01.py +++ /dev/null @@ -1,142 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -from oslo_db.sqlalchemy import test_base -from oslo_db.sqlalchemy import utils as db_utils - -from glance.tests.functional.db import test_migrations - - -class TestOcata01Mixin(test_migrations.AlembicMigrationsMixin): - - def _pre_upgrade_ocata01(self, engine): - images = db_utils.get_table(engine, 'images') - now = datetime.datetime.now() - image_members = db_utils.get_table(engine, 'image_members') - - # inserting a public image record - public_temp = dict(deleted=False, - created_at=now, - status='active', - is_public=True, - min_disk=0, - min_ram=0, - id='public_id') - images.insert().values(public_temp).execute() - - # inserting a non-public image record for 'shared' visibility test - shared_temp = dict(deleted=False, - created_at=now, - status='active', - is_public=False, - min_disk=0, - min_ram=0, - id='shared_id') - images.insert().values(shared_temp).execute() - - # inserting a non-public image records for 'private' visibility test - private_temp = dict(deleted=False, - created_at=now, - status='active', - is_public=False, - min_disk=0, - min_ram=0, - id='private_id_1') - images.insert().values(private_temp).execute() - private_temp = dict(deleted=False, - created_at=now, - status='active', - 
is_public=False, - min_disk=0, - min_ram=0, - id='private_id_2') - images.insert().values(private_temp).execute() - - # adding an active as well as a deleted image member for checking - # 'shared' visibility - temp = dict(deleted=False, - created_at=now, - image_id='shared_id', - member='fake_member_452', - can_share=True, - id=45) - image_members.insert().values(temp).execute() - - temp = dict(deleted=True, - created_at=now, - image_id='shared_id', - member='fake_member_453', - can_share=True, - id=453) - image_members.insert().values(temp).execute() - - # adding an image member, but marking it deleted, - # for testing 'private' visibility - temp = dict(deleted=True, - created_at=now, - image_id='private_id_2', - member='fake_member_451', - can_share=True, - id=451) - image_members.insert().values(temp).execute() - - # adding an active image member for the 'public' image, - # to test it remains public regardless. - temp = dict(deleted=False, - created_at=now, - image_id='public_id', - member='fake_member_450', - can_share=True, - id=450) - image_members.insert().values(temp).execute() - - def _check_ocata01(self, engine, data): - # check that after migration, 'visibility' column is introduced - images = db_utils.get_table(engine, 'images') - self.assertIn('visibility', images.c) - self.assertNotIn('is_public', images.c) - - # tests to identify the visibilities of images created above - rows = images.select().where( - images.c.id == 'public_id').execute().fetchall() - self.assertEqual(1, len(rows)) - self.assertEqual('public', rows[0][16]) - - rows = images.select().where( - images.c.id == 'shared_id').execute().fetchall() - self.assertEqual(1, len(rows)) - self.assertEqual('shared', rows[0][16]) - - rows = images.select().where( - images.c.id == 'private_id_1').execute().fetchall() - self.assertEqual(1, len(rows)) - self.assertEqual('private', rows[0][16]) - - rows = images.select().where( - images.c.id == 'private_id_2').execute().fetchall() - self.assertEqual(1, 
len(rows)) - self.assertEqual('private', rows[0][16]) - - -class TestOcata01MySQL(TestOcata01Mixin, test_base.MySQLOpportunisticTestCase): - pass - - -class TestOcata01PostgresSQL(TestOcata01Mixin, - test_base.PostgreSQLOpportunisticTestCase): - pass - - -class TestOcata01Sqlite(TestOcata01Mixin, test_base.DbTestCase): - pass diff --git a/glance/tests/functional/db/migrations/test_ocata_contract01.py b/glance/tests/functional/db/migrations/test_ocata_contract01.py deleted file mode 100644 index b2049f63..00000000 --- a/glance/tests/functional/db/migrations/test_ocata_contract01.py +++ /dev/null @@ -1,64 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -from oslo_db.sqlalchemy import test_base -from oslo_db.sqlalchemy import utils as db_utils - -from glance.tests.functional.db import test_migrations - - -class TestOcataContract01Mixin(test_migrations.AlembicMigrationsMixin): - - def _get_revisions(self, config): - return test_migrations.AlembicMigrationsMixin._get_revisions( - self, config, head='ocata_contract01') - - def _pre_upgrade_ocata_contract01(self, engine): - images = db_utils.get_table(engine, 'images') - now = datetime.datetime.now() - self.assertIn('is_public', images.c) - self.assertIn('visibility', images.c) - self.assertTrue(images.c.is_public.nullable) - self.assertTrue(images.c.visibility.nullable) - - # inserting a public image record - public_temp = dict(deleted=False, - created_at=now, - status='active', - is_public=True, - min_disk=0, - min_ram=0, - id='public_id_before_expand') - images.insert().values(public_temp).execute() - - # inserting a private image record - shared_temp = dict(deleted=False, - created_at=now, - status='active', - is_public=False, - min_disk=0, - min_ram=0, - id='private_id_before_expand') - images.insert().values(shared_temp).execute() - - def _check_ocata_contract01(self, engine, data): - # check that after contract 'is_public' column is dropped - images = db_utils.get_table(engine, 'images') - self.assertNotIn('is_public', images.c) - self.assertIn('visibility', images.c) - - -class TestOcataContract01MySQL(TestOcataContract01Mixin, - test_base.MySQLOpportunisticTestCase): - pass diff --git a/glance/tests/functional/db/migrations/test_ocata_expand01.py b/glance/tests/functional/db/migrations/test_ocata_expand01.py deleted file mode 100644 index ef684987..00000000 --- a/glance/tests/functional/db/migrations/test_ocata_expand01.py +++ /dev/null @@ -1,174 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -from oslo_db.sqlalchemy import test_base -from oslo_db.sqlalchemy import utils as db_utils - -from glance.tests.functional.db import test_migrations - - -class TestOcataExpand01Mixin(test_migrations.AlembicMigrationsMixin): - - def _get_revisions(self, config): - return test_migrations.AlembicMigrationsMixin._get_revisions( - self, config, head='ocata_expand01') - - def _pre_upgrade_ocata_expand01(self, engine): - images = db_utils.get_table(engine, 'images') - now = datetime.datetime.now() - self.assertIn('is_public', images.c) - self.assertNotIn('visibility', images.c) - self.assertFalse(images.c.is_public.nullable) - - # inserting a public image record - public_temp = dict(deleted=False, - created_at=now, - status='active', - is_public=True, - min_disk=0, - min_ram=0, - id='public_id_before_expand') - images.insert().values(public_temp).execute() - - # inserting a private image record - shared_temp = dict(deleted=False, - created_at=now, - status='active', - is_public=False, - min_disk=0, - min_ram=0, - id='private_id_before_expand') - images.insert().values(shared_temp).execute() - - def _check_ocata_expand01(self, engine, data): - # check that after migration, 'visibility' column is introduced - images = db_utils.get_table(engine, 'images') - self.assertIn('visibility', images.c) - self.assertIn('is_public', images.c) - self.assertTrue(images.c.is_public.nullable) - self.assertTrue(images.c.visibility.nullable) - - # tests visibility set to None for existing images - rows = (images.select() - 
.where(images.c.id.like('%_before_expand')) - .order_by(images.c.id) - .execute() - .fetchall()) - - self.assertEqual(2, len(rows)) - # private image first - self.assertEqual(0, rows[0]['is_public']) - self.assertEqual('private_id_before_expand', rows[0]['id']) - self.assertIsNone(rows[0]['visibility']) - # then public image - self.assertEqual(1, rows[1]['is_public']) - self.assertEqual('public_id_before_expand', rows[1]['id']) - self.assertIsNone(rows[1]['visibility']) - - self._test_trigger_old_to_new(images) - self._test_trigger_new_to_old(images) - - def _test_trigger_new_to_old(self, images): - now = datetime.datetime.now() - # inserting a public image record after expand - public_temp = dict(deleted=False, - created_at=now, - status='active', - visibility='public', - min_disk=0, - min_ram=0, - id='public_id_new_to_old') - images.insert().values(public_temp).execute() - - # inserting a private image record after expand - shared_temp = dict(deleted=False, - created_at=now, - status='active', - visibility='private', - min_disk=0, - min_ram=0, - id='private_id_new_to_old') - images.insert().values(shared_temp).execute() - - # inserting a shared image record after expand - shared_temp = dict(deleted=False, - created_at=now, - status='active', - visibility='shared', - min_disk=0, - min_ram=0, - id='shared_id_new_to_old') - images.insert().values(shared_temp).execute() - - # test visibility is set appropriately by the trigger for new images - rows = (images.select() - .where(images.c.id.like('%_new_to_old')) - .order_by(images.c.id) - .execute() - .fetchall()) - - self.assertEqual(3, len(rows)) - # private image first - self.assertEqual(0, rows[0]['is_public']) - self.assertEqual('private_id_new_to_old', rows[0]['id']) - self.assertEqual('private', rows[0]['visibility']) - # then public image - self.assertEqual(1, rows[1]['is_public']) - self.assertEqual('public_id_new_to_old', rows[1]['id']) - self.assertEqual('public', rows[1]['visibility']) - # then shared image 
- self.assertEqual(0, rows[2]['is_public']) - self.assertEqual('shared_id_new_to_old', rows[2]['id']) - self.assertEqual('shared', rows[2]['visibility']) - - def _test_trigger_old_to_new(self, images): - now = datetime.datetime.now() - # inserting a public image record after expand - public_temp = dict(deleted=False, - created_at=now, - status='active', - is_public=True, - min_disk=0, - min_ram=0, - id='public_id_old_to_new') - images.insert().values(public_temp).execute() - # inserting a private image record after expand - shared_temp = dict(deleted=False, - created_at=now, - status='active', - is_public=False, - min_disk=0, - min_ram=0, - id='private_id_old_to_new') - images.insert().values(shared_temp).execute() - # tests visibility is set appropriately by the trigger for new images - rows = (images.select() - .where(images.c.id.like('%_old_to_new')) - .order_by(images.c.id) - .execute() - .fetchall()) - self.assertEqual(2, len(rows)) - # private image first - self.assertEqual(0, rows[0]['is_public']) - self.assertEqual('private_id_old_to_new', rows[0]['id']) - self.assertEqual('shared', rows[0]['visibility']) - # then public image - self.assertEqual(1, rows[1]['is_public']) - self.assertEqual('public_id_old_to_new', rows[1]['id']) - self.assertEqual('public', rows[1]['visibility']) - - -class TestOcataExpand01MySQL(TestOcataExpand01Mixin, - test_base.MySQLOpportunisticTestCase): - pass diff --git a/glance/tests/functional/db/migrations/test_ocata_migrate01.py b/glance/tests/functional/db/migrations/test_ocata_migrate01.py deleted file mode 100644 index 4027e8b0..00000000 --- a/glance/tests/functional/db/migrations/test_ocata_migrate01.py +++ /dev/null @@ -1,179 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -from oslo_db.sqlalchemy import test_base -from oslo_db.sqlalchemy import utils as db_utils - -from glance.db.sqlalchemy.alembic_migrations import data_migrations -from glance.tests.functional.db import test_migrations - - -class TestOcataMigrate01Mixin(test_migrations.AlembicMigrationsMixin): - - def _get_revisions(self, config): - return test_migrations.AlembicMigrationsMixin._get_revisions( - self, config, head='ocata_expand01') - - def _pre_upgrade_ocata_expand01(self, engine): - images = db_utils.get_table(engine, 'images') - image_members = db_utils.get_table(engine, 'image_members') - now = datetime.datetime.now() - - # inserting a public image record - public_temp = dict(deleted=False, - created_at=now, - status='active', - is_public=True, - min_disk=0, - min_ram=0, - id='public_id') - images.insert().values(public_temp).execute() - - # inserting a non-public image record for 'shared' visibility test - shared_temp = dict(deleted=False, - created_at=now, - status='active', - is_public=False, - min_disk=0, - min_ram=0, - id='shared_id') - images.insert().values(shared_temp).execute() - - # inserting a non-public image records for 'private' visibility test - private_temp = dict(deleted=False, - created_at=now, - status='active', - is_public=False, - min_disk=0, - min_ram=0, - id='private_id_1') - images.insert().values(private_temp).execute() - - private_temp = dict(deleted=False, - created_at=now, - status='active', - is_public=False, - min_disk=0, - min_ram=0, - id='private_id_2') - images.insert().values(private_temp).execute() 
- - # adding an active as well as a deleted image member for checking - # 'shared' visibility - temp = dict(deleted=False, - created_at=now, - image_id='shared_id', - member='fake_member_452', - can_share=True, - id=45) - image_members.insert().values(temp).execute() - - temp = dict(deleted=True, - created_at=now, - image_id='shared_id', - member='fake_member_453', - can_share=True, - id=453) - image_members.insert().values(temp).execute() - - # adding an image member, but marking it deleted, - # for testing 'private' visibility - temp = dict(deleted=True, - created_at=now, - image_id='private_id_2', - member='fake_member_451', - can_share=True, - id=451) - image_members.insert().values(temp).execute() - - # adding an active image member for the 'public' image, - # to test it remains public regardless. - temp = dict(deleted=False, - created_at=now, - image_id='public_id', - member='fake_member_450', - can_share=True, - id=450) - image_members.insert().values(temp).execute() - - def _check_ocata_expand01(self, engine, data): - images = db_utils.get_table(engine, 'images') - - # check that visibility is null for existing images - rows = (images.select() - .order_by(images.c.id) - .execute() - .fetchall()) - self.assertEqual(4, len(rows)) - for row in rows: - self.assertIsNone(row['visibility']) - - # run data migrations - data_migrations.migrate(engine) - - # check that visibility is set appropriately for all images - rows = (images.select() - .order_by(images.c.id) - .execute() - .fetchall()) - self.assertEqual(4, len(rows)) - # private_id_1 has private visibility - self.assertEqual('private_id_1', rows[0]['id']) - self.assertEqual('private', rows[0]['visibility']) - # private_id_2 has private visibility - self.assertEqual('private_id_2', rows[1]['id']) - self.assertEqual('private', rows[1]['visibility']) - # public_id has public visibility - self.assertEqual('public_id', rows[2]['id']) - self.assertEqual('public', rows[2]['visibility']) - # shared_id has shared 
visibility - self.assertEqual('shared_id', rows[3]['id']) - self.assertEqual('shared', rows[3]['visibility']) - - -class TestOcataMigrate01MySQL(TestOcataMigrate01Mixin, - test_base.MySQLOpportunisticTestCase): - pass - - -class TestOcataMigrate01_EmptyDBMixin(test_migrations.AlembicMigrationsMixin): - """This mixin is used to create an initial glance database and upgrade it - up to the ocata_expand01 revision. - """ - def _get_revisions(self, config): - return test_migrations.AlembicMigrationsMixin._get_revisions( - self, config, head='ocata_expand01') - - def _pre_upgrade_ocata_expand01(self, engine): - # New/empty database - pass - - def _check_ocata_expand01(self, engine, data): - images = db_utils.get_table(engine, 'images') - - # check that there are no rows in the images table - rows = (images.select() - .order_by(images.c.id) - .execute() - .fetchall()) - self.assertEqual(0, len(rows)) - - # run data migrations - data_migrations.migrate(engine) - - -class TestOcataMigrate01_EmptyDBMySQL(TestOcataMigrate01_EmptyDBMixin, - test_base.MySQLOpportunisticTestCase): - """This test runs the Ocata data migrations on an empty databse.""" - pass diff --git a/glance/tests/functional/db/migrations/test_pike01.py b/glance/tests/functional/db/migrations/test_pike01.py deleted file mode 100644 index fcb6db46..00000000 --- a/glance/tests/functional/db/migrations/test_pike01.py +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_db.sqlalchemy import test_base -from oslo_db.sqlalchemy import utils as db_utils -import sqlalchemy - -from glance.tests.functional.db import test_migrations - - -class TestPike01Mixin(test_migrations.AlembicMigrationsMixin): - - artifacts_table_names = [ - 'artifact_blob_locations', - 'artifact_properties', - 'artifact_blobs', - 'artifact_dependencies', - 'artifact_tags', - 'artifacts' - ] - - def _pre_upgrade_pike01(self, engine): - # verify presence of the artifacts tables - for table_name in self.artifacts_table_names: - table = db_utils.get_table(engine, table_name) - self.assertIsNotNone(table) - - def _check_pike01(self, engine, data): - # verify absence of the artifacts tables - for table_name in self.artifacts_table_names: - self.assertRaises(sqlalchemy.exc.NoSuchTableError, - db_utils.get_table, engine, table_name) - - -class TestPike01MySQL(TestPike01Mixin, test_base.MySQLOpportunisticTestCase): - pass - - -class TestPike01PostgresSQL(TestPike01Mixin, - test_base.PostgreSQLOpportunisticTestCase): - pass - - -class TestPike01Sqlite(TestPike01Mixin, test_base.DbTestCase): - pass diff --git a/glance/tests/functional/db/migrations/test_pike_contract01.py b/glance/tests/functional/db/migrations/test_pike_contract01.py deleted file mode 100644 index 3a04f56a..00000000 --- a/glance/tests/functional/db/migrations/test_pike_contract01.py +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_db.sqlalchemy import test_base -from oslo_db.sqlalchemy import utils as db_utils -import sqlalchemy - -from glance.tests.functional.db import test_migrations - - -class TestPikeContract01Mixin(test_migrations.AlembicMigrationsMixin): - - artifacts_table_names = [ - 'artifact_blob_locations', - 'artifact_properties', - 'artifact_blobs', - 'artifact_dependencies', - 'artifact_tags', - 'artifacts' - ] - - def _get_revisions(self, config): - return test_migrations.AlembicMigrationsMixin._get_revisions( - self, config, head='pike_contract01') - - def _pre_upgrade_pike_contract01(self, engine): - # verify presence of the artifacts tables - for table_name in self.artifacts_table_names: - table = db_utils.get_table(engine, table_name) - self.assertIsNotNone(table) - - def _check_pike_contract01(self, engine, data): - # verify absence of the artifacts tables - for table_name in self.artifacts_table_names: - self.assertRaises(sqlalchemy.exc.NoSuchTableError, - db_utils.get_table, engine, table_name) - - -class TestPikeContract01MySQL(TestPikeContract01Mixin, - test_base.MySQLOpportunisticTestCase): - pass diff --git a/glance/tests/functional/db/migrations/test_pike_expand01.py b/glance/tests/functional/db/migrations/test_pike_expand01.py deleted file mode 100644 index 2ad4481a..00000000 --- a/glance/tests/functional/db/migrations/test_pike_expand01.py +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_db.sqlalchemy import test_base -from oslo_db.sqlalchemy import utils as db_utils - -from glance.tests.functional.db import test_migrations - - -class TestPikeExpand01Mixin(test_migrations.AlembicMigrationsMixin): - - artifacts_table_names = [ - 'artifact_blob_locations', - 'artifact_properties', - 'artifact_blobs', - 'artifact_dependencies', - 'artifact_tags', - 'artifacts' - ] - - def _get_revisions(self, config): - return test_migrations.AlembicMigrationsMixin._get_revisions( - self, config, head='pike_expand01') - - def _pre_upgrade_pike_expand01(self, engine): - # verify presence of the artifacts tables - for table_name in self.artifacts_table_names: - table = db_utils.get_table(engine, table_name) - self.assertIsNotNone(table) - - def _check_pike_expand01(self, engine, data): - # should be no changes, so re-run pre-upgrade check - self._pre_upgrade_pike_expand01(engine) - - -class TestPikeExpand01MySQL(TestPikeExpand01Mixin, - test_base.MySQLOpportunisticTestCase): - pass diff --git a/glance/tests/functional/db/migrations/test_pike_migrate01.py b/glance/tests/functional/db/migrations/test_pike_migrate01.py deleted file mode 100644 index ee13b7c0..00000000 --- a/glance/tests/functional/db/migrations/test_pike_migrate01.py +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_db.sqlalchemy import test_base - -import glance.tests.functional.db.migrations.test_pike_expand01 as tpe01 - - -# no TestPikeMigrate01Mixin class needed, can use TestPikeExpand01Mixin instead - - -class TestPikeMigrate01MySQL(tpe01.TestPikeExpand01Mixin, - test_base.MySQLOpportunisticTestCase): - pass diff --git a/glance/tests/functional/db/test_migrations.py b/glance/tests/functional/db/test_migrations.py deleted file mode 100644 index e7d51a27..00000000 --- a/glance/tests/functional/db/test_migrations.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2016 Rackspace -# Copyright 2016 Intel Corporation -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os - -from alembic import command as alembic_command -from alembic import script as alembic_script -from oslo_db.sqlalchemy import test_base -from oslo_db.sqlalchemy import test_migrations -import sqlalchemy.types as types - -from glance.db import migration as db_migration -from glance.db.sqlalchemy import alembic_migrations -from glance.db.sqlalchemy.alembic_migrations import versions -from glance.db.sqlalchemy import models -from glance.db.sqlalchemy import models_metadef -import glance.tests.utils as test_utils - - -class AlembicMigrationsMixin(object): - - def _get_revisions(self, config, head=None): - head = head or db_migration.LATEST_REVISION - scripts_dir = alembic_script.ScriptDirectory.from_config(config) - revisions = list(scripts_dir.walk_revisions(base='base', - head=head)) - revisions = list(reversed(revisions)) - revisions = [rev.revision for rev in revisions] - return revisions - - def _migrate_up(self, config, engine, revision, with_data=False): - if with_data: - data = None - pre_upgrade = getattr(self, '_pre_upgrade_%s' % revision, None) - if pre_upgrade: - data = pre_upgrade(engine) - - alembic_command.upgrade(config, revision) - - if with_data: - check = getattr(self, '_check_%s' % revision, None) - if check: - check(engine, data) - - def test_walk_versions(self): - alembic_config = alembic_migrations.get_alembic_config(self.engine) - for revision in self._get_revisions(alembic_config): - self._migrate_up(alembic_config, self.engine, revision, - with_data=True) - - -class TestMysqlMigrations(test_base.MySQLOpportunisticTestCase, - AlembicMigrationsMixin): - - def test_mysql_innodb_tables(self): - test_utils.db_sync(engine=self.engine) - - total = self.engine.execute( - "SELECT COUNT(*) " - "FROM information_schema.TABLES " - "WHERE TABLE_SCHEMA='%s'" - % self.engine.url.database) - self.assertGreater(total.scalar(), 0, "No tables found. 
Wrong schema?") - - noninnodb = self.engine.execute( - "SELECT count(*) " - "FROM information_schema.TABLES " - "WHERE TABLE_SCHEMA='%s' " - "AND ENGINE!='InnoDB' " - "AND TABLE_NAME!='migrate_version'" - % self.engine.url.database) - count = noninnodb.scalar() - self.assertEqual(0, count, "%d non InnoDB tables created" % count) - - -class TestPostgresqlMigrations(test_base.PostgreSQLOpportunisticTestCase, - AlembicMigrationsMixin): - pass - - -class TestSqliteMigrations(test_base.DbTestCase, AlembicMigrationsMixin): - pass - - -class TestMigrations(test_base.DbTestCase, test_utils.BaseTestCase): - - def test_no_downgrade(self): - migrate_file = versions.__path__[0] - for parent, dirnames, filenames in os.walk(migrate_file): - for filename in filenames: - if filename.split('.')[1] == 'py': - model_name = filename.split('.')[0] - model = __import__( - 'glance.db.sqlalchemy.alembic_migrations.versions.' + - model_name) - obj = getattr(getattr(getattr(getattr(getattr( - model, 'db'), 'sqlalchemy'), 'alembic_migrations'), - 'versions'), model_name) - func = getattr(obj, 'downgrade', None) - self.assertIsNone(func) - - -class ModelsMigrationSyncMixin(object): - - def get_metadata(self): - for table in models_metadef.BASE_DICT.metadata.sorted_tables: - models.BASE.metadata._add_table(table.name, table.schema, table) - return models.BASE.metadata - - def get_engine(self): - return self.engine - - def db_sync(self, engine): - test_utils.db_sync(engine=engine) - - # TODO(akamyshikova): remove this method as soon as comparison with Variant - # will be implemented in oslo.db or alembic - def compare_type(self, ctxt, insp_col, meta_col, insp_type, meta_type): - if isinstance(meta_type, types.Variant): - meta_orig_type = meta_col.type - insp_orig_type = insp_col.type - meta_col.type = meta_type.impl - insp_col.type = meta_type.impl - - try: - return self.compare_type(ctxt, insp_col, meta_col, insp_type, - meta_type.impl) - finally: - meta_col.type = meta_orig_type - 
insp_col.type = insp_orig_type - else: - ret = super(ModelsMigrationSyncMixin, self).compare_type( - ctxt, insp_col, meta_col, insp_type, meta_type) - if ret is not None: - return ret - return ctxt.impl.compare_type(insp_col, meta_col) - - def include_object(self, object_, name, type_, reflected, compare_to): - if name in ['migrate_version'] and type_ == 'table': - return False - return True - - -class ModelsMigrationsSyncMysql(ModelsMigrationSyncMixin, - test_migrations.ModelsMigrationsSync, - test_base.MySQLOpportunisticTestCase): - pass - - -class ModelsMigrationsSyncPostgres(ModelsMigrationSyncMixin, - test_migrations.ModelsMigrationsSync, - test_base.PostgreSQLOpportunisticTestCase): - pass - - -class ModelsMigrationsSyncSqlite(ModelsMigrationSyncMixin, - test_migrations.ModelsMigrationsSync, - test_base.DbTestCase): - pass diff --git a/glance/tests/functional/db/test_registry.py b/glance/tests/functional/db/test_registry.py deleted file mode 100644 index 2ce5a27d..00000000 --- a/glance/tests/functional/db/test_registry.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_db import options - -import glance.db -import glance.tests.functional.db as db_tests -from glance.tests.functional.db import base -from glance.tests.functional.db import base_metadef - -CONF = cfg.CONF - - -def get_db(config): - options.set_defaults(CONF, connection='sqlite://') - config(data_api='glance.db.registry.api') - return glance.db.get_api() - - -def reset_db(db_api): - pass - - -class FunctionalInitWrapper(base.FunctionalInitWrapper): - - def setUp(self): - # NOTE(flaper87): We need to start the - # registry service *before* TestDriver's - # setup goes on, since it'll create some - # images that will be later used in tests. - # - # Python's request is way too magical and - # it will make the TestDriver's super call - # FunctionalTest's without letting us start - # the server. - # - # This setUp will be called by TestDriver - # and will be used to call FunctionalTest - # setUp method *and* start the registry - # service right after it. - super(FunctionalInitWrapper, self).setUp() - self.registry_server.deployment_flavor = 'fakeauth' - self.start_with_retry(self.registry_server, - 'registry_port', 3, - api_version=2) - - self.config(registry_port=self.registry_server.bind_port, - use_user_token=True) - - -class TestRegistryDriver(base.TestDriver, - base.DriverTests, - FunctionalInitWrapper): - - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestRegistryDriver, self).setUp() - self.addCleanup(db_tests.reset) - - def tearDown(self): - self.registry_server.stop() - super(TestRegistryDriver, self).tearDown() - - -class TestRegistryQuota(base.DriverQuotaTests, FunctionalInitWrapper): - - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestRegistryQuota, self).setUp() - self.addCleanup(db_tests.reset) - - def tearDown(self): - self.registry_server.stop() - super(TestRegistryQuota, self).tearDown() - - -class TestRegistryMetadefDriver(base_metadef.TestMetadefDriver, - 
base_metadef.MetadefDriverTests, - FunctionalInitWrapper): - - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestRegistryMetadefDriver, self).setUp() - self.addCleanup(db_tests.reset) - - def tearDown(self): - self.registry_server.stop() - super(TestRegistryMetadefDriver, self).tearDown() - - -class TestTasksDriver(base.TaskTests, FunctionalInitWrapper): - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestTasksDriver, self).setUp() - self.addCleanup(db_tests.reset) - - def tearDown(self): - self.registry_server.stop() - super(TestTasksDriver, self).tearDown() diff --git a/glance/tests/functional/db/test_rpc_endpoint.py b/glance/tests/functional/db/test_rpc_endpoint.py deleted file mode 100644 index 13f1b23f..00000000 --- a/glance/tests/functional/db/test_rpc_endpoint.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_serialization import jsonutils -import requests -from six.moves import http_client as http - -from glance.tests import functional - - -class TestRegistryURLVisibility(functional.FunctionalTest): - - def setUp(self): - super(TestRegistryURLVisibility, self).setUp() - self.cleanup() - self.registry_server.deployment_flavor = '' - self.req_body = jsonutils.dumps([{"command": "image_get_all"}]) - - def _url(self, path): - return 'http://127.0.0.1:%d%s' % (self.registry_port, path) - - def _headers(self, custom_headers=None): - base_headers = { - } - base_headers.update(custom_headers or {}) - return base_headers - - def test_v2_not_enabled(self): - self.registry_server.enable_v2_registry = False - self.start_servers(**self.__dict__.copy()) - path = self._url('/rpc') - response = requests.post(path, headers=self._headers(), - data=self.req_body) - self.assertEqual(http.NOT_FOUND, response.status_code) - self.stop_servers() - - def test_v2_enabled(self): - self.registry_server.enable_v2_registry = True - self.start_servers(**self.__dict__.copy()) - path = self._url('/rpc') - response = requests.post(path, headers=self._headers(), - data=self.req_body) - self.assertEqual(http.OK, response.status_code) - self.stop_servers() diff --git a/glance/tests/functional/db/test_simple.py b/glance/tests/functional/db/test_simple.py deleted file mode 100644 index ff7c68d9..00000000 --- a/glance/tests/functional/db/test_simple.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from glance.api import CONF -import glance.db.simple.api -import glance.tests.functional.db as db_tests -from glance.tests.functional.db import base - - -def get_db(config, workers=1): - CONF.set_override('data_api', 'glance.db.simple.api') - CONF.set_override('workers', workers) - db_api = glance.db.get_api() - return db_api - - -def reset_db(db_api): - db_api.reset() - - -class TestSimpleDriver(base.TestDriver, - base.DriverTests, - base.FunctionalInitWrapper): - - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestSimpleDriver, self).setUp() - self.addCleanup(db_tests.reset) - - -class TestSimpleQuota(base.DriverQuotaTests, - base.FunctionalInitWrapper): - - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestSimpleQuota, self).setUp() - self.addCleanup(db_tests.reset) - - -class TestSimpleVisibility(base.TestVisibility, - base.VisibilityTests, - base.FunctionalInitWrapper): - - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestSimpleVisibility, self).setUp() - self.addCleanup(db_tests.reset) - - -class TestSimpleMembershipVisibility(base.TestMembershipVisibility, - base.MembershipVisibilityTests, - base.FunctionalInitWrapper): - - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestSimpleMembershipVisibility, self).setUp() - self.addCleanup(db_tests.reset) - - -class TestSimpleTask(base.TaskTests, - base.FunctionalInitWrapper): - - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestSimpleTask, self).setUp() - self.addCleanup(db_tests.reset) - - -class TestTooManyWorkers(base.TaskTests): - - 
def setUp(self): - def get_db_too_many_workers(config): - self.assertRaises(SystemExit, get_db, config, 2) - return get_db(config) - - db_tests.load(get_db_too_many_workers, reset_db) - super(TestTooManyWorkers, self).setUp() - self.addCleanup(db_tests.reset) diff --git a/glance/tests/functional/db/test_sqlalchemy.py b/glance/tests/functional/db/test_sqlalchemy.py deleted file mode 100644 index 57070b2b..00000000 --- a/glance/tests/functional/db/test_sqlalchemy.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_db import options - -from glance.common import exception -import glance.db.sqlalchemy.api -from glance.db.sqlalchemy import models as db_models -from glance.db.sqlalchemy import models_metadef as metadef_models -import glance.tests.functional.db as db_tests -from glance.tests.functional.db import base -from glance.tests.functional.db import base_metadef - -CONF = cfg.CONF - - -def get_db(config): - options.set_defaults(CONF, connection='sqlite://') - config(debug=False) - db_api = glance.db.sqlalchemy.api - return db_api - - -def reset_db(db_api): - db_models.unregister_models(db_api.get_engine()) - db_models.register_models(db_api.get_engine()) - - -def reset_db_metadef(db_api): - metadef_models.unregister_models(db_api.get_engine()) - metadef_models.register_models(db_api.get_engine()) - - -class TestSqlAlchemyDriver(base.TestDriver, - base.DriverTests, - base.FunctionalInitWrapper): - - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestSqlAlchemyDriver, self).setUp() - self.addCleanup(db_tests.reset) - - def test_get_image_with_invalid_long_image_id(self): - image_id = '343f9ba5-0197-41be-9543-16bbb32e12aa-xxxxxx' - self.assertRaises(exception.NotFound, self.db_api._image_get, - self.context, image_id) - - def test_image_tag_delete_with_invalid_long_image_id(self): - image_id = '343f9ba5-0197-41be-9543-16bbb32e12aa-xxxxxx' - self.assertRaises(exception.NotFound, self.db_api.image_tag_delete, - self.context, image_id, 'fake') - - def test_image_tag_get_all_with_invalid_long_image_id(self): - image_id = '343f9ba5-0197-41be-9543-16bbb32e12aa-xxxxxx' - self.assertRaises(exception.NotFound, self.db_api.image_tag_get_all, - self.context, image_id) - - def test_user_get_storage_usage_with_invalid_long_image_id(self): - image_id = '343f9ba5-0197-41be-9543-16bbb32e12aa-xxxxxx' - self.assertRaises(exception.NotFound, - self.db_api.user_get_storage_usage, - self.context, 'fake_owner_id', image_id) - - -class 
TestSqlAlchemyVisibility(base.TestVisibility, - base.VisibilityTests, - base.FunctionalInitWrapper): - - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestSqlAlchemyVisibility, self).setUp() - self.addCleanup(db_tests.reset) - - -class TestSqlAlchemyMembershipVisibility(base.TestMembershipVisibility, - base.MembershipVisibilityTests, - base.FunctionalInitWrapper): - - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestSqlAlchemyMembershipVisibility, self).setUp() - self.addCleanup(db_tests.reset) - - -class TestSqlAlchemyDBDataIntegrity(base.TestDriver, - base.FunctionalInitWrapper): - """Test class for checking the data integrity in the database. - - Helpful in testing scenarios specific to the sqlalchemy api. - """ - - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestSqlAlchemyDBDataIntegrity, self).setUp() - self.addCleanup(db_tests.reset) - - def test_paginate_redundant_sort_keys(self): - original_method = self.db_api._paginate_query - - def fake_paginate_query(query, model, limit, - sort_keys, marker, sort_dir, sort_dirs): - self.assertEqual(['created_at', 'id'], sort_keys) - return original_method(query, model, limit, - sort_keys, marker, sort_dir, sort_dirs) - - self.stubs.Set(self.db_api, '_paginate_query', - fake_paginate_query) - self.db_api.image_get_all(self.context, sort_key=['created_at']) - - def test_paginate_non_redundant_sort_keys(self): - original_method = self.db_api._paginate_query - - def fake_paginate_query(query, model, limit, - sort_keys, marker, sort_dir, sort_dirs): - self.assertEqual(['name', 'created_at', 'id'], sort_keys) - return original_method(query, model, limit, - sort_keys, marker, sort_dir, sort_dirs) - - self.stubs.Set(self.db_api, '_paginate_query', - fake_paginate_query) - self.db_api.image_get_all(self.context, sort_key=['name']) - - -class TestSqlAlchemyTask(base.TaskTests, - base.FunctionalInitWrapper): - - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestSqlAlchemyTask, 
self).setUp() - self.addCleanup(db_tests.reset) - - -class TestSqlAlchemyQuota(base.DriverQuotaTests, - base.FunctionalInitWrapper): - - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestSqlAlchemyQuota, self).setUp() - self.addCleanup(db_tests.reset) - - -class TestDBPurge(base.DBPurgeTests, - base.FunctionalInitWrapper): - - def setUp(self): - db_tests.load(get_db, reset_db) - super(TestDBPurge, self).setUp() - self.addCleanup(db_tests.reset) - - -class TestMetadefSqlAlchemyDriver(base_metadef.TestMetadefDriver, - base_metadef.MetadefDriverTests, - base.FunctionalInitWrapper): - - def setUp(self): - db_tests.load(get_db, reset_db_metadef) - super(TestMetadefSqlAlchemyDriver, self).setUp() - self.addCleanup(db_tests.reset) diff --git a/glance/tests/functional/store_utils.py b/glance/tests/functional/store_utils.py deleted file mode 100644 index 701bead1..00000000 --- a/glance/tests/functional/store_utils.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# Copyright 2012 Red Hat, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Utility methods to set testcases up for Swift tests. 
-""" - -from __future__ import print_function - -import threading - -from oslo_utils import units -from six.moves import BaseHTTPServer -from six.moves import http_client as http - - -FIVE_KB = 5 * units.Ki - - -class RemoteImageHandler(BaseHTTPServer.BaseHTTPRequestHandler): - def do_HEAD(self): - """ - Respond to an image HEAD request fake metadata - """ - if 'images' in self.path: - self.send_response(http.OK) - self.send_header('Content-Type', 'application/octet-stream') - self.send_header('Content-Length', FIVE_KB) - self.end_headers() - return - else: - self.send_error(http.NOT_FOUND, 'File Not Found: %s' % self.path) - return - - def do_GET(self): - """ - Respond to an image GET request with fake image content. - """ - if 'images' in self.path: - self.send_response(http.OK) - self.send_header('Content-Type', 'application/octet-stream') - self.send_header('Content-Length', FIVE_KB) - self.end_headers() - image_data = b'*' * FIVE_KB - self.wfile.write(image_data) - self.wfile.close() - return - else: - self.send_error(http.NOT_FOUND, 'File Not Found: %s' % self.path) - return - - def log_message(self, format, *args): - """ - Simple override to prevent writing crap to stderr... 
- """ - pass - - -def setup_http(test): - server_class = BaseHTTPServer.HTTPServer - remote_server = server_class(('127.0.0.1', 0), RemoteImageHandler) - remote_ip, remote_port = remote_server.server_address - - def serve_requests(httpd): - httpd.serve_forever() - - threading.Thread(target=serve_requests, args=(remote_server,)).start() - test.http_server = remote_server - test.http_ip = remote_ip - test.http_port = remote_port - test.addCleanup(test.http_server.shutdown) - - -def get_http_uri(test, image_id): - uri = ('http://%(http_ip)s:%(http_port)d/images/' % - {'http_ip': test.http_ip, 'http_port': test.http_port}) - uri += image_id - return uri diff --git a/glance/tests/functional/test_api.py b/glance/tests/functional/test_api.py deleted file mode 100644 index 26af8c15..00000000 --- a/glance/tests/functional/test_api.py +++ /dev/null @@ -1,375 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Version-independent api tests""" - - -import httplib2 -from oslo_serialization import jsonutils -from six.moves import http_client - -from glance.tests import functional - - -class TestApiVersions(functional.FunctionalTest): - - def test_version_configurations(self): - """Test that versioning is handled properly through all channels""" - # v1 and v2 api enabled - self.start_servers(**self.__dict__.copy()) - - url = 'http://127.0.0.1:%d/v%%s/' % self.api_port - versions = {'versions': [ - { - 'id': 'v2.5', - 'status': 'CURRENT', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v2.4', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v2.3', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v2.2', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v2.1', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v2.0', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v1.1', - 'status': 'DEPRECATED', - 'links': [{'rel': 'self', 'href': url % '1'}], - }, - { - 'id': 'v1.0', - 'status': 'DEPRECATED', - 'links': [{'rel': 'self', 'href': url % '1'}], - }, - ]} - - # Verify version choices returned. 
- path = 'http://%s:%d' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - response, content_json = http.request(path, 'GET') - self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) - content = jsonutils.loads(content_json.decode()) - self.assertEqual(versions, content) - - def test_v2_api_configuration(self): - self.api_server.enable_v1_api = False - self.api_server.enable_v2_api = True - self.start_servers(**self.__dict__.copy()) - - url = 'http://127.0.0.1:%d/v%%s/' % self.api_port - versions = {'versions': [ - { - 'id': 'v2.5', - 'status': 'CURRENT', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v2.4', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v2.3', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v2.2', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v2.1', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v2.0', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - ]} - - # Verify version choices returned. - path = 'http://%s:%d' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - response, content_json = http.request(path, 'GET') - self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) - content = jsonutils.loads(content_json.decode()) - self.assertEqual(versions, content) - - def test_v1_api_configuration(self): - self.api_server.enable_v1_api = True - self.api_server.enable_v2_api = False - self.start_servers(**self.__dict__.copy()) - - url = 'http://127.0.0.1:%d/v%%s/' % self.api_port - versions = {'versions': [ - { - 'id': 'v1.1', - 'status': 'DEPRECATED', - 'links': [{'rel': 'self', 'href': url % '1'}], - }, - { - 'id': 'v1.0', - 'status': 'DEPRECATED', - 'links': [{'rel': 'self', 'href': url % '1'}], - }, - ]} - - # Verify version choices returned. 
- path = 'http://%s:%d' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - response, content_json = http.request(path, 'GET') - self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) - content = jsonutils.loads(content_json.decode()) - self.assertEqual(versions, content) - - -class TestApiPaths(functional.FunctionalTest): - def setUp(self): - super(TestApiPaths, self).setUp() - self.start_servers(**self.__dict__.copy()) - - url = 'http://127.0.0.1:%d/v%%s/' % self.api_port - self.versions = {'versions': [ - { - 'id': 'v2.5', - 'status': 'CURRENT', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v2.4', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v2.3', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v2.2', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v2.1', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v2.0', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', 'href': url % '2'}], - }, - { - 'id': 'v1.1', - 'status': 'DEPRECATED', - 'links': [{'rel': 'self', 'href': url % '1'}], - }, - { - 'id': 'v1.0', - 'status': 'DEPRECATED', - 'links': [{'rel': 'self', 'href': url % '1'}], - }, - ]} - images = {'images': []} - self.images_json = jsonutils.dumps(images) - - def test_get_root_path(self): - """Assert GET / with `no Accept:` header. - Verify version choices returned. - Bug lp:803260 no Accept header causes a 500 in glance-api - """ - path = 'http://%s:%d' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - response, content_json = http.request(path, 'GET') - self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) - content = jsonutils.loads(content_json.decode()) - self.assertEqual(self.versions, content) - - def test_get_images_path(self): - """Assert GET /images with `no Accept:` header. - Verify version choices returned. 
- """ - path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - response, content_json = http.request(path, 'GET') - self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) - content = jsonutils.loads(content_json.decode()) - self.assertEqual(self.versions, content) - - def test_get_v1_images_path(self): - """GET /v1/images with `no Accept:` header. - Verify empty images list returned. - """ - path = 'http://%s:%d/v1/images' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - def test_get_root_path_with_unknown_header(self): - """Assert GET / with Accept: unknown header - Verify version choices returned. Verify message in API log about - unknown accept header. - """ - path = 'http://%s:%d/' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - headers = {'Accept': 'unknown'} - response, content_json = http.request(path, 'GET', headers=headers) - self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) - content = jsonutils.loads(content_json.decode()) - self.assertEqual(self.versions, content) - - def test_get_root_path_with_openstack_header(self): - """Assert GET / with an Accept: application/vnd.openstack.images-v1 - Verify empty image list returned - """ - path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - headers = {'Accept': 'application/vnd.openstack.images-v1'} - response, content = http.request(path, 'GET', headers=headers) - self.assertEqual(http_client.OK, response.status) - self.assertEqual(self.images_json, content.decode()) - - def test_get_images_path_with_openstack_header(self): - """Assert GET /images with a - `Accept: application/vnd.openstack.compute-v1` header. - Verify version choices returned. Verify message in API log - about unknown accept header. 
- """ - path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - headers = {'Accept': 'application/vnd.openstack.compute-v1'} - response, content_json = http.request(path, 'GET', headers=headers) - self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) - content = jsonutils.loads(content_json.decode()) - self.assertEqual(self.versions, content) - - def test_get_v10_images_path(self): - """Assert GET /v1.0/images with no Accept: header - Verify version choices returned - """ - path = 'http://%s:%d/v1.a/images' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) - - def test_get_v1a_images_path(self): - """Assert GET /v1.a/images with no Accept: header - Verify version choices returned - """ - path = 'http://%s:%d/v1.a/images' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) - - def test_get_va1_images_path(self): - """Assert GET /va.1/images with no Accept: header - Verify version choices returned - """ - path = 'http://%s:%d/va.1/images' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - response, content_json = http.request(path, 'GET') - self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) - content = jsonutils.loads(content_json.decode()) - self.assertEqual(self.versions, content) - - def test_get_versions_path(self): - """Assert GET /versions with no Accept: header - Verify version choices returned - """ - path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - response, content_json = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - content = jsonutils.loads(content_json.decode()) - self.assertEqual(self.versions, content) - - def test_get_versions_path_with_openstack_header(self): - """Assert GET /versions 
with the - `Accept: application/vnd.openstack.images-v1` header. - Verify version choices returned. - """ - path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - headers = {'Accept': 'application/vnd.openstack.images-v1'} - response, content_json = http.request(path, 'GET', headers=headers) - self.assertEqual(http_client.OK, response.status) - content = jsonutils.loads(content_json.decode()) - self.assertEqual(self.versions, content) - - def test_get_v1_versions_path(self): - """Assert GET /v1/versions with `no Accept:` header - Verify 404 returned - """ - path = 'http://%s:%d/v1/versions' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.NOT_FOUND, response.status) - - def test_get_versions_choices(self): - """Verify version choices returned""" - path = 'http://%s:%d/v10' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - response, content_json = http.request(path, 'GET') - self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) - content = jsonutils.loads(content_json.decode()) - self.assertEqual(self.versions, content) - - def test_get_images_path_with_openstack_v2_header(self): - """Assert GET /images with a - `Accept: application/vnd.openstack.compute-v2` header. - Verify version choices returned. Verify message in API log - about unknown version in accept header. 
- """ - path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - headers = {'Accept': 'application/vnd.openstack.images-v10'} - response, content_json = http.request(path, 'GET', headers=headers) - self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) - content = jsonutils.loads(content_json.decode()) - self.assertEqual(self.versions, content) - - def test_get_v12_images_path(self): - """Assert GET /v1.2/images with `no Accept:` header - Verify version choices returned - """ - path = 'http://%s:%d/v1.2/images' % ('127.0.0.1', self.api_port) - http = httplib2.Http() - response, content_json = http.request(path, 'GET') - self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) - content = jsonutils.loads(content_json.decode()) - self.assertEqual(self.versions, content) diff --git a/glance/tests/functional/test_bin_glance_cache_manage.py b/glance/tests/functional/test_bin_glance_cache_manage.py deleted file mode 100644 index d933f783..00000000 --- a/glance/tests/functional/test_bin_glance_cache_manage.py +++ /dev/null @@ -1,358 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Functional test case that utilizes the bin/glance-cache-manage CLI tool""" - -import datetime -import hashlib -import os -import sys - -import httplib2 -from oslo_serialization import jsonutils -from oslo_utils import units -from six.moves import http_client -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from glance.tests import functional -from glance.tests.utils import execute -from glance.tests.utils import minimal_headers - -FIVE_KB = 5 * units.Ki - - -class TestBinGlanceCacheManage(functional.FunctionalTest): - """Functional tests for the bin/glance CLI tool""" - - def setUp(self): - self.image_cache_driver = "sqlite" - - super(TestBinGlanceCacheManage, self).setUp() - - self.api_server.deployment_flavor = "cachemanagement" - - # NOTE(sirp): This is needed in case we are running the tests under an - # environment in which OS_AUTH_STRATEGY=keystone. The test server we - # spin up won't have keystone support, so we need to switch to the - # NoAuth strategy. - os.environ['OS_AUTH_STRATEGY'] = 'noauth' - os.environ['OS_AUTH_URL'] = '' - - def add_image(self, name): - """ - Adds an image with supplied name and returns the newly-created - image identifier. 
- """ - image_data = b"*" * FIVE_KB - headers = minimal_headers(name) - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers, - body=image_data) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - self.assertEqual(hashlib.md5(image_data).hexdigest(), - data['image']['checksum']) - self.assertEqual(FIVE_KB, data['image']['size']) - self.assertEqual(name, data['image']['name']) - self.assertTrue(data['image']['is_public']) - return data['image']['id'] - - def is_image_cached(self, image_id): - """ - Return True if supplied image ID is cached, False otherwise - """ - exe_cmd = '%s -m glance.cmd.cache_manage' % sys.executable - cmd = "%s --port=%d list-cached" % (exe_cmd, self.api_port) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - out = out.decode('utf-8') - return image_id in out - - def iso_date(self, image_id): - """ - Return True if supplied image ID is cached, False otherwise - """ - exe_cmd = '%s -m glance.cmd.cache_manage' % sys.executable - cmd = "%s --port=%d list-cached" % (exe_cmd, self.api_port) - - exitcode, out, err = execute(cmd) - out = out.decode('utf-8') - - return datetime.datetime.utcnow().strftime("%Y-%m-%d") in out - - def test_no_cache_enabled(self): - """ - Test that cache index command works - """ - self.cleanup() - self.api_server.deployment_flavor = '' - self.start_servers() # Not passing in cache_manage in pipeline... 
- - api_port = self.api_port - - # Verify decent error message returned - exe_cmd = '%s -m glance.cmd.cache_manage' % sys.executable - cmd = "%s --port=%d list-cached" % (exe_cmd, api_port) - - exitcode, out, err = execute(cmd, raise_error=False) - - self.assertEqual(1, exitcode) - self.assertIn(b'Cache management middleware not enabled on host', - out.strip()) - - self.stop_servers() - - def test_cache_index(self): - """ - Test that cache index command works - """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - api_port = self.api_port - - # Verify no cached images - exe_cmd = '%s -m glance.cmd.cache_manage' % sys.executable - cmd = "%s --port=%d list-cached" % (exe_cmd, api_port) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - self.assertIn(b'No cached images', out.strip()) - - ids = {} - - # Add a few images and cache the second one of them - # by GETing the image... - for x in range(4): - ids[x] = self.add_image("Image%s" % x) - - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", api_port, - ids[1]) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - self.assertTrue(self.is_image_cached(ids[1]), - "%s is not cached." 
% ids[1]) - - self.assertTrue(self.iso_date(ids[1])) - - self.stop_servers() - - def test_queue(self): - """ - Test that we can queue and fetch images using the - CLI utility - """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - api_port = self.api_port - - # Verify no cached images - exe_cmd = '%s -m glance.cmd.cache_manage' % sys.executable - cmd = "%s --port=%d list-cached" % (exe_cmd, api_port) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - self.assertIn(b'No cached images', out.strip()) - - # Verify no queued images - cmd = "%s --port=%d list-queued" % (exe_cmd, api_port) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - self.assertIn(b'No queued images', out.strip()) - - ids = {} - - # Add a few images and cache the second one of them - # by GETing the image... - for x in range(4): - ids[x] = self.add_image("Image%s" % x) - - # Queue second image and then cache it - cmd = "%s --port=%d --force queue-image %s" % ( - exe_cmd, api_port, ids[1]) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - - # Verify queued second image - cmd = "%s --port=%d list-queued" % (exe_cmd, api_port) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - out = out.decode('utf-8') - self.assertIn(ids[1], out, 'Image %s was not queued!' 
% ids[1]) - - # Cache images in the queue by running the prefetcher - cache_config_filepath = os.path.join(self.test_dir, 'etc', - 'glance-cache.conf') - cache_file_options = { - 'image_cache_dir': self.api_server.image_cache_dir, - 'image_cache_driver': self.image_cache_driver, - 'registry_port': self.registry_server.bind_port, - 'lock_path': self.test_dir, - 'log_file': os.path.join(self.test_dir, 'cache.log'), - 'metadata_encryption_key': "012345678901234567890123456789ab", - 'filesystem_store_datadir': self.test_dir - } - with open(cache_config_filepath, 'w') as cache_file: - cache_file.write("""[DEFAULT] -debug = True -lock_path = %(lock_path)s -image_cache_dir = %(image_cache_dir)s -image_cache_driver = %(image_cache_driver)s -registry_host = 127.0.0.1 -registry_port = %(registry_port)s -metadata_encryption_key = %(metadata_encryption_key)s -log_file = %(log_file)s - -[glance_store] -filesystem_store_datadir=%(filesystem_store_datadir)s -""" % cache_file_options) - - cmd = ("%s -m glance.cmd.cache_prefetcher --config-file %s" % - (sys.executable, cache_config_filepath)) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - self.assertEqual(b'', out.strip(), out) - - # Verify no queued images - cmd = "%s --port=%d list-queued" % (exe_cmd, api_port) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - self.assertIn(b'No queued images', out.strip()) - - # Verify second image now cached - cmd = "%s --port=%d list-cached" % (exe_cmd, api_port) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - out = out.decode('utf-8') - self.assertIn(ids[1], out, 'Image %s was not cached!' 
% ids[1]) - - # Queue third image and then delete it from queue - cmd = "%s --port=%d --force queue-image %s" % ( - exe_cmd, api_port, ids[2]) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - - # Verify queued third image - cmd = "%s --port=%d list-queued" % (exe_cmd, api_port) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - out = out.decode('utf-8') - self.assertIn(ids[2], out, 'Image %s was not queued!' % ids[2]) - - # Delete the image from the queue - cmd = ("%s --port=%d --force " - "delete-queued-image %s") % (exe_cmd, api_port, ids[2]) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - - # Verify no queued images - cmd = "%s --port=%d list-queued" % (exe_cmd, api_port) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - self.assertIn(b'No queued images', out.strip()) - - # Queue all images - for x in range(4): - cmd = ("%s --port=%d --force " - "queue-image %s") % (exe_cmd, api_port, ids[x]) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - - # Verify queued third image - cmd = "%s --port=%d list-queued" % (exe_cmd, api_port) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - self.assertIn(b'Found 3 queued images', out) - - # Delete the image from the queue - cmd = ("%s --port=%d --force " - "delete-all-queued-images") % (exe_cmd, api_port) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - - # Verify nothing in queue anymore - cmd = "%s --port=%d list-queued" % (exe_cmd, api_port) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - self.assertIn(b'No queued images', out.strip()) - - # verify two image id when queue-image - cmd = ("%s --port=%d --force " - "queue-image %s %s") % (exe_cmd, api_port, ids[0], ids[1]) - - exitcode, out, err = execute(cmd, raise_error=False) - - self.assertEqual(1, exitcode) - self.assertIn(b'Please specify one and only ID of ' - b'the image you 
wish to ', out.strip()) - - # verify two image id when delete-queued-image - cmd = ("%s --port=%d --force delete-queued-image " - "%s %s") % (exe_cmd, api_port, ids[0], ids[1]) - - exitcode, out, err = execute(cmd, raise_error=False) - - self.assertEqual(1, exitcode) - self.assertIn(b'Please specify one and only ID of ' - b'the image you wish to ', out.strip()) - - # verify two image id when delete-cached-image - cmd = ("%s --port=%d --force delete-cached-image " - "%s %s") % (exe_cmd, api_port, ids[0], ids[1]) - - exitcode, out, err = execute(cmd, raise_error=False) - - self.assertEqual(1, exitcode) - self.assertIn(b'Please specify one and only ID of ' - b'the image you wish to ', out.strip()) - - self.stop_servers() diff --git a/glance/tests/functional/test_cache_middleware.py b/glance/tests/functional/test_cache_middleware.py deleted file mode 100644 index 9508d978..00000000 --- a/glance/tests/functional/test_cache_middleware.py +++ /dev/null @@ -1,1163 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests a Glance API server which uses the caching middleware that -uses the default SQLite cache driver. We use the filesystem store, -but that is really not relevant, as the image cache is transparent -to the backend store. 
-""" - -import hashlib -import os -import shutil -import sys -import time -import uuid - -import httplib2 -from oslo_serialization import jsonutils -from oslo_utils import units -from six.moves import http_client -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from glance.tests import functional -from glance.tests.functional.store_utils import get_http_uri -from glance.tests.functional.store_utils import setup_http -from glance.tests.utils import execute -from glance.tests.utils import minimal_headers -from glance.tests.utils import skip_if_disabled -from glance.tests.utils import xattr_writes_supported - -FIVE_KB = 5 * units.Ki - - -class BaseCacheMiddlewareTest(object): - - @skip_if_disabled - def test_cache_middleware_transparent_v1(self): - """ - We test that putting the cache middleware into the - application pipeline gives us transparent image caching - """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - # Add an image and verify a 200 OK is returned - image_data = b"*" * FIVE_KB - headers = minimal_headers('Image1') - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers, - body=image_data) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - self.assertEqual(hashlib.md5(image_data).hexdigest(), - data['image']['checksum']) - self.assertEqual(FIVE_KB, data['image']['size']) - self.assertEqual("Image1", data['image']['name']) - self.assertTrue(data['image']['is_public']) - - image_id = data['image']['id'] - - # Verify image not in cache - image_cached_path = os.path.join(self.api_server.image_cache_dir, - image_id) - self.assertFalse(os.path.exists(image_cached_path)) - - # Grab the image - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'GET') - 
self.assertEqual(http_client.OK, response.status) - - # Verify image now in cache - image_cached_path = os.path.join(self.api_server.image_cache_dir, - image_id) - - # You might wonder why the heck this is here... well, it's here - # because it took me forever to figure out that the disk write - # cache in Linux was causing random failures of the os.path.exists - # assert directly below this. Basically, since the cache is writing - # the image file to disk in a different process, the write buffers - # don't flush the cache file during an os.rename() properly, resulting - # in a false negative on the file existence check below. This little - # loop pauses the execution of this process for no more than 1.5 - # seconds. If after that time the cached image file still doesn't - # appear on disk, something really is wrong, and the assert should - # trigger... - i = 0 - while not os.path.exists(image_cached_path) and i < 30: - time.sleep(0.05) - i = i + 1 - - self.assertTrue(os.path.exists(image_cached_path)) - - # Now, we delete the image from the server and verify that - # the image cache no longer contains the deleted image - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - self.assertFalse(os.path.exists(image_cached_path)) - - self.stop_servers() - - @skip_if_disabled - def test_cache_middleware_transparent_v2(self): - """Ensure the v2 API image transfer calls trigger caching""" - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - # Add an image and verify success - path = "http://%s:%d/v2/images" % ("0.0.0.0", self.api_port) - http = httplib2.Http() - headers = {'content-type': 'application/json'} - image_entity = { - 'name': 'Image1', - 'visibility': 'public', - 'container_format': 'bare', - 'disk_format': 'raw', - } - response, content = http.request(path, 'POST', - headers=headers, - 
body=jsonutils.dumps(image_entity)) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['id'] - - path = "http://%s:%d/v2/images/%s/file" % ("0.0.0.0", self.api_port, - image_id) - headers = {'content-type': 'application/octet-stream'} - image_data = "*" * FIVE_KB - response, content = http.request(path, 'PUT', - headers=headers, - body=image_data) - self.assertEqual(http_client.NO_CONTENT, response.status) - - # Verify image not in cache - image_cached_path = os.path.join(self.api_server.image_cache_dir, - image_id) - self.assertFalse(os.path.exists(image_cached_path)) - - # Grab the image - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - # Verify image now in cache - image_cached_path = os.path.join(self.api_server.image_cache_dir, - image_id) - self.assertTrue(os.path.exists(image_cached_path)) - - # Now, we delete the image from the server and verify that - # the image cache no longer contains the deleted image - path = "http://%s:%d/v2/images/%s" % ("0.0.0.0", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.NO_CONTENT, response.status) - - self.assertFalse(os.path.exists(image_cached_path)) - - self.stop_servers() - - @skip_if_disabled - def test_partially_downloaded_images_are_not_cached_v2_api(self): - """ - Verify that we do not cache images that were downloaded partially - using v2 images API. 
- """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - # Add an image and verify success - path = "http://%s:%d/v2/images" % ("0.0.0.0", self.api_port) - http = httplib2.Http() - headers = {'content-type': 'application/json'} - image_entity = { - 'name': 'Image1', - 'visibility': 'public', - 'container_format': 'bare', - 'disk_format': 'raw', - } - response, content = http.request(path, 'POST', - headers=headers, - body=jsonutils.dumps(image_entity)) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['id'] - - path = "http://%s:%d/v2/images/%s/file" % ("0.0.0.0", self.api_port, - image_id) - headers = {'content-type': 'application/octet-stream'} - image_data = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ' - response, content = http.request(path, 'PUT', - headers=headers, - body=image_data) - self.assertEqual(http_client.NO_CONTENT, response.status) - - # Verify that this image is not in cache - image_cached_path = os.path.join(self.api_server.image_cache_dir, - image_id) - self.assertFalse(os.path.exists(image_cached_path)) - - # partially download this image and verify status 206 - http = httplib2.Http() - # range download request - range_ = 'bytes=3-5' - headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': str(uuid.uuid4()), - 'X-Roles': 'member', - 'Range': range_ - } - response, content = http.request(path, 'GET', headers=headers) - self.assertEqual(http_client.PARTIAL_CONTENT, response.status) - self.assertEqual(b'DEF', content) - - # content-range download request - # NOTE(dharinic): Glance incorrectly supports Content-Range for partial - # image downloads in requests. This test is included to ensure that - # we prevent regression. 
- content_range = 'bytes 3-5/*' - headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': str(uuid.uuid4()), - 'X-Roles': 'member', - 'Content-Range': content_range - } - response, content = http.request(path, 'GET', headers=headers) - self.assertEqual(http_client.PARTIAL_CONTENT, response.status) - self.assertEqual(b'DEF', content) - - # verify that we do not cache the partial image - image_cached_path = os.path.join(self.api_server.image_cache_dir, - image_id) - self.assertFalse(os.path.exists(image_cached_path)) - - self.stop_servers() - - @skip_if_disabled - def test_partial_download_of_cached_images_v2_api(self): - """ - Verify that partial download requests for a fully cached image - succeeds; we do not serve it from cache. - """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - # Add an image and verify success - path = "http://%s:%d/v2/images" % ("0.0.0.0", self.api_port) - http = httplib2.Http() - headers = {'content-type': 'application/json'} - image_entity = { - 'name': 'Image1', - 'visibility': 'public', - 'container_format': 'bare', - 'disk_format': 'raw', - } - response, content = http.request(path, 'POST', - headers=headers, - body=jsonutils.dumps(image_entity)) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['id'] - - path = "http://%s:%d/v2/images/%s/file" % ("0.0.0.0", self.api_port, - image_id) - headers = {'content-type': 'application/octet-stream'} - image_data = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ' - response, content = http.request(path, 'PUT', - headers=headers, - body=image_data) - self.assertEqual(http_client.NO_CONTENT, response.status) - - # Verify that this image is not in cache - image_cached_path = os.path.join(self.api_server.image_cache_dir, - image_id) - self.assertFalse(os.path.exists(image_cached_path)) - - # Download the entire image - http 
= httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ', content) - - # Verify that the image is now in cache - image_cached_path = os.path.join(self.api_server.image_cache_dir, - image_id) - self.assertTrue(os.path.exists(image_cached_path)) - # Modify the data in cache so we can verify the partially downloaded - # content was not from cache indeed. - with open(image_cached_path, 'w') as cache_file: - cache_file.write('0123456789') - - # Partially attempt a download of this image and verify that is not - # from cache - # range download request - range_ = 'bytes=3-5' - headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': str(uuid.uuid4()), - 'X-Roles': 'member', - 'Range': range_, - 'content-type': 'application/json' - } - response, content = http.request(path, 'GET', headers=headers) - self.assertEqual(http_client.PARTIAL_CONTENT, response.status) - self.assertEqual(b'DEF', content) - self.assertNotEqual(b'345', content) - self.assertNotEqual(image_data, content) - - # content-range download request - # NOTE(dharinic): Glance incorrectly supports Content-Range for partial - # image downloads in requests. This test is included to ensure that - # we prevent regression. 
- content_range = 'bytes 3-5/*' - headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': str(uuid.uuid4()), - 'X-Roles': 'member', - 'Content-Range': content_range, - 'content-type': 'application/json' - } - response, content = http.request(path, 'GET', headers=headers) - self.assertEqual(http_client.PARTIAL_CONTENT, response.status) - self.assertEqual(b'DEF', content) - self.assertNotEqual(b'345', content) - self.assertNotEqual(image_data, content) - - self.stop_servers() - - @skip_if_disabled - def test_cache_remote_image(self): - """ - We test that caching is no longer broken for remote images - """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - setup_http(self) - - # Add a remote image and verify a 201 Created is returned - remote_uri = get_http_uri(self, '2') - headers = {'X-Image-Meta-Name': 'Image2', - 'X-Image-Meta-disk_format': 'raw', - 'X-Image-Meta-container_format': 'ovf', - 'X-Image-Meta-Is-Public': 'True', - 'X-Image-Meta-Location': remote_uri} - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - self.assertEqual(FIVE_KB, data['image']['size']) - - image_id = data['image']['id'] - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - - # Grab the image - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - # Grab the image again to ensure it can be served out from - # cache with the correct size - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual(FIVE_KB, int(response['content-length'])) - - self.stop_servers() - - 
@skip_if_disabled - def test_cache_middleware_trans_v1_without_download_image_policy(self): - """ - Ensure the image v1 API image transfer applied 'download_image' - policy enforcement. - """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - # Add an image and verify a 200 OK is returned - image_data = b"*" * FIVE_KB - headers = minimal_headers('Image1') - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers, - body=image_data) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - self.assertEqual(hashlib.md5(image_data).hexdigest(), - data['image']['checksum']) - self.assertEqual(FIVE_KB, data['image']['size']) - self.assertEqual("Image1", data['image']['name']) - self.assertTrue(data['image']['is_public']) - - image_id = data['image']['id'] - - # Verify image not in cache - image_cached_path = os.path.join(self.api_server.image_cache_dir, - image_id) - self.assertFalse(os.path.exists(image_cached_path)) - - rules = {"context_is_admin": "role:admin", "default": "", - "download_image": "!"} - self.set_policy_rules(rules) - - # Grab the image - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.FORBIDDEN, response.status) - - # Now, we delete the image from the server and verify that - # the image cache no longer contains the deleted image - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - self.assertFalse(os.path.exists(image_cached_path)) - - self.stop_servers() - - @skip_if_disabled - def test_cache_middleware_trans_v2_without_download_image_policy(self): - """ - Ensure the image v2 API image transfer applied 
'download_image' - policy enforcement. - """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - # Add an image and verify success - path = "http://%s:%d/v2/images" % ("0.0.0.0", self.api_port) - http = httplib2.Http() - headers = {'content-type': 'application/json'} - image_entity = { - 'name': 'Image1', - 'visibility': 'public', - 'container_format': 'bare', - 'disk_format': 'raw', - } - response, content = http.request(path, 'POST', - headers=headers, - body=jsonutils.dumps(image_entity)) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['id'] - - path = "http://%s:%d/v2/images/%s/file" % ("0.0.0.0", self.api_port, - image_id) - headers = {'content-type': 'application/octet-stream'} - image_data = "*" * FIVE_KB - response, content = http.request(path, 'PUT', - headers=headers, - body=image_data) - self.assertEqual(http_client.NO_CONTENT, response.status) - - # Verify image not in cache - image_cached_path = os.path.join(self.api_server.image_cache_dir, - image_id) - self.assertFalse(os.path.exists(image_cached_path)) - - rules = {"context_is_admin": "role:admin", "default": "", - "download_image": "!"} - self.set_policy_rules(rules) - - # Grab the image - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.FORBIDDEN, response.status) - - # Now, we delete the image from the server and verify that - # the image cache no longer contains the deleted image - path = "http://%s:%d/v2/images/%s" % ("0.0.0.0", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.NO_CONTENT, response.status) - - self.assertFalse(os.path.exists(image_cached_path)) - - self.stop_servers() - - @skip_if_disabled - def test_cache_middleware_trans_with_deactivated_image(self): - """ - Ensure the image v1/v2 API image transfer forbids downloading - deactivated images. 
- Image deactivation is not available in v1. So, we'll deactivate the - image using v2 but test image transfer with both v1 and v2. - """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - # Add an image and verify a 200 OK is returned - image_data = b"*" * FIVE_KB - headers = minimal_headers('Image1') - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers, - body=image_data) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - self.assertEqual(hashlib.md5(image_data).hexdigest(), - data['image']['checksum']) - self.assertEqual(FIVE_KB, data['image']['size']) - self.assertEqual("Image1", data['image']['name']) - self.assertTrue(data['image']['is_public']) - - image_id = data['image']['id'] - - # Grab the image - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - # Verify image in cache - image_cached_path = os.path.join(self.api_server.image_cache_dir, - image_id) - self.assertTrue(os.path.exists(image_cached_path)) - - # Deactivate the image using v2 - path = "http://%s:%d/v2/images/%s/actions/deactivate" - path = path % ("127.0.0.1", self.api_port, image_id) - http = httplib2.Http() - response, content = http.request(path, 'POST') - self.assertEqual(http_client.NO_CONTENT, response.status) - - # Download the image with v1. Ensure it is forbidden - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.FORBIDDEN, response.status) - - # Download the image with v2. This succeeds because - # we are in admin context. 
- path = "http://%s:%d/v2/images/%s/file" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - # Reactivate the image using v2 - path = "http://%s:%d/v2/images/%s/actions/reactivate" - path = path % ("127.0.0.1", self.api_port, image_id) - http = httplib2.Http() - response, content = http.request(path, 'POST') - self.assertEqual(http_client.NO_CONTENT, response.status) - - # Download the image with v1. Ensure it is allowed - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - # Download the image with v2. Ensure it is allowed - path = "http://%s:%d/v2/images/%s/file" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - # Now, we delete the image from the server and verify that - # the image cache no longer contains the deleted image - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - self.assertFalse(os.path.exists(image_cached_path)) - - self.stop_servers() - - -class BaseCacheManageMiddlewareTest(object): - - """Base test class for testing cache management middleware""" - - def verify_no_images(self): - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertIn('images', data) - self.assertEqual(0, len(data['images'])) - - def add_image(self, name): - """ - Adds an image and returns the newly-added image - identifier - """ - image_data = b"*" * 
FIVE_KB - headers = minimal_headers('%s' % name) - - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers, - body=image_data) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - self.assertEqual(hashlib.md5(image_data).hexdigest(), - data['image']['checksum']) - self.assertEqual(FIVE_KB, data['image']['size']) - self.assertEqual(name, data['image']['name']) - self.assertTrue(data['image']['is_public']) - return data['image']['id'] - - def verify_no_cached_images(self): - """ - Verify no images in the image cache - """ - path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - data = jsonutils.loads(content) - self.assertIn('cached_images', data) - self.assertEqual([], data['cached_images']) - - @skip_if_disabled - def test_user_not_authorized(self): - self.cleanup() - self.start_servers(**self.__dict__.copy()) - self.verify_no_images() - - image_id1 = self.add_image("Image1") - image_id2 = self.add_image("Image2") - - # Verify image does not yet show up in cache (we haven't "hit" - # it yet using a GET /images/1 ... 
- self.verify_no_cached_images() - - # Grab the image - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id1) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - # Verify image now in cache - path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - data = jsonutils.loads(content) - self.assertIn('cached_images', data) - - cached_images = data['cached_images'] - self.assertEqual(1, len(cached_images)) - self.assertEqual(image_id1, cached_images[0]['image_id']) - - # Set policy to disallow access to cache management - rules = {"manage_image_cache": '!'} - self.set_policy_rules(rules) - - # Verify an unprivileged user cannot see cached images - path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.FORBIDDEN, response.status) - - # Verify an unprivileged user cannot delete images from the cache - path = "http://%s:%d/v1/cached_images/%s" % ("127.0.0.1", - self.api_port, image_id1) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.FORBIDDEN, response.status) - - # Verify an unprivileged user cannot delete all cached images - path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.FORBIDDEN, response.status) - - # Verify an unprivileged user cannot queue an image - path = "http://%s:%d/v1/queued_images/%s" % ("127.0.0.1", - self.api_port, image_id2) - http = httplib2.Http() - response, content = http.request(path, 'PUT') - self.assertEqual(http_client.FORBIDDEN, response.status) - - self.stop_servers() - - @skip_if_disabled - def 
test_cache_manage_get_cached_images(self): - """ - Tests that cached images are queryable - """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - self.verify_no_images() - - image_id = self.add_image("Image1") - - # Verify image does not yet show up in cache (we haven't "hit" - # it yet using a GET /images/1 ... - self.verify_no_cached_images() - - # Grab the image - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - # Verify image now in cache - path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - data = jsonutils.loads(content) - self.assertIn('cached_images', data) - - # Verify the last_modified/last_accessed values are valid floats - for cached_image in data['cached_images']: - for time_key in ('last_modified', 'last_accessed'): - time_val = cached_image[time_key] - try: - float(time_val) - except ValueError: - self.fail('%s time %s for cached image %s not a valid ' - 'float' % (time_key, time_val, - cached_image['image_id'])) - - cached_images = data['cached_images'] - self.assertEqual(1, len(cached_images)) - self.assertEqual(image_id, cached_images[0]['image_id']) - self.assertEqual(0, cached_images[0]['hits']) - - # Hit the image - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - # Verify image hits increased in output of manage GET - path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - data = jsonutils.loads(content) - 
self.assertIn('cached_images', data) - - cached_images = data['cached_images'] - self.assertEqual(1, len(cached_images)) - self.assertEqual(image_id, cached_images[0]['image_id']) - self.assertEqual(1, cached_images[0]['hits']) - - self.stop_servers() - - @skip_if_disabled - def test_cache_manage_delete_cached_images(self): - """ - Tests that cached images may be deleted - """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - self.verify_no_images() - - ids = {} - - # Add a bunch of images... - for x in range(4): - ids[x] = self.add_image("Image%s" % str(x)) - - # Verify no images in cached_images because no image has been hit - # yet using a GET /images/ ... - self.verify_no_cached_images() - - # Grab the images, essentially caching them... - for x in range(4): - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - ids[x]) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status, - "Failed to find image %s" % ids[x]) - - # Verify images now in cache - path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - data = jsonutils.loads(content) - self.assertIn('cached_images', data) - - cached_images = data['cached_images'] - self.assertEqual(4, len(cached_images)) - - for x in range(4, 0): # Cached images returned last modified order - self.assertEqual(ids[x], cached_images[x]['image_id']) - self.assertEqual(0, cached_images[x]['hits']) - - # Delete third image of the cached images and verify no longer in cache - path = "http://%s:%d/v1/cached_images/%s" % ("127.0.0.1", - self.api_port, ids[2]) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, 
content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - data = jsonutils.loads(content) - self.assertIn('cached_images', data) - - cached_images = data['cached_images'] - self.assertEqual(3, len(cached_images)) - self.assertNotIn(ids[2], [x['image_id'] for x in cached_images]) - - # Delete all cached images and verify nothing in cache - path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - data = jsonutils.loads(content) - self.assertIn('cached_images', data) - - cached_images = data['cached_images'] - self.assertEqual(0, len(cached_images)) - - self.stop_servers() - - @skip_if_disabled - def test_cache_manage_delete_queued_images(self): - """ - Tests that all queued images may be deleted at once - """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - self.verify_no_images() - - ids = {} - NUM_IMAGES = 4 - - # Add and then queue some images - for x in range(NUM_IMAGES): - ids[x] = self.add_image("Image%s" % str(x)) - path = "http://%s:%d/v1/queued_images/%s" % ("127.0.0.1", - self.api_port, ids[x]) - http = httplib2.Http() - response, content = http.request(path, 'PUT') - self.assertEqual(http_client.OK, response.status) - - # Delete all queued images - path = "http://%s:%d/v1/queued_images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - data = jsonutils.loads(content) - num_deleted = data['num_deleted'] - self.assertEqual(NUM_IMAGES, num_deleted) - - # Verify a second delete now returns num_deleted=0 - path = "http://%s:%d/v1/queued_images" % ("127.0.0.1", 
self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - data = jsonutils.loads(content) - num_deleted = data['num_deleted'] - self.assertEqual(0, num_deleted) - - self.stop_servers() - - @skip_if_disabled - def test_queue_and_prefetch(self): - """ - Tests that images may be queued and prefetched - """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - cache_config_filepath = os.path.join(self.test_dir, 'etc', - 'glance-cache.conf') - cache_file_options = { - 'image_cache_dir': self.api_server.image_cache_dir, - 'image_cache_driver': self.image_cache_driver, - 'registry_port': self.registry_server.bind_port, - 'log_file': os.path.join(self.test_dir, 'cache.log'), - 'lock_path': self.test_dir, - 'metadata_encryption_key': "012345678901234567890123456789ab", - 'filesystem_store_datadir': self.test_dir - } - with open(cache_config_filepath, 'w') as cache_file: - cache_file.write("""[DEFAULT] -debug = True -lock_path = %(lock_path)s -image_cache_dir = %(image_cache_dir)s -image_cache_driver = %(image_cache_driver)s -registry_host = 127.0.0.1 -registry_port = %(registry_port)s -metadata_encryption_key = %(metadata_encryption_key)s -log_file = %(log_file)s - -[glance_store] -filesystem_store_datadir=%(filesystem_store_datadir)s -""" % cache_file_options) - - self.verify_no_images() - - ids = {} - - # Add a bunch of images... 
- for x in range(4): - ids[x] = self.add_image("Image%s" % str(x)) - - # Queue the first image, verify no images still in cache after queueing - # then run the prefetcher and verify that the image is then in the - # cache - path = "http://%s:%d/v1/queued_images/%s" % ("127.0.0.1", - self.api_port, ids[0]) - http = httplib2.Http() - response, content = http.request(path, 'PUT') - self.assertEqual(http_client.OK, response.status) - - self.verify_no_cached_images() - - cmd = ("%s -m glance.cmd.cache_prefetcher --config-file %s" % - (sys.executable, cache_config_filepath)) - - exitcode, out, err = execute(cmd) - - self.assertEqual(0, exitcode) - self.assertEqual(b'', out.strip(), out) - - # Verify first image now in cache - path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - data = jsonutils.loads(content) - self.assertIn('cached_images', data) - - cached_images = data['cached_images'] - self.assertEqual(1, len(cached_images)) - self.assertIn(ids[0], [r['image_id'] - for r in data['cached_images']]) - - self.stop_servers() - - -class TestImageCacheXattr(functional.FunctionalTest, - BaseCacheMiddlewareTest): - - """Functional tests that exercise the image cache using the xattr driver""" - - def setUp(self): - """ - Test to see if the pre-requisites for the image cache - are working (python-xattr installed and xattr support on the - filesystem) - """ - if getattr(self, 'disabled', False): - return - - if not getattr(self, 'inited', False): - try: - import xattr # noqa - except ImportError: - self.inited = True - self.disabled = True - self.disabled_message = ("python-xattr not installed.") - return - - self.inited = True - self.disabled = False - self.image_cache_driver = "xattr" - - super(TestImageCacheXattr, self).setUp() - - self.api_server.deployment_flavor = "caching" - - if not xattr_writes_supported(self.test_dir): - 
self.inited = True - self.disabled = True - self.disabled_message = ("filesystem does not support xattr") - return - - def tearDown(self): - super(TestImageCacheXattr, self).tearDown() - if os.path.exists(self.api_server.image_cache_dir): - shutil.rmtree(self.api_server.image_cache_dir) - - -class TestImageCacheManageXattr(functional.FunctionalTest, - BaseCacheManageMiddlewareTest): - - """ - Functional tests that exercise the image cache management - with the Xattr cache driver - """ - - def setUp(self): - """ - Test to see if the pre-requisites for the image cache - are working (python-xattr installed and xattr support on the - filesystem) - """ - if getattr(self, 'disabled', False): - return - - if not getattr(self, 'inited', False): - try: - import xattr # noqa - except ImportError: - self.inited = True - self.disabled = True - self.disabled_message = ("python-xattr not installed.") - return - - self.inited = True - self.disabled = False - self.image_cache_driver = "xattr" - - super(TestImageCacheManageXattr, self).setUp() - - self.api_server.deployment_flavor = "cachemanagement" - - if not xattr_writes_supported(self.test_dir): - self.inited = True - self.disabled = True - self.disabled_message = ("filesystem does not support xattr") - return - - def tearDown(self): - super(TestImageCacheManageXattr, self).tearDown() - if os.path.exists(self.api_server.image_cache_dir): - shutil.rmtree(self.api_server.image_cache_dir) - - -class TestImageCacheSqlite(functional.FunctionalTest, - BaseCacheMiddlewareTest): - - """ - Functional tests that exercise the image cache using the - SQLite driver - """ - - def setUp(self): - """ - Test to see if the pre-requisites for the image cache - are working (python-xattr installed and xattr support on the - filesystem) - """ - if getattr(self, 'disabled', False): - return - - if not getattr(self, 'inited', False): - try: - import sqlite3 # noqa - except ImportError: - self.inited = True - self.disabled = True - 
self.disabled_message = ("python-sqlite3 not installed.") - return - - self.inited = True - self.disabled = False - - super(TestImageCacheSqlite, self).setUp() - - self.api_server.deployment_flavor = "caching" - - def tearDown(self): - super(TestImageCacheSqlite, self).tearDown() - if os.path.exists(self.api_server.image_cache_dir): - shutil.rmtree(self.api_server.image_cache_dir) - - -class TestImageCacheManageSqlite(functional.FunctionalTest, - BaseCacheManageMiddlewareTest): - - """ - Functional tests that exercise the image cache management using the - SQLite driver - """ - - def setUp(self): - """ - Test to see if the pre-requisites for the image cache - are working (python-xattr installed and xattr support on the - filesystem) - """ - if getattr(self, 'disabled', False): - return - - if not getattr(self, 'inited', False): - try: - import sqlite3 # noqa - except ImportError: - self.inited = True - self.disabled = True - self.disabled_message = ("python-sqlite3 not installed.") - return - - self.inited = True - self.disabled = False - self.image_cache_driver = "sqlite" - - super(TestImageCacheManageSqlite, self).setUp() - - self.api_server.deployment_flavor = "cachemanagement" - - def tearDown(self): - super(TestImageCacheManageSqlite, self).tearDown() - if os.path.exists(self.api_server.image_cache_dir): - shutil.rmtree(self.api_server.image_cache_dir) diff --git a/glance/tests/functional/test_client_exceptions.py b/glance/tests/functional/test_client_exceptions.py deleted file mode 100644 index 72489b9f..00000000 --- a/glance/tests/functional/test_client_exceptions.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# Copyright 2012 Red Hat, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Functional test asserting strongly typed exceptions from glance client""" - -import eventlet.patcher -import httplib2 -from six.moves import http_client -import webob.dec -import webob.exc - -from glance.common import client -from glance.common import exception -from glance.common import wsgi -from glance.tests import functional -from glance.tests import utils - - -eventlet.patcher.monkey_patch(socket=True) - - -class ExceptionTestApp(object): - """ - Test WSGI application which can respond with multiple kinds of HTTP - status codes - """ - - @webob.dec.wsgify - def __call__(self, request): - path = request.path_qs - - if path == "/rate-limit": - request.response = webob.exc.HTTPRequestEntityTooLarge() - - elif path == "/rate-limit-retry": - request.response.retry_after = 10 - request.response.status = http_client.REQUEST_ENTITY_TOO_LARGE - - elif path == "/service-unavailable": - request.response = webob.exc.HTTPServiceUnavailable() - - elif path == "/service-unavailable-retry": - request.response.retry_after = 10 - request.response.status = http_client.SERVICE_UNAVAILABLE - - elif path == "/expectation-failed": - request.response = webob.exc.HTTPExpectationFailed() - - elif path == "/server-error": - request.response = webob.exc.HTTPServerError() - - elif path == "/server-traceback": - raise exception.ServerError() - - -class TestClientExceptions(functional.FunctionalTest): - - def setUp(self): - super(TestClientExceptions, self).setUp() - self.port = utils.get_unused_port() - server = wsgi.Server() - self.config(bind_host='127.0.0.1') - 
self.config(workers=0) - server.start(ExceptionTestApp(), self.port) - self.client = client.BaseClient("127.0.0.1", self.port) - - def _do_test_exception(self, path, exc_type): - try: - self.client.do_request("GET", path) - self.fail('expected %s' % exc_type) - except exc_type as e: - if 'retry' in path: - self.assertEqual(10, e.retry_after) - - def test_rate_limited(self): - """ - Test rate limited response - """ - self._do_test_exception('/rate-limit', exception.LimitExceeded) - - def test_rate_limited_retry(self): - """ - Test rate limited response with retry - """ - self._do_test_exception('/rate-limit-retry', exception.LimitExceeded) - - def test_service_unavailable(self): - """ - Test service unavailable response - """ - self._do_test_exception('/service-unavailable', - exception.ServiceUnavailable) - - def test_service_unavailable_retry(self): - """ - Test service unavailable response with retry - """ - self._do_test_exception('/service-unavailable-retry', - exception.ServiceUnavailable) - - def test_expectation_failed(self): - """ - Test expectation failed response - """ - self._do_test_exception('/expectation-failed', - exception.UnexpectedStatus) - - def test_server_error(self): - """ - Test server error response - """ - self._do_test_exception('/server-error', - exception.ServerError) - - def test_server_traceback(self): - """ - Verify that the wsgi server does not return tracebacks to the client on - 500 errors (bug 1192132) - """ - http = httplib2.Http() - path = ('http://%s:%d/server-traceback' % - ('127.0.0.1', self.port)) - response, content = http.request(path, 'GET') - self.assertNotIn(b'ServerError', content) - self.assertEqual(http_client.INTERNAL_SERVER_ERROR, response.status) diff --git a/glance/tests/functional/test_client_redirects.py b/glance/tests/functional/test_client_redirects.py deleted file mode 100644 index a3e11569..00000000 --- a/glance/tests/functional/test_client_redirects.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2011 
OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Functional test cases testing glance client redirect-following.""" - -import eventlet.patcher -from six.moves import http_client as http -import webob.dec -import webob.exc - -from glance.common import client -from glance.common import exception -from glance.common import wsgi -from glance.tests import functional -from glance.tests import utils - - -eventlet.patcher.monkey_patch(socket=True) - - -def RedirectTestApp(name): - class App(object): - """ - Test WSGI application which can respond with multiple kinds of HTTP - redirects and is used to verify Glance client redirects. - """ - def __init__(self): - """ - Initialize app with a name and port. - """ - self.name = name - - @webob.dec.wsgify - def __call__(self, request): - """ - Handles all requests to the application. 
- """ - base = "http://%s" % request.host - path = request.path_qs - - if path == "/": - return "root" - - elif path == "/302": - url = "%s/success" % base - raise webob.exc.HTTPFound(location=url) - - elif path == "/302?with_qs=yes": - url = "%s/success?with_qs=yes" % base - raise webob.exc.HTTPFound(location=url) - - elif path == "/infinite_302": - raise webob.exc.HTTPFound(location=request.url) - - elif path.startswith("/redirect-to"): - url = "http://127.0.0.1:%s/success" % path.split("-")[-1] - raise webob.exc.HTTPFound(location=url) - - elif path == "/success": - return "success_from_host_%s" % self.name - - elif path == "/success?with_qs=yes": - return "success_with_qs" - - return "fail" - - return App - - -class TestClientRedirects(functional.FunctionalTest): - - def setUp(self): - super(TestClientRedirects, self).setUp() - self.port_one = utils.get_unused_port() - self.port_two = utils.get_unused_port() - server_one = wsgi.Server() - server_two = wsgi.Server() - self.config(bind_host='127.0.0.1') - self.config(workers=0) - server_one.start(RedirectTestApp("one")(), self.port_one) - server_two.start(RedirectTestApp("two")(), self.port_two) - self.client = client.BaseClient("127.0.0.1", self.port_one) - - def test_get_without_redirect(self): - """ - Test GET with no redirect - """ - response = self.client.do_request("GET", "/") - self.assertEqual(http.OK, response.status) - self.assertEqual(b"root", response.read()) - - def test_get_with_one_redirect(self): - """ - Test GET with one 302 FOUND redirect - """ - response = self.client.do_request("GET", "/302") - self.assertEqual(http.OK, response.status) - self.assertEqual(b"success_from_host_one", response.read()) - - def test_get_with_one_redirect_query_string(self): - """ - Test GET with one 302 FOUND redirect w/ a query string - """ - response = self.client.do_request("GET", "/302", - params={'with_qs': 'yes'}) - self.assertEqual(http.OK, response.status) - self.assertEqual(b"success_with_qs", 
response.read()) - - def test_get_with_max_redirects(self): - """ - Test we don't redirect forever. - """ - self.assertRaises(exception.MaxRedirectsExceeded, - self.client.do_request, - "GET", - "/infinite_302") - - def test_post_redirect(self): - """ - Test POST with 302 redirect - """ - response = self.client.do_request("POST", "/302") - self.assertEqual(http.OK, response.status) - self.assertEqual(b"success_from_host_one", response.read()) - - def test_redirect_to_new_host(self): - """ - Test redirect to one host and then another. - """ - url = "/redirect-to-%d" % self.port_two - response = self.client.do_request("POST", url) - - self.assertEqual(http.OK, response.status) - self.assertEqual(b"success_from_host_two", response.read()) - - response = self.client.do_request("POST", "/success") - self.assertEqual(http.OK, response.status) - self.assertEqual(b"success_from_host_one", response.read()) diff --git a/glance/tests/functional/test_cors_middleware.py b/glance/tests/functional/test_cors_middleware.py deleted file mode 100644 index 5e65ffe8..00000000 --- a/glance/tests/functional/test_cors_middleware.py +++ /dev/null @@ -1,86 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests cors middleware.""" - -import httplib2 -from six.moves import http_client - -from glance.tests import functional - - -class TestCORSMiddleware(functional.FunctionalTest): - '''Provide a basic smoke test to ensure CORS middleware is active. 
- - The tests below provide minimal confirmation that the CORS middleware - is active, and may be configured. For comprehensive tests, please consult - the test suite in oslo_middleware. - ''' - - def setUp(self): - super(TestCORSMiddleware, self).setUp() - # Cleanup is handled in teardown of the parent class. - self.start_servers(**self.__dict__.copy()) - self.http = httplib2.Http() - self.api_path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - - def test_valid_cors_options_request(self): - (r_headers, content) = self.http.request( - self.api_path, - 'OPTIONS', - headers={ - 'Origin': 'http://valid.example.com', - 'Access-Control-Request-Method': 'GET' - }) - - self.assertEqual(http_client.OK, r_headers.status) - self.assertIn('access-control-allow-origin', r_headers) - self.assertEqual('http://valid.example.com', - r_headers['access-control-allow-origin']) - - def test_invalid_cors_options_request(self): - (r_headers, content) = self.http.request( - self.api_path, - 'OPTIONS', - headers={ - 'Origin': 'http://invalid.example.com', - 'Access-Control-Request-Method': 'GET' - }) - - self.assertEqual(http_client.OK, r_headers.status) - self.assertNotIn('access-control-allow-origin', r_headers) - - def test_valid_cors_get_request(self): - (r_headers, content) = self.http.request( - self.api_path, - 'GET', - headers={ - 'Origin': 'http://valid.example.com' - }) - - self.assertEqual(http_client.OK, r_headers.status) - self.assertIn('access-control-allow-origin', r_headers) - self.assertEqual('http://valid.example.com', - r_headers['access-control-allow-origin']) - - def test_invalid_cors_get_request(self): - (r_headers, content) = self.http.request( - self.api_path, - 'GET', - headers={ - 'Origin': 'http://invalid.example.com' - }) - - self.assertEqual(http_client.OK, r_headers.status) - self.assertNotIn('access-control-allow-origin', r_headers) diff --git a/glance/tests/functional/test_glance_manage.py b/glance/tests/functional/test_glance_manage.py deleted 
file mode 100644 index 655f09e0..00000000 --- a/glance/tests/functional/test_glance_manage.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2012 Red Hat, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Functional test cases for glance-manage""" - -import os -import sys - -from glance.common import utils -from glance.tests import functional -from glance.tests.utils import depends_on_exe -from glance.tests.utils import execute -from glance.tests.utils import skip_if_disabled - - -class TestGlanceManage(functional.FunctionalTest): - """Functional tests for glance-manage""" - - def setUp(self): - super(TestGlanceManage, self).setUp() - conf_dir = os.path.join(self.test_dir, 'etc') - utils.safe_mkdirs(conf_dir) - self.conf_filepath = os.path.join(conf_dir, 'glance-manage.conf') - self.db_filepath = os.path.join(self.test_dir, 'tests.sqlite') - self.connection = ('sql_connection = sqlite:///%s' % - self.db_filepath) - - def _sync_db(self): - with open(self.conf_filepath, 'w') as conf_file: - conf_file.write('[DEFAULT]\n') - conf_file.write(self.connection) - conf_file.flush() - - cmd = ('%s -m glance.cmd.manage --config-file %s db sync' % - (sys.executable, self.conf_filepath)) - execute(cmd, raise_error=True) - - def _assert_table_exists(self, db_table): - cmd = ("sqlite3 {0} \"SELECT name FROM sqlite_master WHERE " - "type='table' AND name='{1}'\"").format(self.db_filepath, - db_table) - exitcode, out, err = execute(cmd, raise_error=True) - 
msg = "Expected table {0} was not found in the schema".format(db_table) - self.assertEqual(out.rstrip().decode("utf-8"), db_table, msg) - - @depends_on_exe('sqlite3') - @skip_if_disabled - def test_db_creation(self): - """Test schema creation by db_sync on a fresh DB""" - self._sync_db() - - for table in ['images', 'image_tags', 'image_locations', - 'image_members', 'image_properties']: - self._assert_table_exists(table) diff --git a/glance/tests/functional/test_glance_replicator.py b/glance/tests/functional/test_glance_replicator.py deleted file mode 100644 index 589cf531..00000000 --- a/glance/tests/functional/test_glance_replicator.py +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Functional test cases for glance-replicator""" - -import sys - -from glance.tests import functional -from glance.tests.utils import execute - - -class TestGlanceReplicator(functional.FunctionalTest): - """Functional tests for glance-replicator""" - - def test_compare(self): - # Test for issue: https://bugs.launchpad.net/glance/+bug/1598928 - cmd = ('%s -m glance.cmd.replicator ' - 'compare az1:9292 az2:9292 --debug' % - (sys.executable,)) - exitcode, out, err = execute(cmd, raise_error=False) - self.assertIn( - b'Request: GET http://az1:9292/v1/images/detail?is_public=None', - err - ) diff --git a/glance/tests/functional/test_gzip_middleware.py b/glance/tests/functional/test_gzip_middleware.py deleted file mode 100644 index c3bc9ed1..00000000 --- a/glance/tests/functional/test_gzip_middleware.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2013 Red Hat, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests gzip middleware.""" - -import httplib2 - -from glance.tests import functional -from glance.tests import utils - - -class GzipMiddlewareTest(functional.FunctionalTest): - - @utils.skip_if_disabled - def test_gzip_requests(self): - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - def request(path, headers=None): - # We don't care what version we're using here so, - # sticking with latest - url = 'http://127.0.0.1:%s/v2/%s' % (self.api_port, path) - http = httplib2.Http() - return http.request(url, 'GET', headers=headers) - - # Accept-Encoding: Identity - headers = {'Accept-Encoding': 'identity'} - response, content = request('images', headers=headers) - self.assertIsNone(response.get("-content-encoding")) - - # Accept-Encoding: gzip - headers = {'Accept-Encoding': 'gzip'} - response, content = request('images', headers=headers) - self.assertEqual('gzip', response.get("-content-encoding")) - - self.stop_servers() diff --git a/glance/tests/functional/test_healthcheck_middleware.py b/glance/tests/functional/test_healthcheck_middleware.py deleted file mode 100644 index 656cd40b..00000000 --- a/glance/tests/functional/test_healthcheck_middleware.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2015 Hewlett Packard -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests healthcheck middleware.""" - -import tempfile - -import httplib2 -from six.moves import http_client - -from glance.tests import functional -from glance.tests import utils - - -class HealthcheckMiddlewareTest(functional.FunctionalTest): - - def request(self): - url = 'http://127.0.0.1:%s/healthcheck' % self.api_port - http = httplib2.Http() - return http.request(url, 'GET') - - @utils.skip_if_disabled - def test_healthcheck_enabled(self): - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - response, content = self.request() - self.assertEqual(b'OK', content) - self.assertEqual(http_client.OK, response.status) - - self.stop_servers() - - def test_healthcheck_disabled(self): - with tempfile.NamedTemporaryFile() as test_disable_file: - self.cleanup() - self.api_server.disable_path = test_disable_file.name - self.start_servers(**self.__dict__.copy()) - - response, content = self.request() - self.assertEqual(b'DISABLED BY FILE', content) - self.assertEqual(http_client.SERVICE_UNAVAILABLE, response.status) - - self.stop_servers() diff --git a/glance/tests/functional/test_logging.py b/glance/tests/functional/test_logging.py deleted file mode 100644 index 135d5601..00000000 --- a/glance/tests/functional/test_logging.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Functional test case that tests logging output""" - -import os -import stat - -import httplib2 -from six.moves import http_client as http - -from glance.tests import functional - - -class TestLogging(functional.FunctionalTest): - - """Functional tests for Glance's logging output""" - - def test_debug(self): - """ - Test logging output proper when debug is on. - """ - self.cleanup() - self.start_servers() - - # The default functional test case has both debug on. Let's verify - # that debug statements appear in both the API and registry logs. - - self.assertTrue(os.path.exists(self.api_server.log_file)) - - with open(self.api_server.log_file, 'r') as f: - api_log_out = f.read() - - self.assertIn('DEBUG glance', api_log_out) - - self.assertTrue(os.path.exists(self.registry_server.log_file)) - - with open(self.registry_server.log_file, 'r') as freg: - registry_log_out = freg.read() - - self.assertIn('DEBUG glance', registry_log_out) - - self.stop_servers() - - def test_no_debug(self): - """ - Test logging output proper when debug is off. 
- """ - self.cleanup() - self.start_servers(debug=False) - - self.assertTrue(os.path.exists(self.api_server.log_file)) - - with open(self.api_server.log_file, 'r') as f: - api_log_out = f.read() - - self.assertNotIn('DEBUG glance', api_log_out) - - self.assertTrue(os.path.exists(self.registry_server.log_file)) - - with open(self.registry_server.log_file, 'r') as freg: - registry_log_out = freg.read() - - self.assertNotIn('DEBUG glance', registry_log_out) - - self.stop_servers() - - def assertNotEmptyFile(self, path): - self.assertTrue(os.path.exists(path)) - self.assertNotEqual(os.stat(path)[stat.ST_SIZE], 0) - - def test_logrotate(self): - """ - Test that we notice when our log file has been rotated - """ - self.cleanup() - self.start_servers() - - self.assertNotEmptyFile(self.api_server.log_file) - - os.rename(self.api_server.log_file, self.api_server.log_file + ".1") - - path = "http://%s:%d/" % ("127.0.0.1", self.api_port) - response, content = httplib2.Http().request(path, 'GET') - self.assertEqual(http.MULTIPLE_CHOICES, response.status) - - self.assertNotEmptyFile(self.api_server.log_file) - - self.stop_servers() diff --git a/glance/tests/functional/test_reload.py b/glance/tests/functional/test_reload.py deleted file mode 100644 index 006e020a..00000000 --- a/glance/tests/functional/test_reload.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import re -import time -import unittest - -import psutil -import requests -import six -from six.moves import http_client as http - -from glance.tests import functional -from glance.tests.utils import execute - -TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), - '../', 'var')) - - -def set_config_value(filepath, key, value): - """Set 'key = value' in config file""" - replacement_line = '%s = %s\n' % (key, value) - match = re.compile('^%s\s+=' % key).match - with open(filepath, 'r+') as f: - lines = f.readlines() - f.seek(0, 0) - f.truncate() - for line in lines: - f.write(line if not match(line) else replacement_line) - - -class TestReload(functional.FunctionalTest): - """Test configuration reload""" - - def setUp(self): - self.workers = 1 - super(TestReload, self).setUp() - - def tearDown(self): - self.stop_servers() - super(TestReload, self).tearDown() - - def ticker(self, message, seconds=60, tick=0.01): - """ - Allows repeatedly testing for an expected result - for a finite amount of time. - - :param message: Message to display on timeout - :param seconds: Time in seconds after which we timeout - :param tick: Time to sleep before rechecking for expected result - :returns: 'True' or fails the test with 'message' on timeout - """ - # We default to allowing 60 seconds timeout but - # typically only a few hundredths of a second - # are needed. 
- num_ticks = seconds * (1.0 / tick) - count = 0 - while count < num_ticks: - count += 1 - time.sleep(tick) - yield - self.fail(message) - - def _get_children(self, server): - pid = None - pid = self._get_parent(server) - process = psutil.Process(pid) - try: - # psutils version >= 2 - children = process.children() - except AttributeError: - # psutils version < 2 - children = process.get_children() - pids = set() - for child in children: - pids.add(child.pid) - return pids - - def _get_parent(self, server): - if server == 'api': - return self.api_server.process_pid - elif server == 'registry': - return self.registry_server.process_pid - - def _conffile(self, service): - conf_dir = os.path.join(self.test_dir, 'etc') - conf_filepath = os.path.join(conf_dir, '%s.conf' % service) - return conf_filepath - - def _url(self, protocol, path): - return '%s://127.0.0.1:%d%s' % (protocol, self.api_port, path) - - @unittest.skipIf(six.PY3, 'SSL handshakes are broken in PY3') - def test_reload(self): - """Test SIGHUP picks up new config values""" - def check_pids(pre, post=None, workers=2): - if post is None: - if len(pre) == workers: - return True - else: - return False - if len(post) == workers: - # Check new children have different pids - if post.intersection(pre) == set(): - return True - return False - self.api_server.fork_socket = False - self.registry_server.fork_socket = False - self.start_servers(fork_socket=False, **vars(self)) - - pre_pids = {} - post_pids = {} - - # Test changing the workers value creates all new children - # This recycles the existing socket - msg = 'Start timeout' - for _ in self.ticker(msg): - for server in ('api', 'registry'): - pre_pids[server] = self._get_children(server) - if check_pids(pre_pids['api'], workers=1): - if check_pids(pre_pids['registry'], workers=1): - break - - for server in ('api', 'registry'): - # Labour costs have fallen - set_config_value(self._conffile(server), 'workers', '2') - cmd = "kill -HUP %s" % 
self._get_parent(server) - execute(cmd, raise_error=True) - - msg = 'Worker change timeout' - for _ in self.ticker(msg): - for server in ('api', 'registry'): - post_pids[server] = self._get_children(server) - if check_pids(pre_pids['registry'], post_pids['registry']): - if check_pids(pre_pids['api'], post_pids['api']): - break - - # Test changing from http to https - # This recycles the existing socket - path = self._url('http', '/') - response = requests.get(path) - self.assertEqual(http.MULTIPLE_CHOICES, response.status_code) - del response # close socket so that process audit is reliable - - pre_pids['api'] = self._get_children('api') - key_file = os.path.join(TEST_VAR_DIR, 'privatekey.key') - set_config_value(self._conffile('api'), 'key_file', key_file) - cert_file = os.path.join(TEST_VAR_DIR, 'certificate.crt') - set_config_value(self._conffile('api'), 'cert_file', cert_file) - cmd = "kill -HUP %s" % self._get_parent('api') - execute(cmd, raise_error=True) - - msg = 'http to https timeout' - for _ in self.ticker(msg): - post_pids['api'] = self._get_children('api') - if check_pids(pre_pids['api'], post_pids['api']): - break - - ca_file = os.path.join(TEST_VAR_DIR, 'ca.crt') - path = self._url('https', '/') - response = requests.get(path, verify=ca_file) - self.assertEqual(http.MULTIPLE_CHOICES, response.status_code) - del response - - # Test https restart - # This recycles the existing socket - pre_pids['api'] = self._get_children('api') - cmd = "kill -HUP %s" % self._get_parent('api') - execute(cmd, raise_error=True) - - msg = 'https restart timeout' - for _ in self.ticker(msg): - post_pids['api'] = self._get_children('api') - if check_pids(pre_pids['api'], post_pids['api']): - break - - ca_file = os.path.join(TEST_VAR_DIR, 'ca.crt') - path = self._url('https', '/') - response = requests.get(path, verify=ca_file) - self.assertEqual(http.MULTIPLE_CHOICES, response.status_code) - del response - - # Test changing the https bind_host - # This requires a new socket 
- pre_pids['api'] = self._get_children('api') - set_config_value(self._conffile('api'), 'bind_host', '127.0.0.1') - cmd = "kill -HUP %s" % self._get_parent('api') - execute(cmd, raise_error=True) - - msg = 'https bind_host timeout' - for _ in self.ticker(msg): - post_pids['api'] = self._get_children('api') - if check_pids(pre_pids['api'], post_pids['api']): - break - - path = self._url('https', '/') - response = requests.get(path, verify=ca_file) - self.assertEqual(http.MULTIPLE_CHOICES, response.status_code) - del response - - # Test https -> http - # This recycles the existing socket - pre_pids['api'] = self._get_children('api') - set_config_value(self._conffile('api'), 'key_file', '') - set_config_value(self._conffile('api'), 'cert_file', '') - cmd = "kill -HUP %s" % self._get_parent('api') - execute(cmd, raise_error=True) - - msg = 'https to http timeout' - for _ in self.ticker(msg): - post_pids['api'] = self._get_children('api') - if check_pids(pre_pids['api'], post_pids['api']): - break - - path = self._url('http', '/') - response = requests.get(path) - self.assertEqual(http.MULTIPLE_CHOICES, response.status_code) - del response - - # Test changing the http bind_host - # This requires a new socket - pre_pids['api'] = self._get_children('api') - set_config_value(self._conffile('api'), 'bind_host', '127.0.0.1') - cmd = "kill -HUP %s" % self._get_parent('api') - execute(cmd, raise_error=True) - - msg = 'http bind_host timeout' - for _ in self.ticker(msg): - post_pids['api'] = self._get_children('api') - if check_pids(pre_pids['api'], post_pids['api']): - break - - path = self._url('http', '/') - response = requests.get(path) - self.assertEqual(http.MULTIPLE_CHOICES, response.status_code) - del response - - # Test logging configuration change - # This recycles the existing socket - conf_dir = os.path.join(self.test_dir, 'etc') - log_file = conf_dir + 'new.log' - self.assertFalse(os.path.exists(log_file)) - set_config_value(self._conffile('api'), 'log_file', 
log_file) - cmd = "kill -HUP %s" % self._get_parent('api') - execute(cmd, raise_error=True) - msg = 'No new log file created' - for _ in self.ticker(msg): - if os.path.exists(log_file): - break diff --git a/glance/tests/functional/test_scrubber.py b/glance/tests/functional/test_scrubber.py deleted file mode 100644 index 01e6febb..00000000 --- a/glance/tests/functional/test_scrubber.py +++ /dev/null @@ -1,323 +0,0 @@ -# Copyright 2011-2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import sys -import time - -import httplib2 -from oslo_serialization import jsonutils -from oslo_utils import units -from six.moves import http_client -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from glance.tests import functional -from glance.tests.utils import execute - - -TEST_IMAGE_DATA = '*' * 5 * units.Ki -TEST_IMAGE_META = { - 'name': 'test_image', - 'is_public': False, - 'disk_format': 'raw', - 'container_format': 'ovf', -} - - -class TestScrubber(functional.FunctionalTest): - - """Test that delayed_delete works and the scrubber deletes""" - - def _send_http_request(self, path, method, body=None): - headers = { - 'x-image-meta-name': 'test_image', - 'x-image-meta-is_public': 'true', - 'x-image-meta-disk_format': 'raw', - 'x-image-meta-container_format': 'ovf', - 'content-type': 'application/octet-stream' - } - return httplib2.Http().request(path, method, body, headers) - - def test_delayed_delete(self): - """ - test that images don't get deleted immediately and that the scrubber - scrubs them - """ - self.cleanup() - self.start_servers(delayed_delete=True, daemon=True, - metadata_encryption_key='') - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - response, content = self._send_http_request(path, 'POST', body='XXX') - self.assertEqual(http_client.CREATED, response.status) - image = jsonutils.loads(content)['image'] - self.assertEqual('active', image['status']) - - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image['id']) - response, content = self._send_http_request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - response, content = self._send_http_request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('pending_delete', response['x-image-meta-status']) - - self.wait_for_scrub(path) - - self.stop_servers() - - def test_delayed_delete_with_trustedauth_registry(self): - """ - test that images 
don't get deleted immediately and that the scrubber - scrubs them when registry is operating in trustedauth mode - """ - self.cleanup() - self.api_server.deployment_flavor = 'noauth' - self.registry_server.deployment_flavor = 'trusted-auth' - self.start_servers(delayed_delete=True, daemon=True, - metadata_encryption_key='', - send_identity_headers=True) - base_headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': 'deae8923-075d-4287-924b-840fb2644874', - 'X-Roles': 'admin', - } - headers = { - 'x-image-meta-name': 'test_image', - 'x-image-meta-is_public': 'true', - 'x-image-meta-disk_format': 'raw', - 'x-image-meta-container_format': 'ovf', - 'content-type': 'application/octet-stream', - } - headers.update(base_headers) - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', body='XXX', - headers=headers) - self.assertEqual(http_client.CREATED, response.status) - image = jsonutils.loads(content)['image'] - self.assertEqual('active', image['status']) - image_id = image['id'] - - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'DELETE', headers=base_headers) - self.assertEqual(http_client.OK, response.status) - - response, content = http.request(path, 'HEAD', headers=base_headers) - self.assertEqual(http_client.OK, response.status) - self.assertEqual('pending_delete', response['x-image-meta-status']) - - self.wait_for_scrub(path, headers=base_headers) - - self.stop_servers() - - def test_scrubber_app(self): - """ - test that the glance-scrubber script runs successfully when not in - daemon mode - """ - self.cleanup() - self.start_servers(delayed_delete=True, daemon=False, - metadata_encryption_key='') - - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - 
response, content = self._send_http_request(path, 'POST', body='XXX') - self.assertEqual(http_client.CREATED, response.status) - image = jsonutils.loads(content)['image'] - self.assertEqual('active', image['status']) - - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image['id']) - response, content = self._send_http_request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - response, content = self._send_http_request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('pending_delete', response['x-image-meta-status']) - - # wait for the scrub time on the image to pass - time.sleep(self.api_server.scrub_time) - - # scrub images and make sure they get deleted - exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable - cmd = ("%s --config-file %s" % - (exe_cmd, self.scrubber_daemon.conf_file_name)) - exitcode, out, err = execute(cmd, raise_error=False) - self.assertEqual(0, exitcode) - - self.wait_for_scrub(path) - - self.stop_servers() - - def test_scrubber_app_with_trustedauth_registry(self): - """ - test that the glance-scrubber script runs successfully when not in - daemon mode and with a registry that operates in trustedauth mode - """ - self.cleanup() - self.api_server.deployment_flavor = 'noauth' - self.registry_server.deployment_flavor = 'trusted-auth' - self.start_servers(delayed_delete=True, daemon=False, - metadata_encryption_key='', - send_identity_headers=True) - base_headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': 'deae8923-075d-4287-924b-840fb2644874', - 'X-Roles': 'admin', - } - headers = { - 'x-image-meta-name': 'test_image', - 'x-image-meta-is_public': 'true', - 'x-image-meta-disk_format': 'raw', - 'x-image-meta-container_format': 'ovf', - 'content-type': 'application/octet-stream', - } - headers.update(base_headers) - path = 
"http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', body='XXX', - headers=headers) - self.assertEqual(http_client.CREATED, response.status) - image = jsonutils.loads(content)['image'] - self.assertEqual('active', image['status']) - image_id = image['id'] - - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'DELETE', headers=base_headers) - self.assertEqual(http_client.OK, response.status) - - response, content = http.request(path, 'HEAD', headers=base_headers) - self.assertEqual(http_client.OK, response.status) - self.assertEqual('pending_delete', response['x-image-meta-status']) - - # wait for the scrub time on the image to pass - time.sleep(self.api_server.scrub_time) - - # scrub images and make sure they get deleted - exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable - cmd = ("%s --config-file %s" % - (exe_cmd, self.scrubber_daemon.conf_file_name)) - exitcode, out, err = execute(cmd, raise_error=False) - self.assertEqual(0, exitcode) - - self.wait_for_scrub(path, headers=base_headers) - - self.stop_servers() - - def test_scrubber_delete_handles_exception(self): - """ - Test that the scrubber handles the case where an - exception occurs when _delete() is called. The scrubber - should not write out queue files in this case. - """ - - # Start servers. - self.cleanup() - self.start_servers(delayed_delete=True, daemon=False, - default_store='file') - - # Check that we are using a file backend. 
- self.assertEqual(self.api_server.default_store, 'file') - - # add an image - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - response, content = self._send_http_request(path, 'POST', body='XXX') - self.assertEqual(http_client.CREATED, response.status) - image = jsonutils.loads(content)['image'] - self.assertEqual('active', image['status']) - - # delete the image - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image['id']) - response, content = self._send_http_request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - # ensure the image is marked pending delete - response, content = self._send_http_request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('pending_delete', response['x-image-meta-status']) - - # Remove the file from the backend. - file_path = os.path.join(self.api_server.image_dir, image['id']) - os.remove(file_path) - - # Wait for the scrub time on the image to pass - time.sleep(self.api_server.scrub_time) - - # run the scrubber app, and ensure it doesn't fall over - exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable - cmd = ("%s --config-file %s" % - (exe_cmd, self.scrubber_daemon.conf_file_name)) - exitcode, out, err = execute(cmd, raise_error=False) - self.assertEqual(0, exitcode) - - self.wait_for_scrub(path) - - self.stop_servers() - - def test_scrubber_app_queue_errors_not_daemon(self): - """ - test that the glance-scrubber exits with an exit code > 0 when it - fails to lookup images, indicating a configuration error when not - in daemon mode. - - Related-Bug: #1548289 - """ - # Don't start the registry server to cause intended failure - # Don't start the api server to save time - exitcode, out, err = self.scrubber_daemon.start( - delayed_delete=True, daemon=False, registry_port=28890) - self.assertEqual(0, exitcode, - "Failed to spin up the Scrubber daemon. 
" - "Got: %s" % err) - - # Run the Scrubber - exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable - cmd = ("%s --config-file %s" % - (exe_cmd, self.scrubber_daemon.conf_file_name)) - exitcode, out, err = execute(cmd, raise_error=False) - - self.assertEqual(1, exitcode) - self.assertIn('Can not get scrub jobs from queue', str(err)) - - self.stop_server(self.scrubber_daemon) - - def wait_for_scrub(self, path, headers=None): - """ - NOTE(jkoelker) The build servers sometimes take longer than 15 seconds - to scrub. Give it up to 5 min, checking checking every 15 seconds. - When/if it flips to deleted, bail immediately. - """ - http = httplib2.Http() - wait_for = 300 # seconds - check_every = 15 # seconds - for _ in range(wait_for // check_every): - time.sleep(check_every) - - response, content = http.request(path, 'HEAD', headers=headers) - if (response['x-image-meta-status'] == 'deleted' and - response['x-image-meta-deleted'] == 'True'): - break - else: - continue - else: - self.fail('image was never scrubbed') diff --git a/glance/tests/functional/test_sqlite.py b/glance/tests/functional/test_sqlite.py deleted file mode 100644 index 8957573b..00000000 --- a/glance/tests/functional/test_sqlite.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2012 Red Hat, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Functional test cases for sqlite-specific logic""" - - -from glance.tests import functional -from glance.tests.utils import depends_on_exe -from glance.tests.utils import execute -from glance.tests.utils import skip_if_disabled - - -class TestSqlite(functional.FunctionalTest): - """Functional tests for sqlite-specific logic""" - - @depends_on_exe('sqlite3') - @skip_if_disabled - def test_big_int_mapping(self): - """Ensure BigInteger not mapped to BIGINT""" - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - cmd = "sqlite3 tests.sqlite '.schema'" - exitcode, out, err = execute(cmd, raise_error=True) - - self.assertNotIn('BIGINT', out) - - self.stop_servers() diff --git a/glance/tests/functional/test_ssl.py b/glance/tests/functional/test_ssl.py deleted file mode 100644 index 750fdef3..00000000 --- a/glance/tests/functional/test_ssl.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import unittest - -import httplib2 -import six -from six.moves import http_client as http - -from glance.tests import functional - -TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', 'var')) - - -class TestSSL(functional.FunctionalTest): - - """Functional tests verifying SSL communication""" - - def setUp(self): - super(TestSSL, self).setUp() - - if getattr(self, 'inited', False): - return - - self.inited = False - self.disabled = True - - # NOTE (stevelle): Test key/cert/CA file created as per: - # http://nrocco.github.io/2013/01/25/ - # self-signed-ssl-certificate-chains.html - # For these tests certificate.crt must be created with 'Common Name' - # set to 127.0.0.1 - - self.key_file = os.path.join(TEST_VAR_DIR, 'privatekey.key') - if not os.path.exists(self.key_file): - self.disabled_message = ("Could not find private key file %s" % - self.key_file) - self.inited = True - return - - self.cert_file = os.path.join(TEST_VAR_DIR, 'certificate.crt') - if not os.path.exists(self.cert_file): - self.disabled_message = ("Could not find certificate file %s" % - self.cert_file) - self.inited = True - return - - self.ca_file = os.path.join(TEST_VAR_DIR, 'ca.crt') - if not os.path.exists(self.ca_file): - self.disabled_message = ("Could not find CA file %s" % - self.ca_file) - self.inited = True - return - - self.inited = True - self.disabled = False - - def tearDown(self): - super(TestSSL, self).tearDown() - if getattr(self, 'inited', False): - return - - @unittest.skipIf(six.PY3, 'SSL handshakes are broken in PY3') - def test_ssl_ok(self): - """Make sure the public API works with HTTPS.""" - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - path = "https://%s:%d/versions" % ("127.0.0.1", self.api_port) - https = httplib2.Http(ca_certs=self.ca_file) - response, content = https.request(path, 'GET') - self.assertEqual(http.OK, response.status) diff --git a/glance/tests/functional/test_wsgi.py 
b/glance/tests/functional/test_wsgi.py deleted file mode 100644 index e375e3b8..00000000 --- a/glance/tests/functional/test_wsgi.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for `glance.wsgi`.""" - -import socket -import time - -from oslo_config import cfg -import testtools - -from glance.common import wsgi - -CONF = cfg.CONF - - -class TestWSGIServer(testtools.TestCase): - """WSGI server tests.""" - def test_client_socket_timeout(self): - CONF.set_default("workers", 0) - CONF.set_default("client_socket_timeout", 1) - """Verify connections are timed out as per 'client_socket_timeout'""" - greetings = b'Hello, World!!!' 
- - def hello_world(env, start_response): - start_response('200 OK', [('Content-Type', 'text/plain')]) - return [greetings] - - server = wsgi.Server() - server.start(hello_world, 0) - port = server.sock.getsockname()[1] - - def get_request(delay=0.0): - sock = socket.socket() - sock.connect(('127.0.0.1', port)) - time.sleep(delay) - sock.send(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') - return sock.recv(1024) - - # Should succeed - no timeout - self.assertIn(greetings, get_request()) - # Should fail - connection timed out so we get nothing from the server - self.assertFalse(get_request(delay=1.1)) diff --git a/glance/tests/functional/v1/__init__.py b/glance/tests/functional/v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/functional/v1/test_api.py b/glance/tests/functional/v1/test_api.py deleted file mode 100644 index b2d4539c..00000000 --- a/glance/tests/functional/v1/test_api.py +++ /dev/null @@ -1,961 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Functional test case that utilizes httplib2 against the API server""" - -import hashlib - -import httplib2 -import sys - -from oslo_serialization import jsonutils -from oslo_utils import units -from six.moves import http_client -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from glance.tests import functional -from glance.tests.utils import minimal_headers -from glance.tests.utils import skip_if_disabled - -FIVE_KB = 5 * units.Ki -FIVE_GB = 5 * units.Gi - - -class TestApi(functional.FunctionalTest): - - """Functional tests using httplib2 against the API server""" - - def _check_image_create(self, headers, status=http_client.CREATED, - image_data="*" * FIVE_KB): - # performs image_create request, checks the response and returns - # content - http = httplib2.Http() - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - response, content = http.request( - path, 'POST', headers=headers, body=image_data) - self.assertEqual(status, response.status) - return content.decode() - - def test_checksum_32_chars_at_image_create(self): - self.cleanup() - self.start_servers(**self.__dict__.copy()) - headers = minimal_headers('Image1') - image_data = b"*" * FIVE_KB - - # checksum can be no longer that 32 characters (String(32)) - headers['X-Image-Meta-Checksum'] = 'x' * 42 - content = self._check_image_create(headers, http_client.BAD_REQUEST) - self.assertIn("Invalid checksum", content) - # test positive case as well - headers['X-Image-Meta-Checksum'] = hashlib.md5(image_data).hexdigest() - self._check_image_create(headers) - - def test_param_int_too_large_at_create(self): - # currently 2 params min_disk/min_ram can cause DBError on save - self.cleanup() - self.start_servers(**self.__dict__.copy()) - # Integer field can't be greater than max 8-byte signed integer - for param in ['min_disk', 'min_ram']: - headers = minimal_headers('Image1') - # check that long numbers result in 400 - headers['X-Image-Meta-%s' % 
param] = str(sys.maxsize + 1) - content = self._check_image_create(headers, - http_client.BAD_REQUEST) - self.assertIn("'%s' value out of range" % param, content) - # check that integers over 4 byte result in 400 - headers['X-Image-Meta-%s' % param] = str(2 ** 31) - content = self._check_image_create(headers, - http_client.BAD_REQUEST) - self.assertIn("'%s' value out of range" % param, content) - # verify positive case as well - headers['X-Image-Meta-%s' % param] = str((2 ** 31) - 1) - self._check_image_create(headers) - - def test_updating_is_public(self): - """Verify that we can update the is_public attribute.""" - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - # Verify no public images - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('{"images": []}', content.decode()) - - # Verify no public images - path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('{"images": []}', content.decode()) - - # POST /images with private image named Image1 - # attribute and no custom properties. 
Verify a 200 OK is returned - image_data = b"*" * FIVE_KB - headers = minimal_headers('Image1', public=False) - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers, - body=image_data) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['image']['id'] - self.assertEqual(hashlib.md5(image_data).hexdigest(), - data['image']['checksum']) - self.assertEqual(FIVE_KB, data['image']['size']) - self.assertEqual("Image1", data['image']['name']) - self.assertFalse(data['image']['is_public']) - - # Retrieve image again to verify it was created appropriately - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - expected_image_headers = { - 'x-image-meta-id': image_id, - 'x-image-meta-name': 'Image1', - 'x-image-meta-is_public': 'False', - 'x-image-meta-status': 'active', - 'x-image-meta-disk_format': 'raw', - 'x-image-meta-container_format': 'ovf', - 'x-image-meta-size': str(FIVE_KB)} - - expected_std_headers = { - 'content-length': str(FIVE_KB), - 'content-type': 'application/octet-stream'} - - for expected_key, expected_value in expected_image_headers.items(): - self.assertEqual(expected_value, response[expected_key], - "For key '%s' expected header value '%s'. " - "Got '%s'" % (expected_key, - expected_value, - response[expected_key])) - - for expected_key, expected_value in expected_std_headers.items(): - self.assertEqual(expected_value, response[expected_key], - "For key '%s' expected header value '%s'. 
" - "Got '%s'" % (expected_key, - expected_value, - response[expected_key])) - - self.assertEqual(image_data, content) - self.assertEqual(hashlib.md5(image_data).hexdigest(), - hashlib.md5(content).hexdigest()) - - # PUT image with custom properties to make public and then - # Verify 200 returned - headers = {'X-Image-Meta-is_public': 'True'} - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'PUT', headers=headers) - self.assertEqual(http_client.OK, response.status) - image = jsonutils.loads(content) - is_public = image['image']['is_public'] - self.assertTrue( - is_public, - "Expected image to be public but received %s" % is_public) - - # PUT image with custom properties to make private and then - # Verify 200 returned - headers = {'X-Image-Meta-is_public': 'False'} - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'PUT', headers=headers) - self.assertEqual(http_client.OK, response.status) - image = jsonutils.loads(content) - is_public = image['image']['is_public'] - self.assertFalse( - is_public, - "Expected image to be private but received %s" % is_public) - - @skip_if_disabled - def test_get_head_simple_post(self): - """ - We test the following sequential series of actions: - - 0. GET /images - - Verify no public images - 1. GET /images/detail - - Verify no public images - 2. POST /images with public image named Image1 - and no custom properties - - Verify 201 returned - 3. HEAD image - - Verify HTTP headers have correct information we just added - 4. GET image - - Verify all information on image we just added is correct - 5. GET /images - - Verify the image we just added is returned - 6. GET /images/detail - - Verify the image we just added is returned - 7. PUT image with custom properties of "distro" and "arch" - - Verify 200 returned - 8. 
PUT image with too many custom properties - - Verify 413 returned - 9. GET image - - Verify updated information about image was stored - 10. PUT image - - Remove a previously existing property. - 11. PUT image - - Add a previously deleted property. - 12. PUT image/members/member1 - - Add member1 to image - 13. PUT image/members/member2 - - Add member2 to image - 14. GET image/members - - List image members - 15. DELETE image/members/member1 - - Delete image member1 - 16. PUT image/members - - Attempt to replace members with an overlimit amount - 17. PUT image/members/member11 - - Attempt to add a member while at limit - 18. POST /images with another public image named Image2 - - attribute and three custom properties, "distro", "arch" & "foo" - - Verify a 200 OK is returned - 19. HEAD image2 - - Verify image2 found now - 20. GET /images - - Verify 2 public images - 21. GET /images with filter on user-defined property "distro". - - Verify both images are returned - 22. GET /images with filter on user-defined property 'distro' but - - with non-existent value. Verify no images are returned - 23. GET /images with filter on non-existent user-defined property - - "boo". Verify no images are returned - 24. GET /images with filter 'arch=i386' - - Verify only image2 is returned - 25. GET /images with filter 'arch=x86_64' - - Verify only image1 is returned - 26. GET /images with filter 'foo=bar' - - Verify only image2 is returned - 27. DELETE image1 - - Delete image - 28. GET image/members - - List deleted image members - 29. PUT image/members/member2 - - Update existing member2 of deleted image - 30. PUT image/members/member3 - - Add member3 to deleted image - 31. DELETE image/members/member2 - - Delete member2 from deleted image - 32. DELETE image2 - - Delete image - 33. GET /images - - Verify no images are listed - """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - # 0. 
GET /images - # Verify no public images - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('{"images": []}', content.decode()) - - # 1. GET /images/detail - # Verify no public images - path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('{"images": []}', content.decode()) - - # 2. POST /images with public image named Image1 - # attribute and no custom properties. Verify a 200 OK is returned - image_data = b"*" * FIVE_KB - headers = minimal_headers('Image1') - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers, - body=image_data) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['image']['id'] - self.assertEqual(hashlib.md5(image_data).hexdigest(), - data['image']['checksum']) - self.assertEqual(FIVE_KB, data['image']['size']) - self.assertEqual("Image1", data['image']['name']) - self.assertTrue(data['image']['is_public']) - - # 3. HEAD image - # Verify image found now - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual("Image1", response['x-image-meta-name']) - - # 4. 
GET image - # Verify all information on image we just added is correct - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - expected_image_headers = { - 'x-image-meta-id': image_id, - 'x-image-meta-name': 'Image1', - 'x-image-meta-is_public': 'True', - 'x-image-meta-status': 'active', - 'x-image-meta-disk_format': 'raw', - 'x-image-meta-container_format': 'ovf', - 'x-image-meta-size': str(FIVE_KB)} - - expected_std_headers = { - 'content-length': str(FIVE_KB), - 'content-type': 'application/octet-stream'} - - for expected_key, expected_value in expected_image_headers.items(): - self.assertEqual(expected_value, response[expected_key], - "For key '%s' expected header value '%s'. " - "Got '%s'" % (expected_key, - expected_value, - response[expected_key])) - - for expected_key, expected_value in expected_std_headers.items(): - self.assertEqual(expected_value, response[expected_key], - "For key '%s' expected header value '%s'. " - "Got '%s'" % (expected_key, - expected_value, - response[expected_key])) - - self.assertEqual(image_data, content) - self.assertEqual(hashlib.md5(image_data).hexdigest(), - hashlib.md5(content).hexdigest()) - - # 5. GET /images - # Verify one public image - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - expected_result = {"images": [ - {"container_format": "ovf", - "disk_format": "raw", - "id": image_id, - "name": "Image1", - "checksum": "c2e5db72bd7fd153f53ede5da5a06de3", - "size": 5120}]} - self.assertEqual(expected_result, jsonutils.loads(content)) - - # 6. 
GET /images/detail - # Verify image and all its metadata - path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - expected_image = { - "status": "active", - "name": "Image1", - "deleted": False, - "container_format": "ovf", - "disk_format": "raw", - "id": image_id, - "is_public": True, - "deleted_at": None, - "properties": {}, - "size": 5120} - - image = jsonutils.loads(content) - - for expected_key, expected_value in expected_image.items(): - self.assertEqual(expected_value, image['images'][0][expected_key], - "For key '%s' expected header value '%s'. " - "Got '%s'" % (expected_key, - expected_value, - image['images'][0][expected_key])) - - # 7. PUT image with custom properties of "distro" and "arch" - # Verify 200 returned - headers = {'X-Image-Meta-Property-Distro': 'Ubuntu', - 'X-Image-Meta-Property-Arch': 'x86_64'} - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'PUT', headers=headers) - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual("x86_64", data['image']['properties']['arch']) - self.assertEqual("Ubuntu", data['image']['properties']['distro']) - - # 8. PUT image with too many custom properties - # Verify 413 returned - headers = {} - for i in range(11): # configured limit is 10 - headers['X-Image-Meta-Property-foo%d' % i] = 'bar' - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'PUT', headers=headers) - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, - response.status) - - # 9. 
GET /images/detail - # Verify image and all its metadata - path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - expected_image = { - "status": "active", - "name": "Image1", - "deleted": False, - "container_format": "ovf", - "disk_format": "raw", - "id": image_id, - "is_public": True, - "deleted_at": None, - "properties": {'distro': 'Ubuntu', 'arch': 'x86_64'}, - "size": 5120} - - image = jsonutils.loads(content) - - for expected_key, expected_value in expected_image.items(): - self.assertEqual(expected_value, image['images'][0][expected_key], - "For key '%s' expected header value '%s'. " - "Got '%s'" % (expected_key, - expected_value, - image['images'][0][expected_key])) - - # 10. PUT image and remove a previously existing property. - headers = {'X-Image-Meta-Property-Arch': 'x86_64'} - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'PUT', headers=headers) - self.assertEqual(http_client.OK, response.status) - - path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port) - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content)['images'][0] - self.assertEqual(1, len(data['properties'])) - self.assertEqual("x86_64", data['properties']['arch']) - - # 11. PUT image and add a previously deleted property. 
- headers = {'X-Image-Meta-Property-Distro': 'Ubuntu', - 'X-Image-Meta-Property-Arch': 'x86_64'} - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'PUT', headers=headers) - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - - path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port) - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content)['images'][0] - self.assertEqual(2, len(data['properties'])) - self.assertEqual("x86_64", data['properties']['arch']) - self.assertEqual("Ubuntu", data['properties']['distro']) - self.assertNotEqual(data['created_at'], data['updated_at']) - - # 12. Add member to image - path = ("http://%s:%d/v1/images/%s/members/pattieblack" % - ("127.0.0.1", self.api_port, image_id)) - http = httplib2.Http() - response, content = http.request(path, 'PUT') - self.assertEqual(http_client.NO_CONTENT, response.status) - - # 13. Add member to image - path = ("http://%s:%d/v1/images/%s/members/pattiewhite" % - ("127.0.0.1", self.api_port, image_id)) - http = httplib2.Http() - response, content = http.request(path, 'PUT') - self.assertEqual(http_client.NO_CONTENT, response.status) - - # 14. List image members - path = ("http://%s:%d/v1/images/%s/members" % - ("127.0.0.1", self.api_port, image_id)) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(2, len(data['members'])) - self.assertEqual('pattieblack', data['members'][0]['member_id']) - self.assertEqual('pattiewhite', data['members'][1]['member_id']) - - # 15. 
Delete image member - path = ("http://%s:%d/v1/images/%s/members/pattieblack" % - ("127.0.0.1", self.api_port, image_id)) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.NO_CONTENT, response.status) - - # 16. Attempt to replace members with an overlimit amount - # Adding 11 image members should fail since configured limit is 10 - path = ("http://%s:%d/v1/images/%s/members" % - ("127.0.0.1", self.api_port, image_id)) - memberships = [] - for i in range(11): - member_id = "foo%d" % i - memberships.append(dict(member_id=member_id)) - http = httplib2.Http() - body = jsonutils.dumps(dict(memberships=memberships)) - response, content = http.request(path, 'PUT', body=body) - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, - response.status) - - # 17. Attempt to add a member while at limit - # Adding an 11th member should fail since configured limit is 10 - path = ("http://%s:%d/v1/images/%s/members" % - ("127.0.0.1", self.api_port, image_id)) - memberships = [] - for i in range(10): - member_id = "foo%d" % i - memberships.append(dict(member_id=member_id)) - http = httplib2.Http() - body = jsonutils.dumps(dict(memberships=memberships)) - response, content = http.request(path, 'PUT', body=body) - self.assertEqual(http_client.NO_CONTENT, response.status) - - path = ("http://%s:%d/v1/images/%s/members/fail_me" % - ("127.0.0.1", self.api_port, image_id)) - http = httplib2.Http() - response, content = http.request(path, 'PUT') - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, - response.status) - - # 18. POST /images with another public image named Image2 - # attribute and three custom properties, "distro", "arch" & "foo". 
- # Verify a 200 OK is returned - image_data = b"*" * FIVE_KB - headers = minimal_headers('Image2') - headers['X-Image-Meta-Property-Distro'] = 'Ubuntu' - headers['X-Image-Meta-Property-Arch'] = 'i386' - headers['X-Image-Meta-Property-foo'] = 'bar' - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers, - body=image_data) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image2_id = data['image']['id'] - self.assertEqual(hashlib.md5(image_data).hexdigest(), - data['image']['checksum']) - self.assertEqual(FIVE_KB, data['image']['size']) - self.assertEqual("Image2", data['image']['name']) - self.assertTrue(data['image']['is_public']) - self.assertEqual('Ubuntu', data['image']['properties']['distro']) - self.assertEqual('i386', data['image']['properties']['arch']) - self.assertEqual('bar', data['image']['properties']['foo']) - - # 19. HEAD image2 - # Verify image2 found now - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image2_id) - http = httplib2.Http() - response, content = http.request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual("Image2", response['x-image-meta-name']) - - # 20. GET /images - # Verify 2 public images - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - images = jsonutils.loads(content)['images'] - self.assertEqual(2, len(images)) - self.assertEqual(image2_id, images[0]['id']) - self.assertEqual(image_id, images[1]['id']) - - # 21. GET /images with filter on user-defined property 'distro'. 
- # Verify both images are returned - path = "http://%s:%d/v1/images?property-distro=Ubuntu" % ( - "127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - images = jsonutils.loads(content)['images'] - self.assertEqual(2, len(images)) - self.assertEqual(image2_id, images[0]['id']) - self.assertEqual(image_id, images[1]['id']) - - # 22. GET /images with filter on user-defined property 'distro' but - # with non-existent value. Verify no images are returned - path = "http://%s:%d/v1/images?property-distro=fedora" % ( - "127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - images = jsonutils.loads(content)['images'] - self.assertEqual(0, len(images)) - - # 23. GET /images with filter on non-existent user-defined property - # 'boo'. Verify no images are returned - path = "http://%s:%d/v1/images?property-boo=bar" % ("127.0.0.1", - self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - images = jsonutils.loads(content)['images'] - self.assertEqual(0, len(images)) - - # 24. GET /images with filter 'arch=i386' - # Verify only image2 is returned - path = "http://%s:%d/v1/images?property-arch=i386" % ("127.0.0.1", - self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - images = jsonutils.loads(content)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image2_id, images[0]['id']) - - # 25. 
GET /images with filter 'arch=x86_64' - # Verify only image1 is returned - path = "http://%s:%d/v1/images?property-arch=x86_64" % ("127.0.0.1", - self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - images = jsonutils.loads(content)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image_id, images[0]['id']) - - # 26. GET /images with filter 'foo=bar' - # Verify only image2 is returned - path = "http://%s:%d/v1/images?property-foo=bar" % ("127.0.0.1", - self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - images = jsonutils.loads(content)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image2_id, images[0]['id']) - - # 27. DELETE image1 - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - # 28. Try to list members of deleted image - path = ("http://%s:%d/v1/images/%s/members" % - ("127.0.0.1", self.api_port, image_id)) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.NOT_FOUND, response.status) - - # 29. Try to update member of deleted image - path = ("http://%s:%d/v1/images/%s/members" % - ("127.0.0.1", self.api_port, image_id)) - http = httplib2.Http() - fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}] - body = jsonutils.dumps(dict(memberships=fixture)) - response, content = http.request(path, 'PUT', body=body) - self.assertEqual(http_client.NOT_FOUND, response.status) - - # 30. 
Try to add member to deleted image - path = ("http://%s:%d/v1/images/%s/members/chickenpattie" % - ("127.0.0.1", self.api_port, image_id)) - http = httplib2.Http() - response, content = http.request(path, 'PUT') - self.assertEqual(http_client.NOT_FOUND, response.status) - - # 31. Try to delete member of deleted image - path = ("http://%s:%d/v1/images/%s/members/pattieblack" % - ("127.0.0.1", self.api_port, image_id)) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.NOT_FOUND, response.status) - - # 32. DELETE image2 - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image2_id) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - # 33. GET /images - # Verify no images are listed - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - images = jsonutils.loads(content)['images'] - self.assertEqual(0, len(images)) - - # 34. HEAD /images/detail - path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'HEAD') - self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status) - self.assertEqual('GET', response.get('allow')) - - self.stop_servers() - - def test_download_non_exists_image_raises_http_forbidden(self): - """ - We test the following sequential series of actions:: - - 0. POST /images with public image named Image1 - and no custom properties - - Verify 201 returned - 1. HEAD image - - Verify HTTP headers have correct information we just added - 2. GET image - - Verify all information on image we just added is correct - 3. DELETE image1 - - Delete the newly added image - 4. 
GET image - - Verify that 403 HTTPForbidden exception is raised prior to - 404 HTTPNotFound - - """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - image_data = b"*" * FIVE_KB - headers = minimal_headers('Image1') - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers, - body=image_data) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['image']['id'] - self.assertEqual(hashlib.md5(image_data).hexdigest(), - data['image']['checksum']) - self.assertEqual(FIVE_KB, data['image']['size']) - self.assertEqual("Image1", data['image']['name']) - self.assertTrue(data['image']['is_public']) - - # 1. HEAD image - # Verify image found now - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual("Image1", response['x-image-meta-name']) - - # 2. GET /images - # Verify one public image - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - expected_result = {"images": [ - {"container_format": "ovf", - "disk_format": "raw", - "id": image_id, - "name": "Image1", - "checksum": "c2e5db72bd7fd153f53ede5da5a06de3", - "size": 5120}]} - self.assertEqual(expected_result, jsonutils.loads(content)) - - # 3. DELETE image1 - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - # 4. 
GET image - # Verify that 403 HTTPForbidden exception is raised prior to - # 404 HTTPNotFound - rules = {"download_image": '!'} - self.set_policy_rules(rules) - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.FORBIDDEN, response.status) - - self.stop_servers() - - def test_download_non_exists_image_raises_http_not_found(self): - """ - We test the following sequential series of actions: - - 0. POST /images with public image named Image1 - and no custom properties - - Verify 201 returned - 1. HEAD image - - Verify HTTP headers have correct information we just added - 2. GET image - - Verify all information on image we just added is correct - 3. DELETE image1 - - Delete the newly added image - 4. GET image - - Verify that 404 HTTPNotFound exception is raised - """ - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - image_data = b"*" * FIVE_KB - headers = minimal_headers('Image1') - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers, - body=image_data) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['image']['id'] - self.assertEqual(hashlib.md5(image_data).hexdigest(), - data['image']['checksum']) - self.assertEqual(FIVE_KB, data['image']['size']) - self.assertEqual("Image1", data['image']['name']) - self.assertTrue(data['image']['is_public']) - - # 1. HEAD image - # Verify image found now - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual("Image1", response['x-image-meta-name']) - - # 2. 
GET /images - # Verify one public image - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - expected_result = {"images": [ - {"container_format": "ovf", - "disk_format": "raw", - "id": image_id, - "name": "Image1", - "checksum": "c2e5db72bd7fd153f53ede5da5a06de3", - "size": 5120}]} - self.assertEqual(expected_result, jsonutils.loads(content)) - - # 3. DELETE image1 - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - # 4. GET image - # Verify that 404 HTTPNotFound exception is raised - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image_id) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.NOT_FOUND, response.status) - - self.stop_servers() - - def test_status_cannot_be_manipulated_directly(self): - self.cleanup() - self.start_servers(**self.__dict__.copy()) - headers = minimal_headers('Image1') - - # Create a 'queued' image - http = httplib2.Http() - headers = {'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Disk-Format': 'raw', - 'X-Image-Meta-Container-Format': 'bare'} - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - response, content = http.request(path, 'POST', headers=headers, - body=None) - self.assertEqual(http_client.CREATED, response.status) - image = jsonutils.loads(content)['image'] - self.assertEqual('queued', image['status']) - - # Ensure status of 'queued' image can't be changed - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - image['id']) - http = httplib2.Http() - headers = {'X-Image-Meta-Status': 'active'} - response, content = http.request(path, 'PUT', headers=headers) - self.assertEqual(http_client.FORBIDDEN, response.status) 
- response, content = http.request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('queued', response['x-image-meta-status']) - - # We allow 'setting' to the same status - http = httplib2.Http() - headers = {'X-Image-Meta-Status': 'queued'} - response, content = http.request(path, 'PUT', headers=headers) - self.assertEqual(http_client.OK, response.status) - response, content = http.request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('queued', response['x-image-meta-status']) - - # Make image active - http = httplib2.Http() - headers = {'Content-Type': 'application/octet-stream'} - response, content = http.request(path, 'PUT', headers=headers, - body='data') - self.assertEqual(http_client.OK, response.status) - image = jsonutils.loads(content)['image'] - self.assertEqual('active', image['status']) - - # Ensure status of 'active' image can't be changed - http = httplib2.Http() - headers = {'X-Image-Meta-Status': 'queued'} - response, content = http.request(path, 'PUT', headers=headers) - self.assertEqual(http_client.FORBIDDEN, response.status) - response, content = http.request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('active', response['x-image-meta-status']) - - # We allow 'setting' to the same status - http = httplib2.Http() - headers = {'X-Image-Meta-Status': 'active'} - response, content = http.request(path, 'PUT', headers=headers) - self.assertEqual(http_client.OK, response.status) - response, content = http.request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('active', response['x-image-meta-status']) - - # Create a 'queued' image, ensure 'status' header is ignored - http = httplib2.Http() - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - headers = {'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Status': 'active'} - response, content = http.request(path, 'POST', 
headers=headers, - body=None) - self.assertEqual(http_client.CREATED, response.status) - image = jsonutils.loads(content)['image'] - self.assertEqual('queued', image['status']) - - # Create an 'active' image, ensure 'status' header is ignored - http = httplib2.Http() - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - headers = {'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Disk-Format': 'raw', - 'X-Image-Meta-Status': 'queued', - 'X-Image-Meta-Container-Format': 'bare'} - response, content = http.request(path, 'POST', headers=headers, - body='data') - self.assertEqual(http_client.CREATED, response.status) - image = jsonutils.loads(content)['image'] - self.assertEqual('active', image['status']) - self.stop_servers() diff --git a/glance/tests/functional/v1/test_copy_to_file.py b/glance/tests/functional/v1/test_copy_to_file.py deleted file mode 100644 index 3740c3b1..00000000 --- a/glance/tests/functional/v1/test_copy_to_file.py +++ /dev/null @@ -1,300 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# Copyright 2012 Red Hat, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests copying images to a Glance API server which uses a filesystem- -based storage backend. 
-""" - -import hashlib -import tempfile -import time - -import httplib2 -from oslo_serialization import jsonutils -from oslo_utils import units -from six.moves import http_client -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from glance.tests import functional -from glance.tests.functional.store_utils import get_http_uri -from glance.tests.functional.store_utils import setup_http -from glance.tests.utils import skip_if_disabled - -FIVE_KB = 5 * units.Ki - - -class TestCopyToFile(functional.FunctionalTest): - - """ - Functional tests for copying images from the HTTP storage - backend to file - """ - - def _do_test_copy_from(self, from_store, get_uri): - """ - Ensure we can copy from an external image in from_store. - """ - self.cleanup() - - self.start_servers(**self.__dict__.copy()) - setup_http(self) - - # POST /images with public image to be stored in from_store, - # to stand in for the 'external' image - image_data = b"*" * FIVE_KB - headers = {'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Name': 'external', - 'X-Image-Meta-Store': from_store, - 'X-Image-Meta-disk_format': 'raw', - 'X-Image-Meta-container_format': 'ovf', - 'X-Image-Meta-Is-Public': 'True'} - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers, - body=image_data) - self.assertEqual(http_client.CREATED, response.status, content) - data = jsonutils.loads(content) - - original_image_id = data['image']['id'] - - copy_from = get_uri(self, original_image_id) - - # POST /images with public image copied from_store (to file) - headers = {'X-Image-Meta-Name': 'copied', - 'X-Image-Meta-disk_format': 'raw', - 'X-Image-Meta-container_format': 'ovf', - 'X-Image-Meta-Is-Public': 'True', - 'X-Glance-API-Copy-From': copy_from} - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = 
http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status, content) - data = jsonutils.loads(content) - - copy_image_id = data['image']['id'] - self.assertNotEqual(copy_image_id, original_image_id) - - # GET image and make sure image content is as expected - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - copy_image_id) - - def _await_status(expected_status): - for i in range(100): - time.sleep(0.01) - http = httplib2.Http() - response, content = http.request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - if response['x-image-meta-status'] == expected_status: - return - self.fail('unexpected image status %s' % - response['x-image-meta-status']) - _await_status('active') - - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual(str(FIVE_KB), response['content-length']) - - self.assertEqual(image_data, content) - self.assertEqual(hashlib.md5(image_data).hexdigest(), - hashlib.md5(content).hexdigest()) - self.assertEqual(FIVE_KB, data['image']['size']) - self.assertEqual("copied", data['image']['name']) - - # DELETE original image - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - original_image_id) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - # GET image again to make sure the existence of the original - # image in from_store is not depended on - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - copy_image_id) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual(str(FIVE_KB), response['content-length']) - - self.assertEqual(image_data, content) - self.assertEqual(hashlib.md5(image_data).hexdigest(), - hashlib.md5(content).hexdigest()) - self.assertEqual(FIVE_KB, data['image']['size']) - 
self.assertEqual("copied", data['image']['name']) - - # DELETE copied image - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - copy_image_id) - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - self.stop_servers() - - @skip_if_disabled - def test_copy_from_http_store(self): - """ - Ensure we can copy from an external image in HTTP store. - """ - self._do_test_copy_from('file', get_http_uri) - - @skip_if_disabled - def test_copy_from_http_exists(self): - """Ensure we can copy from an external image in HTTP.""" - self.cleanup() - - self.start_servers(**self.__dict__.copy()) - - setup_http(self) - - copy_from = get_http_uri(self, 'foobar') - - # POST /images with public image copied from HTTP (to file) - headers = {'X-Image-Meta-Name': 'copied', - 'X-Image-Meta-disk_format': 'raw', - 'X-Image-Meta-container_format': 'ovf', - 'X-Image-Meta-Is-Public': 'True', - 'X-Glance-API-Copy-From': copy_from} - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status, content) - data = jsonutils.loads(content) - - copy_image_id = data['image']['id'] - self.assertEqual('queued', data['image']['status'], content) - - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - copy_image_id) - - def _await_status(expected_status): - for i in range(100): - time.sleep(0.01) - http = httplib2.Http() - response, content = http.request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - if response['x-image-meta-status'] == expected_status: - return - self.fail('unexpected image status %s' % - response['x-image-meta-status']) - - _await_status('active') - - # GET image and make sure image content is as expected - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, 
response.status) - - self.assertEqual(str(FIVE_KB), response['content-length']) - self.assertEqual(b"*" * FIVE_KB, content) - self.assertEqual(hashlib.md5(b"*" * FIVE_KB).hexdigest(), - hashlib.md5(content).hexdigest()) - - # DELETE copied image - http = httplib2.Http() - response, content = http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - self.stop_servers() - - @skip_if_disabled - def test_copy_from_http_nonexistent_location_url(self): - # Ensure HTTP 404 response returned when try to create - # image with non-existent http location URL. - self.cleanup() - - self.start_servers(**self.__dict__.copy()) - - setup_http(self) - - uri = get_http_uri(self, 'foobar') - copy_from = uri.replace('images', 'snafu') - - # POST /images with public image copied from HTTP (to file) - headers = {'X-Image-Meta-Name': 'copied', - 'X-Image-Meta-disk_format': 'raw', - 'X-Image-Meta-container_format': 'ovf', - 'X-Image-Meta-Is-Public': 'True', - 'X-Glance-API-Copy-From': copy_from} - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.NOT_FOUND, response.status, content) - - expected = 'HTTP datastore could not find image at URI.' 
- self.assertIn(expected, content.decode()) - - self.stop_servers() - - @skip_if_disabled - def test_copy_from_file(self): - """ - Ensure we can't copy from file - """ - self.cleanup() - - self.start_servers(**self.__dict__.copy()) - - with tempfile.NamedTemporaryFile() as image_file: - image_file.write(b"XXX") - image_file.flush() - copy_from = 'file://' + image_file.name - - # POST /images with public image copied from file (to file) - headers = {'X-Image-Meta-Name': 'copied', - 'X-Image-Meta-disk_format': 'raw', - 'X-Image-Meta-container_format': 'ovf', - 'X-Image-Meta-Is-Public': 'True', - 'X-Glance-API-Copy-From': copy_from} - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.BAD_REQUEST, response.status, content) - - expected = 'External sources are not supported: \'%s\'' % copy_from - msg = 'expected "%s" in "%s"' % (expected, content) - self.assertIn(expected, content.decode(), msg) - - self.stop_servers() - - @skip_if_disabled - def test_copy_from_swift_config(self): - """ - Ensure we can't copy from swift+config - """ - self.cleanup() - - self.start_servers(**self.__dict__.copy()) - - # POST /images with public image copied from file (to file) - headers = {'X-Image-Meta-Name': 'copied', - 'X-Image-Meta-disk_format': 'raw', - 'X-Image-Meta-container_format': 'ovf', - 'X-Image-Meta-Is-Public': 'True', - 'X-Glance-API-Copy-From': 'swift+config://xxx'} - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.BAD_REQUEST, response.status, content) - - expected = 'External sources are not supported: \'swift+config://xxx\'' - msg = 'expected "%s" in "%s"' % (expected, content) - self.assertIn(expected, content.decode(), msg) - - self.stop_servers() diff --git a/glance/tests/functional/v1/test_misc.py 
b/glance/tests/functional/v1/test_misc.py deleted file mode 100644 index 3d99cfa6..00000000 --- a/glance/tests/functional/v1/test_misc.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import hashlib -import os - -import httplib2 -from oslo_serialization import jsonutils -from oslo_utils import units -from six.moves import http_client - -from glance.tests import functional -from glance.tests.utils import minimal_headers - -FIVE_KB = 5 * units.Ki -FIVE_GB = 5 * units.Gi - - -class TestMiscellaneous(functional.FunctionalTest): - - """Some random tests for various bugs and stuff""" - - def setUp(self): - super(TestMiscellaneous, self).setUp() - - # NOTE(sirp): This is needed in case we are running the tests under an - # environment in which OS_AUTH_STRATEGY=keystone. The test server we - # spin up won't have keystone support, so we need to switch to the - # NoAuth strategy. - os.environ['OS_AUTH_STRATEGY'] = 'noauth' - os.environ['OS_AUTH_URL'] = '' - - def test_api_response_when_image_deleted_from_filesystem(self): - """ - A test for LP bug #781410 -- glance should fail more gracefully - on requests for images that have been removed from the fs - """ - - self.cleanup() - self.start_servers() - - # 1. POST /images with public image named Image1 - # attribute and no custom properties. 
Verify a 200 OK is returned - image_data = b"*" * FIVE_KB - headers = minimal_headers('Image1') - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'POST', headers=headers, - body=image_data) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - self.assertEqual(hashlib.md5(image_data).hexdigest(), - data['image']['checksum']) - self.assertEqual(FIVE_KB, data['image']['size']) - self.assertEqual("Image1", data['image']['name']) - self.assertTrue(data['image']['is_public']) - - # 2. REMOVE the image from the filesystem - image_path = "%s/images/%s" % (self.test_dir, data['image']['id']) - os.remove(image_path) - - # 3. HEAD /images/1 - # Verify image found now - path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, - data['image']['id']) - http = httplib2.Http() - response, content = http.request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual("Image1", response['x-image-meta-name']) - - # 4. GET /images/1 - # Verify the api throws the appropriate 404 error - path = "http://%s:%d/v1/images/1" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.NOT_FOUND, response.status) - - self.stop_servers() - - def test_exception_not_eaten_from_registry_to_api(self): - """ - A test for LP bug #704854 -- Exception thrown by registry - server is consumed by API server. - - We start both servers daemonized. 
- - We then use Glance API to try adding an image that does not - meet validation requirements on the registry server and test - that the error returned from the API server is appropriate - """ - self.cleanup() - self.start_servers() - - api_port = self.api_port - path = 'http://127.0.0.1:%d/v1/images' % api_port - - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual(b'{"images": []}', content) - - headers = {'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Name': 'ImageName', - 'X-Image-Meta-Disk-Format': 'Invalid', } - ignored, content = http.request(path, 'POST', headers=headers) - - self.assertIn(b'Invalid disk format', content, - "Could not find 'Invalid disk format' " - "in output: %s" % content) - - self.stop_servers() diff --git a/glance/tests/functional/v1/test_multiprocessing.py b/glance/tests/functional/v1/test_multiprocessing.py deleted file mode 100644 index 8c7c0685..00000000 --- a/glance/tests/functional/v1/test_multiprocessing.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import time - -import httplib2 -import psutil -from six.moves import http_client -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from glance.tests import functional -from glance.tests.utils import execute - - -class TestMultiprocessing(functional.FunctionalTest): - """Functional tests for the bin/glance CLI tool""" - - def setUp(self): - self.workers = 2 - super(TestMultiprocessing, self).setUp() - - def test_multiprocessing(self): - """Spin up the api servers with multiprocessing on""" - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) - http = httplib2.Http() - response, content = http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual(b'{"images": []}', content) - self.stop_servers() - - def _get_children(self): - api_pid = self.api_server.process_pid - process = psutil.Process(api_pid) - try: - # psutils version >= 2 - children = process.children() - except AttributeError: - # psutils version < 2 - children = process.get_children() - pids = [str(child.pid) for child in children] - return pids - - def test_interrupt_avoids_respawn_storm(self): - """ - Ensure an interrupt signal does not cause a respawn storm. - See bug #978130 - """ - self.start_servers(**self.__dict__.copy()) - - children = self._get_children() - cmd = "kill -INT %s" % ' '.join(children) - execute(cmd, raise_error=True) - - for _ in range(9): - # Yeah. This totally isn't a race condition. Randomly fails - # set at 0.05. 
Works most of the time at 0.10 - time.sleep(0.10) - # ensure number of children hasn't grown - self.assertGreaterEqual(len(children), len(self._get_children())) - for child in self._get_children(): - # ensure no new children spawned - self.assertIn(child, children, child) - - self.stop_servers() diff --git a/glance/tests/functional/v2/__init__.py b/glance/tests/functional/v2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/functional/v2/registry_data_api.py b/glance/tests/functional/v2/registry_data_api.py deleted file mode 100644 index 0ba9ef68..00000000 --- a/glance/tests/functional/v2/registry_data_api.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from glance.db.registry.api import * # noqa -from glance.common.rpc import RPCClient -from glance.registry.client.v2 import api -from glance.registry.client.v2 import client - - -def patched_bulk_request(self, commands): - # We add some auth headers which are typically - # added by keystone. This is required when testing - # without keystone, otherwise the tests fail. 
- # We use the 'trusted-auth' deployment flavour - # for testing so that these headers are interpreted - # as expected (ie the same way as if keystone was - # present) - body = self._serializer.to_json(commands) - headers = {"X-Identity-Status": "Confirmed", 'X-Roles': 'member'} - if self.context.user is not None: - headers['X-User-Id'] = self.context.user - if self.context.tenant is not None: - headers['X-Tenant-Id'] = self.context.tenant - response = super(RPCClient, self).do_request('POST', - self.base_path, - body, - headers=headers) - return self._deserializer.from_json(response.read()) - - -def client_wrapper(func): - def call(context): - reg_client = func(context) - reg_client.context = context - return reg_client - return call - -client.RegistryClient.bulk_request = patched_bulk_request - -api.get_registry_client = client_wrapper(api.get_registry_client) diff --git a/glance/tests/functional/v2/test_images.py b/glance/tests/functional/v2/test_images.py deleted file mode 100644 index cca59e89..00000000 --- a/glance/tests/functional/v2/test_images.py +++ /dev/null @@ -1,3920 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import signal -import uuid - -from oslo_serialization import jsonutils -import requests -import six -from six.moves import http_client as http -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range -from six.moves import urllib - -from glance.tests import functional -from glance.tests import utils as test_utils - - -TENANT1 = str(uuid.uuid4()) -TENANT2 = str(uuid.uuid4()) -TENANT3 = str(uuid.uuid4()) -TENANT4 = str(uuid.uuid4()) - - -class TestImages(functional.FunctionalTest): - - def setUp(self): - super(TestImages, self).setUp() - self.cleanup() - self.include_scrubber = False - self.api_server.deployment_flavor = 'noauth' - self.api_server.data_api = 'glance.db.sqlalchemy.api' - for i in range(3): - ret = test_utils.start_http_server("foo_image_id%d" % i, - "foo_image%d" % i) - setattr(self, 'http_server%d_pid' % i, ret[0]) - setattr(self, 'http_port%d' % i, ret[1]) - - def tearDown(self): - for i in range(3): - pid = getattr(self, 'http_server%d_pid' % i, None) - if pid: - os.kill(pid, signal.SIGKILL) - - super(TestImages, self).tearDown() - - def _url(self, path): - return 'http://127.0.0.1:%d%s' % (self.api_port, path) - - def _headers(self, custom_headers=None): - base_headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': TENANT1, - 'X-Roles': 'member', - } - base_headers.update(custom_headers or {}) - return base_headers - - def test_v1_none_properties_v2(self): - self.api_server.deployment_flavor = 'noauth' - self.api_server.use_user_token = True - self.api_server.send_identity_credentials = True - self.registry_server.deployment_flavor = '' - # Image list should be empty - self.start_servers(**self.__dict__.copy()) - - # Create an image (with two deployer-defined properties) - path = self._url('/v1/images') - headers = self._headers({'content-type': 
'application/octet-stream'}) - headers.update(test_utils.minimal_headers('image-1')) - # NOTE(flaper87): Sending empty string, the server will use None - headers['x-image-meta-property-my_empty_prop'] = '' - - response = requests.post(path, headers=headers) - self.assertEqual(http.CREATED, response.status_code) - data = jsonutils.loads(response.text) - image_id = data['image']['id'] - - # NOTE(flaper87): Get the image using V2 and verify - # the returned value for `my_empty_prop` is an empty - # string. - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertEqual('', image['my_empty_prop']) - self.stop_servers() - - def test_not_authenticated_in_registry_on_ops(self): - # https://bugs.launchpad.net/glance/+bug/1451850 - # this configuration guarantees that authentication succeeds in - # glance-api and fails in glance-registry if no token is passed - self.api_server.deployment_flavor = '' - # make sure that request will reach registry - self.api_server.data_api = 'glance.db.registry.api' - self.registry_server.deployment_flavor = 'fakeauth' - self.start_servers(**self.__dict__.copy()) - headers = {'content-type': 'application/json'} - image = {'name': 'image', 'type': 'kernel', 'disk_format': 'qcow2', - 'container_format': 'bare'} - # image create should return 401 - response = requests.post(self._url('/v2/images'), headers=headers, - data=jsonutils.dumps(image)) - self.assertEqual(http.UNAUTHORIZED, response.status_code) - # image list should return 401 - response = requests.get(self._url('/v2/images')) - self.assertEqual(http.UNAUTHORIZED, response.status_code) - # image show should return 401 - response = requests.get(self._url('/v2/images/someimageid')) - self.assertEqual(http.UNAUTHORIZED, response.status_code) - # image update should return 401 - ops = [{'op': 'replace', 'path': '/protected', 'value': False}] 
- media_type = 'application/openstack-images-v2.1-json-patch' - response = requests.patch(self._url('/v2/images/someimageid'), - headers={'content-type': media_type}, - data=jsonutils.dumps(ops)) - self.assertEqual(http.UNAUTHORIZED, response.status_code) - # image delete should return 401 - response = requests.delete(self._url('/v2/images/someimageid')) - self.assertEqual(http.UNAUTHORIZED, response.status_code) - self.stop_servers() - - def test_image_lifecycle(self): - # Image list should be empty - self.api_server.show_multiple_locations = True - self.start_servers(**self.__dict__.copy()) - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - # Create an image (with two deployer-defined properties) - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', - 'foo': 'bar', 'disk_format': 'aki', - 'container_format': 'aki', 'abc': 'xyz', - 'protected': True}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image_location_header = response.headers['Location'] - - # Returned image entity should have a generated id and status - image = jsonutils.loads(response.text) - image_id = image['id'] - checked_keys = set([ - u'status', - u'name', - u'tags', - u'created_at', - u'updated_at', - u'visibility', - u'self', - u'protected', - u'id', - u'file', - u'min_disk', - u'foo', - u'abc', - u'type', - u'min_ram', - u'schema', - u'disk_format', - u'container_format', - u'owner', - u'checksum', - u'size', - u'virtual_size', - u'locations', - ]) - self.assertEqual(checked_keys, set(image.keys())) - expected_image = { - 'status': 'queued', - 'name': 'image-1', - 'tags': [], - 'visibility': 'shared', - 'self': '/v2/images/%s' % image_id, - 
'protected': True, - 'file': '/v2/images/%s/file' % image_id, - 'min_disk': 0, - 'foo': 'bar', - 'abc': 'xyz', - 'type': 'kernel', - 'min_ram': 0, - 'schema': '/v2/schemas/image', - } - for key, value in expected_image.items(): - self.assertEqual(value, image[key], key) - - # Image list should now have one entry - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image_id, images[0]['id']) - - # Create another image (with two deployer-defined properties) - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps({'name': 'image-2', 'type': 'kernel', - 'bar': 'foo', 'disk_format': 'aki', - 'container_format': 'aki', 'xyz': 'abc'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Returned image entity should have a generated id and status - image = jsonutils.loads(response.text) - image2_id = image['id'] - checked_keys = set([ - u'status', - u'name', - u'tags', - u'created_at', - u'updated_at', - u'visibility', - u'self', - u'protected', - u'id', - u'file', - u'min_disk', - u'bar', - u'xyz', - u'type', - u'min_ram', - u'schema', - u'disk_format', - u'container_format', - u'owner', - u'checksum', - u'size', - u'virtual_size', - u'locations', - ]) - self.assertEqual(checked_keys, set(image.keys())) - expected_image = { - 'status': 'queued', - 'name': 'image-2', - 'tags': [], - 'visibility': 'shared', - 'self': '/v2/images/%s' % image2_id, - 'protected': False, - 'file': '/v2/images/%s/file' % image2_id, - 'min_disk': 0, - 'bar': 'foo', - 'xyz': 'abc', - 'type': 'kernel', - 'min_ram': 0, - 'schema': '/v2/schemas/image', - } - for key, value in expected_image.items(): - self.assertEqual(value, image[key], key) - - # Image list should now have two 
entries - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(2, len(images)) - self.assertEqual(image2_id, images[0]['id']) - self.assertEqual(image_id, images[1]['id']) - - # Image list should list only image-2 as image-1 doesn't contain the - # property 'bar' - path = self._url('/v2/images?bar=foo') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image2_id, images[0]['id']) - - # Image list should list only image-1 as image-2 doesn't contain the - # property 'foo' - path = self._url('/v2/images?foo=bar') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image_id, images[0]['id']) - - # The "changes-since" filter shouldn't work on glance v2 - path = self._url('/v2/images?changes-since=20001007T10:10:10') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - path = self._url('/v2/images?changes-since=aaa') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - # Image list should list only image-1 based on the filter - # 'protected=true' - path = self._url('/v2/images?protected=true') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image_id, images[0]['id']) - - # Image list should list only image-2 based on the filter - # 'protected=false' - path = self._url('/v2/images?protected=false') - 
response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image2_id, images[0]['id']) - - # Image list should return 400 based on the filter - # 'protected=False' - path = self._url('/v2/images?protected=False') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - # Image list should list only image-1 based on the filter - # 'foo=bar&abc=xyz' - path = self._url('/v2/images?foo=bar&abc=xyz') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image_id, images[0]['id']) - - # Image list should list only image-2 based on the filter - # 'bar=foo&xyz=abc' - path = self._url('/v2/images?bar=foo&xyz=abc') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image2_id, images[0]['id']) - - # Image list should not list anything as the filter 'foo=baz&abc=xyz' - # is not satisfied by either images - path = self._url('/v2/images?foo=baz&abc=xyz') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - # Get the image using the returned Location header - response = requests.get(image_location_header, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertEqual(image_id, image['id']) - self.assertIsNone(image['checksum']) - self.assertIsNone(image['size']) - self.assertIsNone(image['virtual_size']) - self.assertEqual('bar', 
image['foo']) - self.assertTrue(image['protected']) - self.assertEqual('kernel', image['type']) - self.assertTrue(image['created_at']) - self.assertTrue(image['updated_at']) - self.assertEqual(image['updated_at'], image['created_at']) - - # The URI file:// should return a 400 rather than a 500 - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type}) - url = ('file://') - changes = [{ - 'op': 'add', - 'path': '/locations/-', - 'value': { - 'url': url, - 'metadata': {} - } - }] - - data = jsonutils.dumps(changes) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.BAD_REQUEST, response.status_code, response.text) - - # The image should be mutable, including adding and removing properties - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type}) - data = jsonutils.dumps([ - {'op': 'replace', 'path': '/name', 'value': 'image-2'}, - {'op': 'replace', 'path': '/disk_format', 'value': 'vhd'}, - {'op': 'replace', 'path': '/container_format', 'value': 'ami'}, - {'op': 'replace', 'path': '/foo', 'value': 'baz'}, - {'op': 'add', 'path': '/ping', 'value': 'pong'}, - {'op': 'replace', 'path': '/protected', 'value': True}, - {'op': 'remove', 'path': '/type'}, - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - - # Returned image entity should reflect the changes - image = jsonutils.loads(response.text) - self.assertEqual('image-2', image['name']) - self.assertEqual('vhd', image['disk_format']) - self.assertEqual('baz', image['foo']) - self.assertEqual('pong', image['ping']) - self.assertTrue(image['protected']) - self.assertNotIn('type', image, response.text) - - # Adding 11 image properties should fail since configured limit is 10 - path = 
self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type}) - changes = [] - for i in range(11): - changes.append({'op': 'add', - 'path': '/ping%i' % i, - 'value': 'pong'}) - - data = jsonutils.dumps(changes) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.REQUEST_ENTITY_TOO_LARGE, response.status_code, - response.text) - - # Adding 3 image locations should fail since configured limit is 2 - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type}) - changes = [] - for i in range(3): - url = ('http://127.0.0.1:%s/foo_image' % - getattr(self, 'http_port%d' % i)) - changes.append({'op': 'add', 'path': '/locations/-', - 'value': {'url': url, 'metadata': {}}, - }) - - data = jsonutils.dumps(changes) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.REQUEST_ENTITY_TOO_LARGE, response.status_code, - response.text) - - # Ensure the v2.0 json-patch content type is accepted - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.0-json-patch' - headers = self._headers({'content-type': media_type}) - data = jsonutils.dumps([{'add': '/ding', 'value': 'dong'}]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - - # Returned image entity should reflect the changes - image = jsonutils.loads(response.text) - self.assertEqual('dong', image['ding']) - - # Updates should persist across requests - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertEqual(image_id, image['id']) - self.assertEqual('image-2', image['name']) - self.assertEqual('baz', 
image['foo']) - self.assertEqual('pong', image['ping']) - self.assertTrue(image['protected']) - self.assertNotIn('type', image, response.text) - - # Try to download data before its uploaded - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers() - response = requests.get(path, headers=headers) - self.assertEqual(http.NO_CONTENT, response.status_code) - - def _verify_image_checksum_and_status(checksum, status): - # Checksum should be populated and status should be active - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertEqual(checksum, image['checksum']) - self.assertEqual(status, image['status']) - - # Upload some image data - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream'}) - response = requests.put(path, headers=headers, data='ZZZZZ') - self.assertEqual(http.NO_CONTENT, response.status_code) - - expected_checksum = '8f113e38d28a79a5a451b16048cc2b72' - _verify_image_checksum_and_status(expected_checksum, 'active') - - # `disk_format` and `container_format` cannot - # be replaced when the image is active. 
- immutable_paths = ['/disk_format', '/container_format'] - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type}) - path = self._url('/v2/images/%s' % image_id) - for immutable_path in immutable_paths: - data = jsonutils.dumps([ - {'op': 'replace', 'path': immutable_path, 'value': 'ari'}, - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Try to download the data that was just uploaded - path = self._url('/v2/images/%s/file' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - self.assertEqual(expected_checksum, response.headers['Content-MD5']) - self.assertEqual('ZZZZZ', response.text) - - # Uploading duplicate data should be rejected with a 409. The - # original data should remain untouched. - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream'}) - response = requests.put(path, headers=headers, data='XXX') - self.assertEqual(http.CONFLICT, response.status_code) - _verify_image_checksum_and_status(expected_checksum, 'active') - - # Ensure the size is updated to reflect the data uploaded - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - self.assertEqual(5, jsonutils.loads(response.text)['size']) - - # Should be able to deactivate image - path = self._url('/v2/images/%s/actions/deactivate' % image_id) - response = requests.post(path, data={}, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Change the image to public so TENANT2 can see it - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.0-json-patch' - headers = self._headers({'content-type': media_type}) - data = jsonutils.dumps([{"replace": 
"/visibility", "value": "public"}]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - - # Tenant2 should get Forbidden when deactivating the public image - path = self._url('/v2/images/%s/actions/deactivate' % image_id) - response = requests.post(path, data={}, headers=self._headers( - {'X-Tenant-Id': TENANT2})) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Tenant2 should get Forbidden when reactivating the public image - path = self._url('/v2/images/%s/actions/reactivate' % image_id) - response = requests.post(path, data={}, headers=self._headers( - {'X-Tenant-Id': TENANT2})) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Deactivating a deactivated image succeeds (no-op) - path = self._url('/v2/images/%s/actions/deactivate' % image_id) - response = requests.post(path, data={}, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Can't download a deactivated image - path = self._url('/v2/images/%s/file' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Deactivated image should still be in a listing - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(2, len(images)) - self.assertEqual(image2_id, images[0]['id']) - self.assertEqual(image_id, images[1]['id']) - - # Should be able to reactivate a deactivated image - path = self._url('/v2/images/%s/actions/reactivate' % image_id) - response = requests.post(path, data={}, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Reactivating an active image succeeds (no-op) - path = self._url('/v2/images/%s/actions/reactivate' % image_id) - response = requests.post(path, data={}, headers=self._headers()) - 
self.assertEqual(http.NO_CONTENT, response.status_code) - - # Deletion should not work on protected images - path = self._url('/v2/images/%s' % image_id) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Unprotect image for deletion - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type}) - doc = [{'op': 'replace', 'path': '/protected', 'value': False}] - data = jsonutils.dumps(doc) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - - # Deletion should work. Deleting image-1 - path = self._url('/v2/images/%s' % image_id) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # This image should be no longer be directly accessible - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # And neither should its data - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers() - response = requests.get(path, headers=headers) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Image list should now contain just image-2 - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image2_id, images[0]['id']) - - # Deleting image-2 should work - path = self._url('/v2/images/%s' % image2_id) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Image list should now be empty - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - 
self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - # Create image that tries to send True should return 400 - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = 'true' - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - # Create image that tries to send a string should return 400 - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = '"hello"' - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - # Create image that tries to send 123 should return 400 - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = '123' - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - self.stop_servers() - - def test_update_readonly_prop(self): - self.start_servers(**self.__dict__.copy()) - # Create an image (with two deployer-defined properties) - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps({'name': 'image-1'}) - response = requests.post(path, headers=headers, data=data) - - image = jsonutils.loads(response.text) - image_id = image['id'] - - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type}) - - props = ['/id', '/file', '/location', '/schema', '/self'] - - for prop in props: - doc = [{'op': 'replace', - 'path': prop, - 'value': 'value1'}] - data = jsonutils.dumps(doc) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code) - - for prop in props: - doc = [{'op': 'remove', - 'path': prop, - 
'value': 'value1'}] - data = jsonutils.dumps(doc) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code) - - for prop in props: - doc = [{'op': 'add', - 'path': prop, - 'value': 'value1'}] - data = jsonutils.dumps(doc) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code) - - self.stop_servers() - - def test_methods_that_dont_accept_illegal_bodies(self): - # Check images can be reached - self.start_servers(**self.__dict__.copy()) - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - - # Test all the schemas - schema_urls = [ - '/v2/schemas/images', - '/v2/schemas/image', - '/v2/schemas/members', - '/v2/schemas/member', - ] - for value in schema_urls: - path = self._url(value) - data = jsonutils.dumps(["body"]) - response = requests.get(path, headers=self._headers(), data=data) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - # Create image for use with tests - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps({'name': 'image'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - - test_urls = [ - ('/v2/images/%s', 'get'), - ('/v2/images/%s/actions/deactivate', 'post'), - ('/v2/images/%s/actions/reactivate', 'post'), - ('/v2/images/%s/tags/mytag', 'put'), - ('/v2/images/%s/tags/mytag', 'delete'), - ('/v2/images/%s/members', 'get'), - ('/v2/images/%s/file', 'get'), - ('/v2/images/%s', 'delete'), - ] - - for link, method in test_urls: - path = self._url(link % image_id) - data = jsonutils.dumps(["body"]) - response = getattr(requests, method)( - path, headers=self._headers(), data=data) - self.assertEqual(http.BAD_REQUEST, 
response.status_code) - - # DELETE /images/imgid without legal json - path = self._url('/v2/images/%s' % image_id) - data = '{"hello"]' - response = requests.delete(path, headers=self._headers(), data=data) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - # POST /images/imgid/members - path = self._url('/v2/images/%s/members' % image_id) - data = jsonutils.dumps({'member': TENANT3}) - response = requests.post(path, headers=self._headers(), data=data) - self.assertEqual(http.OK, response.status_code) - - # GET /images/imgid/members/memid - path = self._url('/v2/images/%s/members/%s' % (image_id, TENANT3)) - data = jsonutils.dumps(["body"]) - response = requests.get(path, headers=self._headers(), data=data) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - # DELETE /images/imgid/members/memid - path = self._url('/v2/images/%s/members/%s' % (image_id, TENANT3)) - data = jsonutils.dumps(["body"]) - response = requests.delete(path, headers=self._headers(), data=data) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - self.stop_servers() - - def test_download_random_access_w_range_request(self): - """ - Test partial download 'Range' requests for images (random image access) - """ - self.start_servers(**self.__dict__.copy()) - # Create an image (with two deployer-defined properties) - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps({'name': 'image-2', 'type': 'kernel', - 'bar': 'foo', 'disk_format': 'aki', - 'container_format': 'aki', 'xyz': 'abc'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - - # Upload data to image - image_data = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream'}) - response = requests.put(path, 
headers=headers, data=image_data) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # test for success on satisfiable Range request. - range_ = 'bytes=3-10' - headers = self._headers({'Range': range_}) - path = self._url('/v2/images/%s/file' % image_id) - response = requests.get(path, headers=headers) - self.assertEqual(http.PARTIAL_CONTENT, response.status_code) - self.assertEqual('DEFGHIJK', response.text) - - # test for failure on unsatisfiable Range request. - range_ = 'bytes=10-5' - headers = self._headers({'Range': range_}) - path = self._url('/v2/images/%s/file' % image_id) - response = requests.get(path, headers=headers) - self.assertEqual(http.REQUESTED_RANGE_NOT_SATISFIABLE, - response.status_code) - - self.stop_servers() - - def test_download_random_access_w_content_range(self): - """ - Even though Content-Range is incorrect on requests, we support it - for backward compatibility with clients written for pre-Pike Glance. - The following test is for 'Content-Range' requests, which we have - to ensure that we prevent regression. - """ - self.start_servers(**self.__dict__.copy()) - # Create another image (with two deployer-defined properties) - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps({'name': 'image-2', 'type': 'kernel', - 'bar': 'foo', 'disk_format': 'aki', - 'container_format': 'aki', 'xyz': 'abc'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - - # Upload data to image - image_data = 'Z' * 15 - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream'}) - response = requests.put(path, headers=headers, data=image_data) - self.assertEqual(http.NO_CONTENT, response.status_code) - - result_body = '' - for x in range(15): - # NOTE(flaper87): Read just 1 byte. 
Content-Range is - # 0-indexed and it specifies the first byte to read - # and the last byte to read. - content_range = 'bytes %s-%s/15' % (x, x) - headers = self._headers({'Content-Range': content_range}) - path = self._url('/v2/images/%s/file' % image_id) - response = requests.get(path, headers=headers) - self.assertEqual(http.PARTIAL_CONTENT, response.status_code) - result_body += response.text - - self.assertEqual(result_body, image_data) - - # test for failure on unsatisfiable request for ContentRange. - content_range = 'bytes 3-16/15' - headers = self._headers({'Content-Range': content_range}) - path = self._url('/v2/images/%s/file' % image_id) - response = requests.get(path, headers=headers) - self.assertEqual(http.REQUESTED_RANGE_NOT_SATISFIABLE, - response.status_code) - - self.stop_servers() - - def test_download_policy_when_cache_is_not_enabled(self): - - rules = {'context_is_admin': 'role:admin', - 'default': '', - 'add_image': '', - 'get_image': '', - 'modify_image': '', - 'upload_image': '', - 'delete_image': '', - 'download_image': '!'} - self.set_policy_rules(rules) - self.start_servers(**self.__dict__.copy()) - - # Create an image - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'member'}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Returned image entity - image = jsonutils.loads(response.text) - image_id = image['id'] - expected_image = { - 'status': 'queued', - 'name': 'image-1', - 'tags': [], - 'visibility': 'shared', - 'self': '/v2/images/%s' % image_id, - 'protected': False, - 'file': '/v2/images/%s/file' % image_id, - 'min_disk': 0, - 'min_ram': 0, - 'schema': '/v2/schemas/image', - } - for key, value in six.iteritems(expected_image): - self.assertEqual(value, image[key], key) - - # Upload data to image - path = 
self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream'}) - response = requests.put(path, headers=headers, data='ZZZZZ') - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Get an image should fail - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Image Deletion should work - path = self._url('/v2/images/%s' % image_id) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # This image should be no longer be directly accessible - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - self.stop_servers() - - def test_download_image_not_allowed_using_restricted_policy(self): - - rules = { - "context_is_admin": "role:admin", - "default": "", - "add_image": "", - "get_image": "", - "modify_image": "", - "upload_image": "", - "delete_image": "", - "restricted": - "not ('aki':%(container_format)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - } - - self.set_policy_rules(rules) - self.start_servers(**self.__dict__.copy()) - - # Create an image - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'member'}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Returned image entity - image = jsonutils.loads(response.text) - image_id = image['id'] - expected_image = { - 'status': 'queued', - 'name': 'image-1', - 'tags': [], - 'visibility': 'shared', - 'self': '/v2/images/%s' % image_id, - 'protected': 
False, - 'file': '/v2/images/%s/file' % image_id, - 'min_disk': 0, - 'min_ram': 0, - 'schema': '/v2/schemas/image', - } - - for key, value in six.iteritems(expected_image): - self.assertEqual(value, image[key], key) - - # Upload data to image - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream'}) - response = requests.put(path, headers=headers, data='ZZZZZ') - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Get an image should fail - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream', - 'X-Roles': '_member_'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Image Deletion should work - path = self._url('/v2/images/%s' % image_id) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # This image should be no longer be directly accessible - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - self.stop_servers() - - def test_download_image_allowed_using_restricted_policy(self): - - rules = { - "context_is_admin": "role:admin", - "default": "", - "add_image": "", - "get_image": "", - "modify_image": "", - "upload_image": "", - "get_image_location": "", - "delete_image": "", - "restricted": - "not ('aki':%(container_format)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - } - - self.set_policy_rules(rules) - self.start_servers(**self.__dict__.copy()) - - # Create an image - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'member'}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - 
self.assertEqual(http.CREATED, response.status_code) - - # Returned image entity - image = jsonutils.loads(response.text) - image_id = image['id'] - expected_image = { - 'status': 'queued', - 'name': 'image-1', - 'tags': [], - 'visibility': 'shared', - 'self': '/v2/images/%s' % image_id, - 'protected': False, - 'file': '/v2/images/%s/file' % image_id, - 'min_disk': 0, - 'min_ram': 0, - 'schema': '/v2/schemas/image', - } - - for key, value in six.iteritems(expected_image): - self.assertEqual(value, image[key], key) - - # Upload data to image - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream'}) - response = requests.put(path, headers=headers, data='ZZZZZ') - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Get an image should be allowed - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream', - 'X-Roles': 'member'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - - # Image Deletion should work - path = self._url('/v2/images/%s' % image_id) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # This image should be no longer be directly accessible - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - self.stop_servers() - - def test_download_image_raises_service_unavailable(self): - """Test image download returns HTTPServiceUnavailable.""" - self.api_server.show_multiple_locations = True - self.start_servers(**self.__dict__.copy()) - - # Create an image - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps({'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, 
headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Get image id - image = jsonutils.loads(response.text) - image_id = image['id'] - - # Update image locations via PATCH - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type}) - http_server_pid, http_port = test_utils.start_http_server(image_id, - "image-1") - values = [{'url': 'http://127.0.0.1:%s/image-1' % http_port, - 'metadata': {'idx': '0'}}] - doc = [{'op': 'replace', - 'path': '/locations', - 'value': values}] - data = jsonutils.dumps(doc) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code) - - # Download an image should work - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/json'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - - # Stop http server used to update image location - os.kill(http_server_pid, signal.SIGKILL) - - # Download an image should raise HTTPServiceUnavailable - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/json'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.SERVICE_UNAVAILABLE, response.status_code) - - # Image Deletion should work - path = self._url('/v2/images/%s' % image_id) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # This image should be no longer be directly accessible - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - self.stop_servers() - - def test_image_modification_works_for_owning_tenant_id(self): - rules = { - "context_is_admin": "role:admin", - "default": "", - "add_image": "", - 
"get_image": "", - "modify_image": "tenant:%(owner)s", - "upload_image": "", - "get_image_location": "", - "delete_image": "", - "restricted": - "not ('aki':%(container_format)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - } - - self.set_policy_rules(rules) - self.start_servers(**self.__dict__.copy()) - - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Get the image's ID - image = jsonutils.loads(response.text) - image_id = image['id'] - - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers['content-type'] = media_type - del headers['X-Roles'] - data = jsonutils.dumps([ - {'op': 'replace', 'path': '/name', 'value': 'new-name'}, - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code) - - self.stop_servers() - - def test_image_modification_fails_on_mismatched_tenant_ids(self): - rules = { - "context_is_admin": "role:admin", - "default": "", - "add_image": "", - "get_image": "", - "modify_image": "'A-Fake-Tenant-Id':%(owner)s", - "upload_image": "", - "get_image_location": "", - "delete_image": "", - "restricted": - "not ('aki':%(container_format)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - } - - self.set_policy_rules(rules) - self.start_servers(**self.__dict__.copy()) - - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - 
# Get the image's ID - image = jsonutils.loads(response.text) - image_id = image['id'] - - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers['content-type'] = media_type - del headers['X-Roles'] - data = jsonutils.dumps([ - {'op': 'replace', 'path': '/name', 'value': 'new-name'}, - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code) - - self.stop_servers() - - def test_member_additions_works_for_owning_tenant_id(self): - rules = { - "context_is_admin": "role:admin", - "default": "", - "add_image": "", - "get_image": "", - "modify_image": "", - "upload_image": "", - "get_image_location": "", - "delete_image": "", - "restricted": - "not ('aki':%(container_format)s and role:_member_)", - "download_image": "role:admin or rule:restricted", - "add_member": "tenant:%(owner)s", - } - - self.set_policy_rules(rules) - self.start_servers(**self.__dict__.copy()) - - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Get the image's ID - image = jsonutils.loads(response.text) - image_id = image['id'] - - # Get the image's members resource - path = self._url('/v2/images/%s/members' % image_id) - body = jsonutils.dumps({'member': TENANT3}) - del headers['X-Roles'] - response = requests.post(path, headers=headers, data=body) - self.assertEqual(http.OK, response.status_code) - - self.stop_servers() - - def test_image_additions_works_only_for_specific_tenant_id(self): - rules = { - "context_is_admin": "role:admin", - "default": "", - "add_image": "'{0}':%(owner)s".format(TENANT1), - "get_image": "", - "modify_image": "", - "upload_image": "", - "get_image_location": 
"", - "delete_image": "", - "restricted": - "not ('aki':%(container_format)s and role:_member_)", - "download_image": "role:admin or rule:restricted", - "add_member": "", - } - - self.set_policy_rules(rules) - self.start_servers(**self.__dict__.copy()) - - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - headers['X-Tenant-Id'] = TENANT2 - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code) - - self.stop_servers() - - def test_owning_tenant_id_can_retrieve_image_information(self): - rules = { - "context_is_admin": "role:admin", - "default": "", - "add_image": "", - "get_image": "tenant:%(owner)s", - "modify_image": "", - "upload_image": "", - "get_image_location": "", - "delete_image": "", - "restricted": - "not ('aki':%(container_format)s and role:_member_)", - "download_image": "role:admin or rule:restricted", - "add_member": "", - } - - self.set_policy_rules(rules) - self.start_servers(**self.__dict__.copy()) - - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Remove the admin role - del headers['X-Roles'] - # Get the image's ID - image = jsonutils.loads(response.text) - image_id = image['id'] - - # Can retrieve the image as TENANT1 - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - - # Can 
retrieve the image's members as TENANT1 - path = self._url('/v2/images/%s/members' % image_id) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - - headers['X-Tenant-Id'] = TENANT2 - response = requests.get(path, headers=headers) - self.assertEqual(http.FORBIDDEN, response.status_code) - - self.stop_servers() - - def test_owning_tenant_can_publicize_image(self): - rules = { - "context_is_admin": "role:admin", - "default": "", - "add_image": "", - "publicize_image": "tenant:%(owner)s", - "get_image": "tenant:%(owner)s", - "modify_image": "", - "upload_image": "", - "get_image_location": "", - "delete_image": "", - "restricted": - "not ('aki':%(container_format)s and role:_member_)", - "download_image": "role:admin or rule:restricted", - "add_member": "", - } - - self.set_policy_rules(rules) - self.start_servers(**self.__dict__.copy()) - - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Get the image's ID - image = jsonutils.loads(response.text) - image_id = image['id'] - - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({ - 'Content-Type': 'application/openstack-images-v2.1-json-patch', - 'X-Tenant-Id': TENANT1, - }) - doc = [{'op': 'replace', 'path': '/visibility', 'value': 'public'}] - data = jsonutils.dumps(doc) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code) - - def test_owning_tenant_can_communitize_image(self): - rules = { - "context_is_admin": "role:admin", - "default": "", - "add_image": "", - "communitize_image": "tenant:%(owner)s", - "get_image": "tenant:%(owner)s", - "modify_image": "", - "upload_image": "", - 
"get_image_location": "", - "delete_image": "", - "restricted": - "not ('aki':%(container_format)s and role:_member_)", - "download_image": "role:admin or rule:restricted", - "add_member": "", - } - - self.set_policy_rules(rules) - self.start_servers(**self.__dict__.copy()) - - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(201, response.status_code) - - # Get the image's ID - image = jsonutils.loads(response.text) - image_id = image['id'] - - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({ - 'Content-Type': 'application/openstack-images-v2.1-json-patch', - 'X-Tenant-Id': TENANT1, - }) - doc = [{'op': 'replace', 'path': '/visibility', 'value': 'community'}] - data = jsonutils.dumps(doc) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(200, response.status_code) - - def test_owning_tenant_can_delete_image(self): - rules = { - "context_is_admin": "role:admin", - "default": "", - "add_image": "", - "publicize_image": "tenant:%(owner)s", - "get_image": "tenant:%(owner)s", - "modify_image": "", - "upload_image": "", - "get_image_location": "", - "delete_image": "", - "restricted": - "not ('aki':%(container_format)s and role:_member_)", - "download_image": "role:admin or rule:restricted", - "add_member": "", - } - - self.set_policy_rules(rules) - self.start_servers(**self.__dict__.copy()) - - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Get 
the image's ID - image = jsonutils.loads(response.text) - image_id = image['id'] - - path = self._url('/v2/images/%s' % image_id) - response = requests.delete(path, headers=headers) - self.assertEqual(http.NO_CONTENT, response.status_code) - - def test_list_show_ok_when_get_location_allowed_for_admins(self): - self.api_server.show_image_direct_url = True - self.api_server.show_multiple_locations = True - # setup context to allow a list locations by admin only - rules = { - "context_is_admin": "role:admin", - "default": "", - "add_image": "", - "get_image": "", - "modify_image": "", - "upload_image": "", - "get_image_location": "role:admin", - "delete_image": "", - "restricted": "", - "download_image": "", - "add_member": "", - } - - self.set_policy_rules(rules) - self.start_servers(**self.__dict__.copy()) - - # Create an image - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Tenant-Id': TENANT1}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Get the image's ID - image = jsonutils.loads(response.text) - image_id = image['id'] - - # Can retrieve the image as TENANT1 - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - - # Can list images as TENANT1 - path = self._url('/v2/images') - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - - self.stop_servers() - - def test_image_size_cap(self): - self.api_server.image_size_cap = 128 - self.start_servers(**self.__dict__.copy()) - # create an image - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps({'name': 'image-size-cap-test-image', - 'type': 'kernel', 'disk_format': 'aki', - 
'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - image = jsonutils.loads(response.text) - image_id = image['id'] - - # try to populate it with oversized data - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream'}) - - class StreamSim(object): - # Using a one-shot iterator to force chunked transfer in the PUT - # request - def __init__(self, size): - self.size = size - - def __iter__(self): - yield b'Z' * self.size - - response = requests.put(path, headers=headers, data=StreamSim( - self.api_server.image_size_cap + 1)) - self.assertEqual(http.REQUEST_ENTITY_TOO_LARGE, response.status_code) - - # hashlib.md5('Z'*129).hexdigest() - # == '76522d28cb4418f12704dfa7acd6e7ee' - # If the image has this checksum, it means that the whole stream was - # accepted and written to the store, which should not be the case. - path = self._url('/v2/images/{0}'.format(image_id)) - headers = self._headers({'content-type': 'application/json'}) - response = requests.get(path, headers=headers) - image_checksum = jsonutils.loads(response.text).get('checksum') - self.assertNotEqual(image_checksum, '76522d28cb4418f12704dfa7acd6e7ee') - - def test_permissions(self): - self.start_servers(**self.__dict__.copy()) - # Create an image that belongs to TENANT1 - path = self._url('/v2/images') - headers = self._headers({'Content-Type': 'application/json'}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'raw', - 'container_format': 'bare'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image_id = jsonutils.loads(response.text)['id'] - - # Upload some image data - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream'}) - response = requests.put(path, headers=headers, data='ZZZZZ') - 
self.assertEqual(http.NO_CONTENT, response.status_code) - - # TENANT1 should see the image in their list - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(image_id, images[0]['id']) - - # TENANT1 should be able to access the image directly - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - - # TENANT2 should not see the image in their list - path = self._url('/v2/images') - headers = self._headers({'X-Tenant-Id': TENANT2}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - # TENANT2 should not be able to access the image directly - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({'X-Tenant-Id': TENANT2}) - response = requests.get(path, headers=headers) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # TENANT2 should not be able to modify the image, either - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({ - 'Content-Type': 'application/openstack-images-v2.1-json-patch', - 'X-Tenant-Id': TENANT2, - }) - doc = [{'op': 'replace', 'path': '/name', 'value': 'image-2'}] - data = jsonutils.dumps(doc) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # TENANT2 should not be able to delete the image, either - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({'X-Tenant-Id': TENANT2}) - response = requests.delete(path, headers=headers) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Publicize the image as an admin of TENANT1 - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({ - 
'Content-Type': 'application/openstack-images-v2.1-json-patch', - 'X-Roles': 'admin', - }) - doc = [{'op': 'replace', 'path': '/visibility', 'value': 'public'}] - data = jsonutils.dumps(doc) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code) - - # TENANT3 should now see the image in their list - path = self._url('/v2/images') - headers = self._headers({'X-Tenant-Id': TENANT3}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(image_id, images[0]['id']) - - # TENANT3 should also be able to access the image directly - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({'X-Tenant-Id': TENANT3}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - - # TENANT3 still should not be able to modify the image - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({ - 'Content-Type': 'application/openstack-images-v2.1-json-patch', - 'X-Tenant-Id': TENANT3, - }) - doc = [{'op': 'replace', 'path': '/name', 'value': 'image-2'}] - data = jsonutils.dumps(doc) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # TENANT3 should not be able to delete the image, either - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({'X-Tenant-Id': TENANT3}) - response = requests.delete(path, headers=headers) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Image data should still be present after the failed delete - path = self._url('/v2/images/%s/file' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - self.assertEqual(response.text, 'ZZZZZ') - - self.stop_servers() - - def test_property_protections_with_roles(self): - # Enable property protection - 
self.api_server.property_protection_file = self.property_file_roles - self.start_servers(**self.__dict__.copy()) - - # Image list should be empty - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - # Create an image for role member with extra props - # Raises 403 since user is not allowed to set 'foo' - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'member'}) - data = jsonutils.dumps({'name': 'image-1', 'foo': 'bar', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_owner_foo': 'o_s_bar'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Create an image for role member without 'foo' - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'member'}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki', - 'x_owner_foo': 'o_s_bar'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Returned image entity should have 'x_owner_foo' - image = jsonutils.loads(response.text) - image_id = image['id'] - expected_image = { - 'status': 'queued', - 'name': 'image-1', - 'tags': [], - 'visibility': 'shared', - 'self': '/v2/images/%s' % image_id, - 'protected': False, - 'file': '/v2/images/%s/file' % image_id, - 'min_disk': 0, - 'x_owner_foo': 'o_s_bar', - 'min_ram': 0, - 'schema': '/v2/schemas/image', - } - for key, value in expected_image.items(): - self.assertEqual(value, image[key], key) - - # Create an image for role spl_role with extra props - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'spl_role'}) - data = jsonutils.dumps({'name': 
'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'spl_create_prop': 'create_bar', - 'spl_create_prop_policy': 'create_policy_bar', - 'spl_read_prop': 'read_bar', - 'spl_update_prop': 'update_bar', - 'spl_delete_prop': 'delete_bar', - 'spl_delete_empty_prop': ''}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - - # Attempt to replace, add and remove properties which are forbidden - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'spl_role'}) - data = jsonutils.dumps([ - {'op': 'replace', 'path': '/spl_read_prop', 'value': 'r'}, - {'op': 'replace', 'path': '/spl_update_prop', 'value': 'u'}, - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code, response.text) - - # Attempt to replace, add and remove properties which are forbidden - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'spl_role'}) - data = jsonutils.dumps([ - {'op': 'add', 'path': '/spl_new_prop', 'value': 'new'}, - {'op': 'remove', 'path': '/spl_create_prop'}, - {'op': 'remove', 'path': '/spl_delete_prop'}, - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code, response.text) - - # Attempt to replace properties - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'spl_role'}) - data = jsonutils.dumps([ - # Updating an empty property to verify bug #1332103. 
- {'op': 'replace', 'path': '/spl_update_prop', 'value': ''}, - {'op': 'replace', 'path': '/spl_update_prop', 'value': 'u'}, - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - - # Returned image entity should reflect the changes - image = jsonutils.loads(response.text) - - # 'spl_update_prop' has update permission for spl_role - # hence the value has changed - self.assertEqual('u', image['spl_update_prop']) - - # Attempt to remove properties - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'spl_role'}) - data = jsonutils.dumps([ - {'op': 'remove', 'path': '/spl_delete_prop'}, - # Deleting an empty property to verify bug #1332103. - {'op': 'remove', 'path': '/spl_delete_empty_prop'}, - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - - # Returned image entity should reflect the changes - image = jsonutils.loads(response.text) - - # 'spl_delete_prop' and 'spl_delete_empty_prop' have delete - # permission for spl_role hence the property has been deleted - self.assertNotIn('spl_delete_prop', image.keys()) - self.assertNotIn('spl_delete_empty_prop', image.keys()) - - # Image Deletion should work - path = self._url('/v2/images/%s' % image_id) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # This image should be no longer be directly accessible - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - self.stop_servers() - - def test_property_protections_with_policies(self): - # Enable property protection - self.api_server.property_protection_file = self.property_file_policies - 
self.api_server.property_protection_rule_format = 'policies' - self.start_servers(**self.__dict__.copy()) - - # Image list should be empty - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - # Create an image for role member with extra props - # Raises 403 since user is not allowed to set 'foo' - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'member'}) - data = jsonutils.dumps({'name': 'image-1', 'foo': 'bar', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_owner_foo': 'o_s_bar'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Create an image for role member without 'foo' - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'member'}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Returned image entity - image = jsonutils.loads(response.text) - image_id = image['id'] - expected_image = { - 'status': 'queued', - 'name': 'image-1', - 'tags': [], - 'visibility': 'shared', - 'self': '/v2/images/%s' % image_id, - 'protected': False, - 'file': '/v2/images/%s/file' % image_id, - 'min_disk': 0, - 'min_ram': 0, - 'schema': '/v2/schemas/image', - } - for key, value in expected_image.items(): - self.assertEqual(value, image[key], key) - - # Create an image for role spl_role with extra props - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'spl_role, admin'}) - data = jsonutils.dumps({'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 
'spl_creator_policy': 'creator_bar', - 'spl_default_policy': 'default_bar'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - self.assertEqual('creator_bar', image['spl_creator_policy']) - self.assertEqual('default_bar', image['spl_default_policy']) - - # Attempt to replace a property which is permitted - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'admin'}) - data = jsonutils.dumps([ - # Updating an empty property to verify bug #1332103. - {'op': 'replace', 'path': '/spl_creator_policy', 'value': ''}, - {'op': 'replace', 'path': '/spl_creator_policy', 'value': 'r'}, - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - - # Returned image entity should reflect the changes - image = jsonutils.loads(response.text) - - # 'spl_creator_policy' has update permission for admin - # hence the value has changed - self.assertEqual('r', image['spl_creator_policy']) - - # Attempt to replace a property which is forbidden - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'spl_role'}) - data = jsonutils.dumps([ - {'op': 'replace', 'path': '/spl_creator_policy', 'value': 'z'}, - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code, response.text) - - # Attempt to read properties - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({'content-type': media_type, - 'X-Roles': 'random_role'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - - image = jsonutils.loads(response.text) - # 
'random_role' is allowed read 'spl_default_policy'. - self.assertEqual(image['spl_default_policy'], 'default_bar') - # 'random_role' is forbidden to read 'spl_creator_policy'. - self.assertNotIn('spl_creator_policy', image) - - # Attempt to replace and remove properties which are permitted - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'admin'}) - data = jsonutils.dumps([ - # Deleting an empty property to verify bug #1332103. - {'op': 'replace', 'path': '/spl_creator_policy', 'value': ''}, - {'op': 'remove', 'path': '/spl_creator_policy'}, - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - - # Returned image entity should reflect the changes - image = jsonutils.loads(response.text) - - # 'spl_creator_policy' has delete permission for admin - # hence the value has been deleted - self.assertNotIn('spl_creator_policy', image) - - # Attempt to read a property that is permitted - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({'content-type': media_type, - 'X-Roles': 'random_role'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - - # Returned image entity should reflect the changes - image = jsonutils.loads(response.text) - self.assertEqual(image['spl_default_policy'], 'default_bar') - - # Image Deletion should work - path = self._url('/v2/images/%s' % image_id) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # This image should be no longer be directly accessible - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - self.stop_servers() - - def test_property_protections_special_chars_roles(self): - # 
Enable property protection - self.api_server.property_protection_file = self.property_file_roles - self.start_servers(**self.__dict__.copy()) - - # Verify both admin and unknown role can create properties marked with - # '@' - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_all_permitted_admin': '1' - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - expected_image = { - 'status': 'queued', - 'name': 'image-1', - 'tags': [], - 'visibility': 'shared', - 'self': '/v2/images/%s' % image_id, - 'protected': False, - 'file': '/v2/images/%s/file' % image_id, - 'min_disk': 0, - 'x_all_permitted_admin': '1', - 'min_ram': 0, - 'schema': '/v2/schemas/image', - } - for key, value in expected_image.items(): - self.assertEqual(value, image[key], key) - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'joe_soap'}) - data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_all_permitted_joe_soap': '1' - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - expected_image = { - 'status': 'queued', - 'name': 'image-1', - 'tags': [], - 'visibility': 'shared', - 'self': '/v2/images/%s' % image_id, - 'protected': False, - 'file': '/v2/images/%s/file' % image_id, - 'min_disk': 0, - 'x_all_permitted_joe_soap': '1', - 'min_ram': 0, - 'schema': '/v2/schemas/image', - } - for key, value in expected_image.items(): - self.assertEqual(value, image[key], key) - - # Verify both admin and unknown role can read properties marked with - # '@' - headers = 
self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertEqual('1', image['x_all_permitted_joe_soap']) - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'joe_soap'}) - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertEqual('1', image['x_all_permitted_joe_soap']) - - # Verify both admin and unknown role can update properties marked with - # '@' - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'admin'}) - data = jsonutils.dumps([ - {'op': 'replace', - 'path': '/x_all_permitted_joe_soap', 'value': '2'} - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - image = jsonutils.loads(response.text) - self.assertEqual('2', image['x_all_permitted_joe_soap']) - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'joe_soap'}) - data = jsonutils.dumps([ - {'op': 'replace', - 'path': '/x_all_permitted_joe_soap', 'value': '3'} - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - image = jsonutils.loads(response.text) - self.assertEqual('3', image['x_all_permitted_joe_soap']) - - # Verify both admin and unknown role can delete properties marked with - # '@' - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - 
data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_all_permitted_a': '1', - 'x_all_permitted_b': '2' - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'admin'}) - data = jsonutils.dumps([ - {'op': 'remove', 'path': '/x_all_permitted_a'} - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - image = jsonutils.loads(response.text) - self.assertNotIn('x_all_permitted_a', image.keys()) - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'joe_soap'}) - data = jsonutils.dumps([ - {'op': 'remove', 'path': '/x_all_permitted_b'} - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - image = jsonutils.loads(response.text) - self.assertNotIn('x_all_permitted_b', image.keys()) - - # Verify neither admin nor unknown role can create a property protected - # with '!' 
- path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_none_permitted_admin': '1' - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code) - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'joe_soap'}) - data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_none_permitted_joe_soap': '1' - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Verify neither admin nor unknown role can read properties marked with - # '!' - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_none_read': '1' - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - self.assertNotIn('x_none_read', image.keys()) - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertNotIn('x_none_read', image.keys()) - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'joe_soap'}) - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertNotIn('x_none_read', image.keys()) - - # Verify neither 
admin nor unknown role can update properties marked - # with '!' - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_none_update': '1' - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - self.assertEqual('1', image['x_none_update']) - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'admin'}) - data = jsonutils.dumps([ - {'op': 'replace', - 'path': '/x_none_update', 'value': '2'} - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code, response.text) - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'joe_soap'}) - data = jsonutils.dumps([ - {'op': 'replace', - 'path': '/x_none_update', 'value': '3'} - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.CONFLICT, response.status_code, response.text) - - # Verify neither admin nor unknown role can delete properties marked - # with '!' 
- path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_none_delete': '1', - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'admin'}) - data = jsonutils.dumps([ - {'op': 'remove', 'path': '/x_none_delete'} - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code, response.text) - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'joe_soap'}) - data = jsonutils.dumps([ - {'op': 'remove', 'path': '/x_none_delete'} - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.CONFLICT, response.status_code, response.text) - - self.stop_servers() - - def test_property_protections_special_chars_policies(self): - # Enable property protection - self.api_server.property_protection_file = self.property_file_policies - self.api_server.property_protection_rule_format = 'policies' - self.start_servers(**self.__dict__.copy()) - - # Verify both admin and unknown role can create properties marked with - # '@' - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_all_permitted_admin': '1' - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = 
jsonutils.loads(response.text) - image_id = image['id'] - expected_image = { - 'status': 'queued', - 'name': 'image-1', - 'tags': [], - 'visibility': 'shared', - 'self': '/v2/images/%s' % image_id, - 'protected': False, - 'file': '/v2/images/%s/file' % image_id, - 'min_disk': 0, - 'x_all_permitted_admin': '1', - 'min_ram': 0, - 'schema': '/v2/schemas/image', - } - for key, value in expected_image.items(): - self.assertEqual(value, image[key], key) - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'joe_soap'}) - data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_all_permitted_joe_soap': '1' - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - expected_image = { - 'status': 'queued', - 'name': 'image-1', - 'tags': [], - 'visibility': 'shared', - 'self': '/v2/images/%s' % image_id, - 'protected': False, - 'file': '/v2/images/%s/file' % image_id, - 'min_disk': 0, - 'x_all_permitted_joe_soap': '1', - 'min_ram': 0, - 'schema': '/v2/schemas/image', - } - for key, value in expected_image.items(): - self.assertEqual(value, image[key], key) - - # Verify both admin and unknown role can read properties marked with - # '@' - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertEqual('1', image['x_all_permitted_joe_soap']) - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'joe_soap'}) - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) 
- self.assertEqual('1', image['x_all_permitted_joe_soap']) - - # Verify both admin and unknown role can update properties marked with - # '@' - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'admin'}) - data = jsonutils.dumps([ - {'op': 'replace', - 'path': '/x_all_permitted_joe_soap', 'value': '2'} - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - image = jsonutils.loads(response.text) - self.assertEqual('2', image['x_all_permitted_joe_soap']) - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'joe_soap'}) - data = jsonutils.dumps([ - {'op': 'replace', - 'path': '/x_all_permitted_joe_soap', 'value': '3'} - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - image = jsonutils.loads(response.text) - self.assertEqual('3', image['x_all_permitted_joe_soap']) - - # Verify both admin and unknown role can delete properties marked with - # '@' - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_all_permitted_a': '1', - 'x_all_permitted_b': '2' - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'admin'}) - data = jsonutils.dumps([ - {'op': 'remove', 'path': '/x_all_permitted_a'} - ]) - 
response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - image = jsonutils.loads(response.text) - self.assertNotIn('x_all_permitted_a', image.keys()) - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'joe_soap'}) - data = jsonutils.dumps([ - {'op': 'remove', 'path': '/x_all_permitted_b'} - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - image = jsonutils.loads(response.text) - self.assertNotIn('x_all_permitted_b', image.keys()) - - # Verify neither admin nor unknown role can create a property protected - # with '!' - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_none_permitted_admin': '1' - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code) - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'joe_soap'}) - data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_none_permitted_joe_soap': '1' - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Verify neither admin nor unknown role can read properties marked with - # '!' 
- path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_none_read': '1' - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - self.assertNotIn('x_none_read', image.keys()) - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertNotIn('x_none_read', image.keys()) - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'joe_soap'}) - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertNotIn('x_none_read', image.keys()) - - # Verify neither admin nor unknown role can update properties marked - # with '!' 
- path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_none_update': '1' - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - self.assertEqual('1', image['x_none_update']) - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'admin'}) - data = jsonutils.dumps([ - {'op': 'replace', - 'path': '/x_none_update', 'value': '2'} - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code, response.text) - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'joe_soap'}) - data = jsonutils.dumps([ - {'op': 'replace', - 'path': '/x_none_update', 'value': '3'} - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.CONFLICT, response.status_code, response.text) - - # Verify neither admin nor unknown role can delete properties marked - # with '!' 
- path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json', - 'X-Roles': 'admin'}) - data = jsonutils.dumps({ - 'name': 'image-1', - 'disk_format': 'aki', - 'container_format': 'aki', - 'x_none_delete': '1', - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'admin'}) - data = jsonutils.dumps([ - {'op': 'remove', 'path': '/x_none_delete'} - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.FORBIDDEN, response.status_code, response.text) - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type, - 'X-Roles': 'joe_soap'}) - data = jsonutils.dumps([ - {'op': 'remove', 'path': '/x_none_delete'} - ]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.CONFLICT, response.status_code, response.text) - - self.stop_servers() - - def test_tag_lifecycle(self): - self.start_servers(**self.__dict__.copy()) - # Create an image with a tag - duplicate should be ignored - path = self._url('/v2/images') - headers = self._headers({'Content-Type': 'application/json'}) - data = jsonutils.dumps({'name': 'image-1', 'tags': ['sniff', 'sniff']}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image_id = jsonutils.loads(response.text)['id'] - - # Image should show a list with a single tag - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - tags = jsonutils.loads(response.text)['tags'] - 
self.assertEqual(['sniff'], tags) - - # Delete all tags - for tag in tags: - path = self._url('/v2/images/%s/tags/%s' % (image_id, tag)) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Update image with too many tags via PUT - # Configured limit is 10 tags - for i in range(10): - path = self._url('/v2/images/%s/tags/foo%i' % (image_id, i)) - response = requests.put(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # 11th tag should fail - path = self._url('/v2/images/%s/tags/fail_me' % image_id) - response = requests.put(path, headers=self._headers()) - self.assertEqual(http.REQUEST_ENTITY_TOO_LARGE, response.status_code) - - # Make sure the 11th tag was not added - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - tags = jsonutils.loads(response.text)['tags'] - self.assertEqual(10, len(tags)) - - # Update image tags via PATCH - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type}) - doc = [ - { - 'op': 'replace', - 'path': '/tags', - 'value': ['foo'], - }, - ] - data = jsonutils.dumps(doc) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code) - - # Update image with too many tags via PATCH - # Configured limit is 10 tags - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type}) - tags = ['foo%d' % i for i in range(11)] - doc = [ - { - 'op': 'replace', - 'path': '/tags', - 'value': tags, - }, - ] - data = jsonutils.dumps(doc) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.REQUEST_ENTITY_TOO_LARGE, response.status_code) - - # Tags should 
not have changed since request was over limit - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - tags = jsonutils.loads(response.text)['tags'] - self.assertEqual(['foo'], tags) - - # Update image with duplicate tag - it should be ignored - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type}) - doc = [ - { - 'op': 'replace', - 'path': '/tags', - 'value': ['sniff', 'snozz', 'snozz'], - }, - ] - data = jsonutils.dumps(doc) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code) - tags = jsonutils.loads(response.text)['tags'] - self.assertEqual(['sniff', 'snozz'], sorted(tags)) - - # Image should show the appropriate tags - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - tags = jsonutils.loads(response.text)['tags'] - self.assertEqual(['sniff', 'snozz'], sorted(tags)) - - # Attempt to tag the image with a duplicate should be ignored - path = self._url('/v2/images/%s/tags/snozz' % image_id) - response = requests.put(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Create another more complex tag - path = self._url('/v2/images/%s/tags/gabe%%40example.com' % image_id) - response = requests.put(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Double-check that the tags container on the image is populated - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - tags = jsonutils.loads(response.text)['tags'] - self.assertEqual(['gabe@example.com', 'sniff', 'snozz'], - sorted(tags)) - - # Query images by single tag - 
path = self._url('/v2/images?tag=sniff') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(1, len(images)) - self.assertEqual('image-1', images[0]['name']) - - # Query images by multiple tags - path = self._url('/v2/images?tag=sniff&tag=snozz') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(1, len(images)) - self.assertEqual('image-1', images[0]['name']) - - # Query images by tag and other attributes - path = self._url('/v2/images?tag=sniff&status=queued') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(1, len(images)) - self.assertEqual('image-1', images[0]['name']) - - # Query images by tag and a nonexistent tag - path = self._url('/v2/images?tag=sniff&tag=fake') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - # The tag should be deletable - path = self._url('/v2/images/%s/tags/gabe%%40example.com' % image_id) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # List of tags should reflect the deletion - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - tags = jsonutils.loads(response.text)['tags'] - self.assertEqual(['sniff', 'snozz'], sorted(tags)) - - # Deleting the same tag should return a 404 - path = self._url('/v2/images/%s/tags/gabe%%40example.com' % image_id) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, 
response.status_code) - - # The tags won't be able to query the images after deleting - path = self._url('/v2/images?tag=gabe%%40example.com') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - # Try to add a tag that is too long - big_tag = 'a' * 300 - path = self._url('/v2/images/%s/tags/%s' % (image_id, big_tag)) - response = requests.put(path, headers=self._headers()) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - # Tags should not have changed since request was over limit - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - tags = jsonutils.loads(response.text)['tags'] - self.assertEqual(['sniff', 'snozz'], sorted(tags)) - - self.stop_servers() - - def test_images_container(self): - # Image list should be empty and no next link should be present - self.start_servers(**self.__dict__.copy()) - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - first = jsonutils.loads(response.text)['first'] - self.assertEqual(0, len(images)) - self.assertNotIn('next', jsonutils.loads(response.text)) - self.assertEqual('/v2/images', first) - - # Create 7 images - images = [] - fixtures = [ - {'name': 'image-3', 'type': 'kernel', 'ping': 'pong', - 'container_format': 'ami', 'disk_format': 'ami'}, - {'name': 'image-4', 'type': 'kernel', 'ping': 'pong', - 'container_format': 'bare', 'disk_format': 'ami'}, - {'name': 'image-1', 'type': 'kernel', 'ping': 'pong'}, - {'name': 'image-3', 'type': 'ramdisk', 'ping': 'pong'}, - {'name': 'image-2', 'type': 'kernel', 'ping': 'ding'}, - {'name': 'image-3', 'type': 'kernel', 'ping': 'pong'}, - {'name': 'image-2,image-5', 'type': 'kernel', 
'ping': 'pong'}, - ] - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - for fixture in fixtures: - data = jsonutils.dumps(fixture) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - images.append(jsonutils.loads(response.text)) - - # Image list should contain 7 images - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - body = jsonutils.loads(response.text) - self.assertEqual(7, len(body['images'])) - self.assertEqual('/v2/images', body['first']) - self.assertNotIn('next', jsonutils.loads(response.text)) - - # Image list filters by created_at time - url_template = '/v2/images?created_at=lt:%s' - path = self._url(url_template % images[0]['created_at']) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - body = jsonutils.loads(response.text) - self.assertEqual(0, len(body['images'])) - self.assertEqual(url_template % images[0]['created_at'], - urllib.parse.unquote(body['first'])) - - # Image list filters by updated_at time - url_template = '/v2/images?updated_at=lt:%s' - path = self._url(url_template % images[2]['updated_at']) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - body = jsonutils.loads(response.text) - self.assertGreaterEqual(3, len(body['images'])) - self.assertEqual(url_template % images[2]['updated_at'], - urllib.parse.unquote(body['first'])) - - # Image list filters by updated_at and created time with invalid value - url_template = '/v2/images?%s=lt:invalid_value' - for filter in ['updated_at', 'created_at']: - path = self._url(url_template % filter) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - # Image list filters by updated_at and created_at with invalid 
operator - url_template = '/v2/images?%s=invalid_operator:2015-11-19T12:24:02Z' - for filter in ['updated_at', 'created_at']: - path = self._url(url_template % filter) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - # Image list filters by non-'URL encoding' value - path = self._url('/v2/images?name=%FF') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - # Image list filters by name with in operator - url_template = '/v2/images?name=in:%s' - filter_value = 'image-1,image-2' - path = self._url(url_template % filter_value) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - body = jsonutils.loads(response.text) - self.assertGreaterEqual(3, len(body['images'])) - - # Image list filters by container_format with in operator - url_template = '/v2/images?container_format=in:%s' - filter_value = 'bare,ami' - path = self._url(url_template % filter_value) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - body = jsonutils.loads(response.text) - self.assertGreaterEqual(2, len(body['images'])) - - # Image list filters by disk_format with in operator - url_template = '/v2/images?disk_format=in:%s' - filter_value = 'bare,ami,iso' - path = self._url(url_template % filter_value) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - body = jsonutils.loads(response.text) - self.assertGreaterEqual(2, len(body['images'])) - - # Begin pagination after the first image - template_url = ('/v2/images?limit=2&sort_dir=asc&sort_key=name' - '&marker=%s&type=kernel&ping=pong') - path = self._url(template_url % images[2]['id']) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - body = jsonutils.loads(response.text) - 
self.assertEqual(2, len(body['images'])) - response_ids = [image['id'] for image in body['images']] - self.assertEqual([images[6]['id'], images[0]['id']], response_ids) - - # Continue pagination using next link from previous request - path = self._url(body['next']) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - body = jsonutils.loads(response.text) - self.assertEqual(2, len(body['images'])) - response_ids = [image['id'] for image in body['images']] - self.assertEqual([images[5]['id'], images[1]['id']], response_ids) - - # Continue pagination - expect no results - path = self._url(body['next']) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - body = jsonutils.loads(response.text) - self.assertEqual(0, len(body['images'])) - - # Delete first image - path = self._url('/v2/images/%s' % images[0]['id']) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Ensure bad request for using a deleted image as marker - path = self._url('/v2/images?marker=%s' % images[0]['id']) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - self.stop_servers() - - def test_image_visibility_to_different_users(self): - self.cleanup() - self.api_server.deployment_flavor = 'fakeauth' - self.registry_server.deployment_flavor = 'fakeauth' - - kwargs = self.__dict__.copy() - kwargs['use_user_token'] = True - self.start_servers(**kwargs) - - owners = ['admin', 'tenant1', 'tenant2', 'none'] - visibilities = ['public', 'private', 'shared', 'community'] - - for owner in owners: - for visibility in visibilities: - path = self._url('/v2/images') - headers = self._headers({ - 'content-type': 'application/json', - 'X-Auth-Token': 'createuser:%s:admin' % owner, - }) - data = jsonutils.dumps({ - 'name': '%s-%s' % (owner, visibility), - 'visibility': 
visibility, - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - def list_images(tenant, role='', visibility=None): - auth_token = 'user:%s:%s' % (tenant, role) - headers = {'X-Auth-Token': auth_token} - path = self._url('/v2/images') - if visibility is not None: - path += '?visibility=%s' % visibility - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - return jsonutils.loads(response.text)['images'] - - # 1. Known user sees public and their own images - images = list_images('tenant1') - self.assertEqual(7, len(images)) - for image in images: - self.assertTrue(image['visibility'] == 'public' - or 'tenant1' in image['name']) - - # 2. Known user, visibility=public, sees all public images - images = list_images('tenant1', visibility='public') - self.assertEqual(4, len(images)) - for image in images: - self.assertEqual('public', image['visibility']) - - # 3. Known user, visibility=private, sees only their private image - images = list_images('tenant1', visibility='private') - self.assertEqual(1, len(images)) - image = images[0] - self.assertEqual('private', image['visibility']) - self.assertIn('tenant1', image['name']) - - # 4. Known user, visibility=shared, sees only their shared image - images = list_images('tenant1', visibility='shared') - self.assertEqual(1, len(images)) - image = images[0] - self.assertEqual('shared', image['visibility']) - self.assertIn('tenant1', image['name']) - - # 5. Known user, visibility=community, sees all community images - images = list_images('tenant1', visibility='community') - self.assertEqual(4, len(images)) - for image in images: - self.assertEqual('community', image['visibility']) - - # 6. Unknown user sees only public images - images = list_images('none') - self.assertEqual(4, len(images)) - for image in images: - self.assertEqual('public', image['visibility']) - - # 7. 
Unknown user, visibility=public, sees only public images - images = list_images('none', visibility='public') - self.assertEqual(4, len(images)) - for image in images: - self.assertEqual('public', image['visibility']) - - # 8. Unknown user, visibility=private, sees no images - images = list_images('none', visibility='private') - self.assertEqual(0, len(images)) - - # 9. Unknown user, visibility=shared, sees no images - images = list_images('none', visibility='shared') - self.assertEqual(0, len(images)) - - # 10. Unknown user, visibility=community, sees only community images - images = list_images('none', visibility='community') - self.assertEqual(4, len(images)) - for image in images: - self.assertEqual('community', image['visibility']) - - # 11. Unknown admin sees all images except for community images - images = list_images('none', role='admin') - self.assertEqual(12, len(images)) - - # 12. Unknown admin, visibility=public, shows only public images - images = list_images('none', role='admin', visibility='public') - self.assertEqual(4, len(images)) - for image in images: - self.assertEqual('public', image['visibility']) - - # 13. Unknown admin, visibility=private, sees only private images - images = list_images('none', role='admin', visibility='private') - self.assertEqual(4, len(images)) - for image in images: - self.assertEqual('private', image['visibility']) - - # 14. Unknown admin, visibility=shared, sees only shared images - images = list_images('none', role='admin', visibility='shared') - self.assertEqual(4, len(images)) - for image in images: - self.assertEqual('shared', image['visibility']) - - # 15. Unknown admin, visibility=community, sees only community images - images = list_images('none', role='admin', visibility='community') - self.assertEqual(4, len(images)) - for image in images: - self.assertEqual('community', image['visibility']) - - # 16. 
Known admin sees all images, except community images owned by - # others - images = list_images('admin', role='admin') - self.assertEqual(13, len(images)) - - # 17. Known admin, visibility=public, sees all public images - images = list_images('admin', role='admin', visibility='public') - self.assertEqual(4, len(images)) - for image in images: - self.assertEqual('public', image['visibility']) - - # 18. Known admin, visibility=private, sees all private images - images = list_images('admin', role='admin', visibility='private') - self.assertEqual(4, len(images)) - for image in images: - self.assertEqual('private', image['visibility']) - - # 19. Known admin, visibility=shared, sees all shared images - images = list_images('admin', role='admin', visibility='shared') - self.assertEqual(4, len(images)) - for image in images: - self.assertEqual('shared', image['visibility']) - - # 20. Known admin, visibility=community, sees all community images - images = list_images('admin', role='admin', visibility='community') - self.assertEqual(4, len(images)) - for image in images: - self.assertEqual('community', image['visibility']) - - self.stop_servers() - - def test_update_locations(self): - self.api_server.show_multiple_locations = True - self.start_servers(**self.__dict__.copy()) - # Create an image - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Returned image entity should have a generated id and status - image = jsonutils.loads(response.text) - image_id = image['id'] - self.assertEqual('queued', image['status']) - self.assertIsNone(image['size']) - self.assertIsNone(image['virtual_size']) - - # Update locations for the queued image - path = self._url('/v2/images/%s' % image_id) - media_type = 
'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type}) - url = 'http://127.0.0.1:%s/foo_image' % self.http_port0 - data = jsonutils.dumps([{'op': 'replace', 'path': '/locations', - 'value': [{'url': url, 'metadata': {}}] - }]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - - # The image size should be updated - path = self._url('/v2/images/%s' % image_id) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertEqual(10, image['size']) - - def test_update_locations_with_restricted_sources(self): - self.api_server.show_multiple_locations = True - self.start_servers(**self.__dict__.copy()) - # Create an image - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Returned image entity should have a generated id and status - image = jsonutils.loads(response.text) - image_id = image['id'] - self.assertEqual('queued', image['status']) - self.assertIsNone(image['size']) - self.assertIsNone(image['virtual_size']) - - # Update locations for the queued image - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type}) - data = jsonutils.dumps([{'op': 'replace', 'path': '/locations', - 'value': [{'url': 'file:///foo_image', - 'metadata': {}}] - }]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.BAD_REQUEST, response.status_code, response.text) - - data = jsonutils.dumps([{'op': 'replace', 'path': '/locations', - 'value': [{'url': 
'swift+config:///foo_image', - 'metadata': {}}] - }]) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.BAD_REQUEST, response.status_code, response.text) - - -class TestImagesWithRegistry(TestImages): - def setUp(self): - super(TestImagesWithRegistry, self).setUp() - self.api_server.data_api = ( - 'glance.tests.functional.v2.registry_data_api') - self.registry_server.deployment_flavor = 'trusted-auth' - - -class TestImagesIPv6(functional.FunctionalTest): - """Verify that API and REG servers running IPv6 can communicate""" - - def setUp(self): - """ - First applying monkey patches of functions and methods which have - IPv4 hardcoded. - """ - # Setting up initial monkey patch (1) - test_utils.get_unused_port_ipv4 = test_utils.get_unused_port - test_utils.get_unused_port_and_socket_ipv4 = ( - test_utils.get_unused_port_and_socket) - test_utils.get_unused_port = test_utils.get_unused_port_ipv6 - test_utils.get_unused_port_and_socket = ( - test_utils.get_unused_port_and_socket_ipv6) - super(TestImagesIPv6, self).setUp() - self.cleanup() - # Setting up monkey patch (2), after object is ready... - self.ping_server_ipv4 = self.ping_server - self.ping_server = self.ping_server_ipv6 - self.include_scrubber = False - - def tearDown(self): - # Cleaning up monkey patch (2). - self.ping_server = self.ping_server_ipv4 - super(TestImagesIPv6, self).tearDown() - # Cleaning up monkey patch (1). 
- test_utils.get_unused_port = test_utils.get_unused_port_ipv4 - test_utils.get_unused_port_and_socket = ( - test_utils.get_unused_port_and_socket_ipv4) - - def _url(self, path): - return "http://[::1]:%d%s" % (self.api_port, path) - - def _headers(self, custom_headers=None): - base_headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': TENANT1, - 'X-Roles': 'member', - } - base_headers.update(custom_headers or {}) - return base_headers - - def test_image_list_ipv6(self): - # Image list should be empty - self.api_server.data_api = ( - 'glance.tests.functional.v2.registry_data_api') - self.registry_server.deployment_flavor = 'trusted-auth' - - # Setting up configuration parameters properly - # (bind_host is not needed since it is replaced by monkey patches, - # but it would be reflected in the configuration file, which is - # at least improving consistency) - self.registry_server.bind_host = "::1" - self.api_server.bind_host = "::1" - self.api_server.registry_host = "::1" - self.scrubber_daemon.registry_host = "::1" - - self.start_servers(**self.__dict__.copy()) - - requests.get(self._url('/'), headers=self._headers()) - - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(200, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - -class TestImageDirectURLVisibility(functional.FunctionalTest): - - def setUp(self): - super(TestImageDirectURLVisibility, self).setUp() - self.cleanup() - self.include_scrubber = False - self.api_server.deployment_flavor = 'noauth' - - def _url(self, path): - return 'http://127.0.0.1:%d%s' % (self.api_port, path) - - def _headers(self, custom_headers=None): - base_headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 
'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': TENANT1, - 'X-Roles': 'member', - } - base_headers.update(custom_headers or {}) - return base_headers - - def test_v2_not_enabled(self): - self.api_server.enable_v2_api = False - self.start_servers(**self.__dict__.copy()) - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.MULTIPLE_CHOICES, response.status_code) - self.stop_servers() - - def test_v2_enabled(self): - self.api_server.enable_v2_api = True - self.start_servers(**self.__dict__.copy()) - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - self.stop_servers() - - def test_image_direct_url_visible(self): - - self.api_server.show_image_direct_url = True - self.start_servers(**self.__dict__.copy()) - - # Image list should be empty - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - # Create an image - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', - 'foo': 'bar', 'disk_format': 'aki', - 'container_format': 'aki', - 'visibility': 'public'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Get the image id - image = jsonutils.loads(response.text) - image_id = image['id'] - - # Image direct_url should not be visible before location is set - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({'Content-Type': 'application/json'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertNotIn('direct_url', image) - - # Upload some image 
data, setting the image location - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream'}) - response = requests.put(path, headers=headers, data='ZZZZZ') - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Image direct_url should be visible - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({'Content-Type': 'application/json'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertIn('direct_url', image) - - # Image direct_url should be visible to non-owner, non-admin user - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({'Content-Type': 'application/json', - 'X-Tenant-Id': TENANT2}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertIn('direct_url', image) - - # Image direct_url should be visible in a list - path = self._url('/v2/images') - headers = self._headers({'Content-Type': 'application/json'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text)['images'][0] - self.assertIn('direct_url', image) - - self.stop_servers() - - def test_image_multiple_location_url_visible(self): - self.api_server.show_multiple_locations = True - self.start_servers(**self.__dict__.copy()) - - # Create an image - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', - 'foo': 'bar', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Get the image id - image = jsonutils.loads(response.text) - image_id = image['id'] - - # Image locations should not be 
visible before location is set - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({'Content-Type': 'application/json'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertIn('locations', image) - self.assertEqual([], image["locations"]) - - # Upload some image data, setting the image location - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream'}) - response = requests.put(path, headers=headers, data='ZZZZZ') - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Image locations should be visible - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({'Content-Type': 'application/json'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertIn('locations', image) - loc = image['locations'] - self.assertGreater(len(loc), 0) - loc = loc[0] - self.assertIn('url', loc) - self.assertIn('metadata', loc) - - self.stop_servers() - - def test_image_direct_url_not_visible(self): - - self.api_server.show_image_direct_url = False - self.start_servers(**self.__dict__.copy()) - - # Image list should be empty - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - # Create an image - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', - 'foo': 'bar', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Get the image id - image = jsonutils.loads(response.text) - image_id = 
image['id'] - - # Upload some image data, setting the image location - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream'}) - response = requests.put(path, headers=headers, data='ZZZZZ') - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Image direct_url should not be visible - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({'Content-Type': 'application/json'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertNotIn('direct_url', image) - - # Image direct_url should not be visible in a list - path = self._url('/v2/images') - headers = self._headers({'Content-Type': 'application/json'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text)['images'][0] - self.assertNotIn('direct_url', image) - - self.stop_servers() - - -class TestImageDirectURLVisibilityWithRegistry(TestImageDirectURLVisibility): - def setUp(self): - super(TestImageDirectURLVisibilityWithRegistry, self).setUp() - self.api_server.data_api = ( - 'glance.tests.functional.v2.registry_data_api') - self.registry_server.deployment_flavor = 'trusted-auth' - - -class TestImageLocationSelectionStrategy(functional.FunctionalTest): - - def setUp(self): - super(TestImageLocationSelectionStrategy, self).setUp() - self.cleanup() - self.include_scrubber = False - self.api_server.deployment_flavor = 'noauth' - for i in range(3): - ret = test_utils.start_http_server("foo_image_id%d" % i, - "foo_image%d" % i) - setattr(self, 'http_server%d_pid' % i, ret[0]) - setattr(self, 'http_port%d' % i, ret[1]) - - def tearDown(self): - for i in range(3): - pid = getattr(self, 'http_server%d_pid' % i, None) - if pid: - os.kill(pid, signal.SIGKILL) - - super(TestImageLocationSelectionStrategy, self).tearDown() - - def _url(self, path): - 
return 'http://127.0.0.1:%d%s' % (self.api_port, path) - - def _headers(self, custom_headers=None): - base_headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': TENANT1, - 'X-Roles': 'member', - } - base_headers.update(custom_headers or {}) - return base_headers - - def test_image_locations_with_order_strategy(self): - self.api_server.show_image_direct_url = True - self.api_server.show_multiple_locations = True - self.image_location_quota = 10 - self.api_server.location_strategy = 'location_order' - preference = "http, swift, filesystem" - self.api_server.store_type_location_strategy_preference = preference - self.start_servers(**self.__dict__.copy()) - - # Create an image - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', - 'foo': 'bar', 'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Get the image id - image = jsonutils.loads(response.text) - image_id = image['id'] - - # Image locations should not be visible before location is set - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({'Content-Type': 'application/json'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertIn('locations', image) - self.assertEqual([], image["locations"]) - - # Update image locations via PATCH - path = self._url('/v2/images/%s' % image_id) - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type}) - values = [{'url': 'http://127.0.0.1:%s/foo_image' % self.http_port0, - 'metadata': {}}, - {'url': 'http://127.0.0.1:%s/foo_image' % self.http_port1, - 
'metadata': {}}] - doc = [{'op': 'replace', - 'path': '/locations', - 'value': values}] - data = jsonutils.dumps(doc) - response = requests.patch(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code) - - # Image locations should be visible - path = self._url('/v2/images/%s' % image_id) - headers = self._headers({'Content-Type': 'application/json'}) - response = requests.get(path, headers=headers) - self.assertEqual(http.OK, response.status_code) - image = jsonutils.loads(response.text) - self.assertIn('locations', image) - self.assertEqual(values, image['locations']) - self.assertIn('direct_url', image) - self.assertEqual(values[0]['url'], image['direct_url']) - - self.stop_servers() - - -class TestImageLocationSelectionStrategyWithRegistry( - TestImageLocationSelectionStrategy): - def setUp(self): - super(TestImageLocationSelectionStrategyWithRegistry, self).setUp() - self.api_server.data_api = ( - 'glance.tests.functional.v2.registry_data_api') - self.registry_server.deployment_flavor = 'trusted-auth' - - -class TestImageMembers(functional.FunctionalTest): - - def setUp(self): - super(TestImageMembers, self).setUp() - self.cleanup() - self.include_scrubber = False - self.api_server.deployment_flavor = 'fakeauth' - self.registry_server.deployment_flavor = 'fakeauth' - self.start_servers(**self.__dict__.copy()) - - def _url(self, path): - return 'http://127.0.0.1:%d%s' % (self.api_port, path) - - def _headers(self, custom_headers=None): - base_headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': TENANT1, - 'X-Roles': 'member', - } - base_headers.update(custom_headers or {}) - return base_headers - - def test_image_member_lifecycle(self): - - def get_header(tenant, role=''): - auth_token = 'user:%s:%s' % (tenant, role) - headers = {'X-Auth-Token': auth_token} - return headers - - # Image list should be empty - 
path = self._url('/v2/images') - response = requests.get(path, headers=get_header('tenant1')) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - owners = ['tenant1', 'tenant2', 'admin'] - visibilities = ['community', 'private', 'public', 'shared'] - image_fixture = [] - for owner in owners: - for visibility in visibilities: - path = self._url('/v2/images') - headers = self._headers({ - 'content-type': 'application/json', - 'X-Auth-Token': 'createuser:%s:admin' % owner, - }) - data = jsonutils.dumps({ - 'name': '%s-%s' % (owner, visibility), - 'visibility': visibility, - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image_fixture.append(jsonutils.loads(response.text)) - - # Image list should contain 6 images for tenant1 - path = self._url('/v2/images') - response = requests.get(path, headers=get_header('tenant1')) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(6, len(images)) - - # Image list should contain 3 images for TENANT3 - path = self._url('/v2/images') - response = requests.get(path, headers=get_header(TENANT3)) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(3, len(images)) - - # Add Image member for tenant1-shared image - path = self._url('/v2/images/%s/members' % image_fixture[3]['id']) - body = jsonutils.dumps({'member': TENANT3}) - response = requests.post(path, headers=get_header('tenant1'), - data=body) - self.assertEqual(http.OK, response.status_code) - image_member = jsonutils.loads(response.text) - self.assertEqual(image_fixture[3]['id'], image_member['image_id']) - self.assertEqual(TENANT3, image_member['member_id']) - self.assertIn('created_at', image_member) - self.assertIn('updated_at', image_member) - self.assertEqual('pending', 
image_member['status']) - - # Image list should contain 3 images for TENANT3 - path = self._url('/v2/images') - response = requests.get(path, headers=get_header(TENANT3)) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(3, len(images)) - - # Image list should contain 0 shared images for TENANT3 - # because default is accepted - path = self._url('/v2/images?visibility=shared') - response = requests.get(path, headers=get_header(TENANT3)) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - # Image list should contain 4 images for TENANT3 with status pending - path = self._url('/v2/images?member_status=pending') - response = requests.get(path, headers=get_header(TENANT3)) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(4, len(images)) - - # Image list should contain 4 images for TENANT3 with status all - path = self._url('/v2/images?member_status=all') - response = requests.get(path, headers=get_header(TENANT3)) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(4, len(images)) - - # Image list should contain 1 image for TENANT3 with status pending - # and visibility shared - path = self._url('/v2/images?member_status=pending&visibility=shared') - response = requests.get(path, headers=get_header(TENANT3)) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(images[0]['name'], 'tenant1-shared') - - # Image list should contain 0 image for TENANT3 with status rejected - # and visibility shared - path = self._url('/v2/images?member_status=rejected&visibility=shared') - response = requests.get(path, headers=get_header(TENANT3)) - self.assertEqual(http.OK, 
response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - # Image list should contain 0 image for TENANT3 with status accepted - # and visibility shared - path = self._url('/v2/images?member_status=accepted&visibility=shared') - response = requests.get(path, headers=get_header(TENANT3)) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - # Image list should contain 0 image for TENANT3 with status accepted - # and visibility private - path = self._url('/v2/images?visibility=private') - response = requests.get(path, headers=get_header(TENANT3)) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - # Image tenant2-shared's image members list should contain no members - path = self._url('/v2/images/%s/members' % image_fixture[7]['id']) - response = requests.get(path, headers=get_header('tenant2')) - self.assertEqual(http.OK, response.status_code) - body = jsonutils.loads(response.text) - self.assertEqual(0, len(body['members'])) - - # Tenant 1, who is the owner cannot change status of image member - path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'], - TENANT3)) - body = jsonutils.dumps({'status': 'accepted'}) - response = requests.put(path, headers=get_header('tenant1'), data=body) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Tenant 1, who is the owner can get status of its own image member - path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'], - TENANT3)) - response = requests.get(path, headers=get_header('tenant1')) - self.assertEqual(http.OK, response.status_code) - body = jsonutils.loads(response.text) - self.assertEqual('pending', body['status']) - self.assertEqual(image_fixture[3]['id'], body['image_id']) - self.assertEqual(TENANT3, body['member_id']) - - # Tenant 3, who is the 
member can get status of its own status - path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'], - TENANT3)) - response = requests.get(path, headers=get_header(TENANT3)) - self.assertEqual(http.OK, response.status_code) - body = jsonutils.loads(response.text) - self.assertEqual('pending', body['status']) - self.assertEqual(image_fixture[3]['id'], body['image_id']) - self.assertEqual(TENANT3, body['member_id']) - - # Tenant 2, who not the owner cannot get status of image member - path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'], - TENANT3)) - response = requests.get(path, headers=get_header('tenant2')) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Tenant 3 can change status of image member - path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'], - TENANT3)) - body = jsonutils.dumps({'status': 'accepted'}) - response = requests.put(path, headers=get_header(TENANT3), data=body) - self.assertEqual(http.OK, response.status_code) - image_member = jsonutils.loads(response.text) - self.assertEqual(image_fixture[3]['id'], image_member['image_id']) - self.assertEqual(TENANT3, image_member['member_id']) - self.assertEqual('accepted', image_member['status']) - - # Image list should contain 4 images for TENANT3 because status is - # accepted - path = self._url('/v2/images') - response = requests.get(path, headers=get_header(TENANT3)) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(4, len(images)) - - # Tenant 3 invalid status change - path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'], - TENANT3)) - body = jsonutils.dumps({'status': 'invalid-status'}) - response = requests.put(path, headers=get_header(TENANT3), data=body) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - # Owner cannot change status of image - path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'], - TENANT3)) - body = 
jsonutils.dumps({'status': 'accepted'}) - response = requests.put(path, headers=get_header('tenant1'), data=body) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Add Image member for tenant2-shared image - path = self._url('/v2/images/%s/members' % image_fixture[7]['id']) - body = jsonutils.dumps({'member': TENANT4}) - response = requests.post(path, headers=get_header('tenant2'), - data=body) - self.assertEqual(http.OK, response.status_code) - image_member = jsonutils.loads(response.text) - self.assertEqual(image_fixture[7]['id'], image_member['image_id']) - self.assertEqual(TENANT4, image_member['member_id']) - self.assertIn('created_at', image_member) - self.assertIn('updated_at', image_member) - - # Add Image member to public image - path = self._url('/v2/images/%s/members' % image_fixture[2]['id']) - body = jsonutils.dumps({'member': TENANT2}) - response = requests.post(path, headers=get_header('tenant1'), - data=body) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Add Image member to private image - path = self._url('/v2/images/%s/members' % image_fixture[1]['id']) - body = jsonutils.dumps({'member': TENANT2}) - response = requests.post(path, headers=get_header('tenant1'), - data=body) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Add Image member to community image - path = self._url('/v2/images/%s/members' % image_fixture[0]['id']) - body = jsonutils.dumps({'member': TENANT2}) - response = requests.post(path, headers=get_header('tenant1'), - data=body) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Image tenant1-shared's members list should contain 1 member - path = self._url('/v2/images/%s/members' % image_fixture[3]['id']) - response = requests.get(path, headers=get_header('tenant1')) - self.assertEqual(http.OK, response.status_code) - body = jsonutils.loads(response.text) - self.assertEqual(1, len(body['members'])) - - # Admin can see any members - path = self._url('/v2/images/%s/members' % 
image_fixture[3]['id']) - response = requests.get(path, headers=get_header('tenant1', 'admin')) - self.assertEqual(http.OK, response.status_code) - body = jsonutils.loads(response.text) - self.assertEqual(1, len(body['members'])) - - # Image members not found for private image not owned by TENANT 1 - path = self._url('/v2/images/%s/members' % image_fixture[7]['id']) - response = requests.get(path, headers=get_header('tenant1')) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Image members forbidden for public image - path = self._url('/v2/images/%s/members' % image_fixture[2]['id']) - response = requests.get(path, headers=get_header('tenant1')) - self.assertIn("Only shared images have members", response.text) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Image members forbidden for community image - path = self._url('/v2/images/%s/members' % image_fixture[0]['id']) - response = requests.get(path, headers=get_header('tenant1')) - self.assertIn("Only shared images have members", response.text) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Image members forbidden for private image - path = self._url('/v2/images/%s/members' % image_fixture[1]['id']) - response = requests.get(path, headers=get_header('tenant1')) - self.assertIn("Only shared images have members", response.text) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Image Member Cannot delete Image membership - path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'], - TENANT3)) - response = requests.delete(path, headers=get_header(TENANT3)) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Delete Image member - path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'], - TENANT3)) - response = requests.delete(path, headers=get_header('tenant1')) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Now the image has no members - path = self._url('/v2/images/%s/members' % image_fixture[3]['id']) - 
response = requests.get(path, headers=get_header('tenant1')) - self.assertEqual(http.OK, response.status_code) - body = jsonutils.loads(response.text) - self.assertEqual(0, len(body['members'])) - - # Adding 11 image members should fail since configured limit is 10 - path = self._url('/v2/images/%s/members' % image_fixture[3]['id']) - for i in range(10): - body = jsonutils.dumps({'member': str(uuid.uuid4())}) - response = requests.post(path, headers=get_header('tenant1'), - data=body) - self.assertEqual(http.OK, response.status_code) - - body = jsonutils.dumps({'member': str(uuid.uuid4())}) - response = requests.post(path, headers=get_header('tenant1'), - data=body) - self.assertEqual(http.REQUEST_ENTITY_TOO_LARGE, response.status_code) - - # Get Image member should return not found for public image - path = self._url('/v2/images/%s/members/%s' % (image_fixture[2]['id'], - TENANT3)) - response = requests.get(path, headers=get_header('tenant1')) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Get Image member should return not found for community image - path = self._url('/v2/images/%s/members/%s' % (image_fixture[0]['id'], - TENANT3)) - response = requests.get(path, headers=get_header('tenant1')) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Get Image member should return not found for private image - path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'], - TENANT3)) - response = requests.get(path, headers=get_header('tenant1')) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Delete Image member should return forbidden for public image - path = self._url('/v2/images/%s/members/%s' % (image_fixture[2]['id'], - TENANT3)) - response = requests.delete(path, headers=get_header('tenant1')) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Delete Image member should return forbidden for community image - path = self._url('/v2/images/%s/members/%s' % (image_fixture[0]['id'], - TENANT3)) - response 
= requests.delete(path, headers=get_header('tenant1')) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Delete Image member should return forbidden for private image - path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'], - TENANT3)) - response = requests.delete(path, headers=get_header('tenant1')) - self.assertEqual(http.FORBIDDEN, response.status_code) - - self.stop_servers() - - -class TestImageMembersWithRegistry(TestImageMembers): - def setUp(self): - super(TestImageMembersWithRegistry, self).setUp() - self.api_server.data_api = ( - 'glance.tests.functional.v2.registry_data_api') - self.registry_server.deployment_flavor = 'trusted-auth' - - -class TestQuotas(functional.FunctionalTest): - - def setUp(self): - super(TestQuotas, self).setUp() - self.cleanup() - self.include_scrubber = False - self.api_server.deployment_flavor = 'noauth' - self.registry_server.deployment_flavor = 'trusted-auth' - self.user_storage_quota = 100 - self.start_servers(**self.__dict__.copy()) - - def _url(self, path): - return 'http://127.0.0.1:%d%s' % (self.api_port, path) - - def _headers(self, custom_headers=None): - base_headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': TENANT1, - 'X-Roles': 'member', - } - base_headers.update(custom_headers or {}) - return base_headers - - def _upload_image_test(self, data_src, expected_status): - # Image list should be empty - path = self._url('/v2/images') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - images = jsonutils.loads(response.text)['images'] - self.assertEqual(0, len(images)) - - # Create an image (with a deployer-defined property) - path = self._url('/v2/images') - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps({'name': 'testimg', - 'type': 'kernel', - 'foo': 'bar', - 
'disk_format': 'aki', - 'container_format': 'aki'}) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - image = jsonutils.loads(response.text) - image_id = image['id'] - - # upload data - path = self._url('/v2/images/%s/file' % image_id) - headers = self._headers({'Content-Type': 'application/octet-stream'}) - response = requests.put(path, headers=headers, data=data_src) - self.assertEqual(expected_status, response.status_code) - - # Deletion should work - path = self._url('/v2/images/%s' % image_id) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - def test_image_upload_under_quota(self): - data = b'x' * (self.user_storage_quota - 1) - self._upload_image_test(data, http.NO_CONTENT) - - def test_image_upload_exceed_quota(self): - data = b'x' * (self.user_storage_quota + 1) - self._upload_image_test(data, http.REQUEST_ENTITY_TOO_LARGE) - - def test_chunked_image_upload_under_quota(self): - def data_gen(): - yield b'x' * (self.user_storage_quota - 1) - - self._upload_image_test(data_gen(), http.NO_CONTENT) - - def test_chunked_image_upload_exceed_quota(self): - def data_gen(): - yield b'x' * (self.user_storage_quota + 1) - - self._upload_image_test(data_gen(), http.REQUEST_ENTITY_TOO_LARGE) - - -class TestQuotasWithRegistry(TestQuotas): - def setUp(self): - super(TestQuotasWithRegistry, self).setUp() - self.api_server.data_api = ( - 'glance.tests.functional.v2.registry_data_api') - self.registry_server.deployment_flavor = 'trusted-auth' diff --git a/glance/tests/functional/v2/test_metadef_namespaces.py b/glance/tests/functional/v2/test_metadef_namespaces.py deleted file mode 100644 index 99c3e9c4..00000000 --- a/glance/tests/functional/v2/test_metadef_namespaces.py +++ /dev/null @@ -1,242 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import uuid - -from oslo_serialization import jsonutils -import requests -from six.moves import http_client as http - -from glance.tests import functional - -TENANT1 = str(uuid.uuid4()) -TENANT2 = str(uuid.uuid4()) - - -class TestNamespaces(functional.FunctionalTest): - - def setUp(self): - super(TestNamespaces, self).setUp() - self.cleanup() - self.api_server.deployment_flavor = 'noauth' - self.start_servers(**self.__dict__.copy()) - - def _url(self, path): - return 'http://127.0.0.1:%d%s' % (self.api_port, path) - - def _headers(self, custom_headers=None): - base_headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': TENANT1, - 'X-Roles': 'admin', - } - base_headers.update(custom_headers or {}) - return base_headers - - def test_namespace_lifecycle(self): - # Namespace should not exist - path = self._url('/v2/metadefs/namespaces/MyNamespace') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Create a namespace - path = self._url('/v2/metadefs/namespaces') - headers = self._headers({'content-type': 'application/json'}) - namespace_name = 'MyNamespace' - data = jsonutils.dumps({ - "namespace": namespace_name, - "display_name": "My User Friendly Namespace", - "description": "My description" - } - ) - response = 
requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - namespace_loc_header = response.headers['Location'] - - # Returned namespace should match the created namespace with default - # values of visibility=private, protected=False and owner=Context - # Tenant - namespace = jsonutils.loads(response.text) - checked_keys = set([ - u'namespace', - u'display_name', - u'description', - u'visibility', - u'self', - u'schema', - u'protected', - u'owner', - u'created_at', - u'updated_at' - ]) - self.assertEqual(set(namespace.keys()), checked_keys) - expected_namespace = { - "namespace": namespace_name, - "display_name": "My User Friendly Namespace", - "description": "My description", - "visibility": "private", - "protected": False, - "owner": TENANT1, - "self": "/v2/metadefs/namespaces/%s" % namespace_name, - "schema": "/v2/schemas/metadefs/namespace" - } - for key, value in expected_namespace.items(): - self.assertEqual(namespace[key], value, key) - - # Attempt to insert a duplicate - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CONFLICT, response.status_code) - - # Get the namespace using the returned Location header - response = requests.get(namespace_loc_header, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - namespace = jsonutils.loads(response.text) - self.assertEqual(namespace_name, namespace['namespace']) - self.assertNotIn('object', namespace) - self.assertEqual(TENANT1, namespace['owner']) - self.assertEqual('private', namespace['visibility']) - self.assertFalse(namespace['protected']) - - # The namespace should be mutable - path = self._url('/v2/metadefs/namespaces/%s' % namespace_name) - media_type = 'application/json' - headers = self._headers({'content-type': media_type}) - namespace_name = "MyNamespace-UPDATED" - data = jsonutils.dumps( - { - "namespace": namespace_name, - "display_name": "display_name-UPDATED", - "description": 
"description-UPDATED", - "visibility": "private", # Not changed - "protected": True, - "owner": TENANT2 - } - ) - response = requests.put(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - - # Returned namespace should reflect the changes - namespace = jsonutils.loads(response.text) - self.assertEqual('MyNamespace-UPDATED', namespace_name) - self.assertEqual('display_name-UPDATED', namespace['display_name']) - self.assertEqual('description-UPDATED', namespace['description']) - self.assertEqual('private', namespace['visibility']) - self.assertTrue(namespace['protected']) - self.assertEqual(TENANT2, namespace['owner']) - - # Updates should persist across requests - path = self._url('/v2/metadefs/namespaces/%s' % namespace_name) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - namespace = jsonutils.loads(response.text) - self.assertEqual('MyNamespace-UPDATED', namespace['namespace']) - self.assertEqual('display_name-UPDATED', namespace['display_name']) - self.assertEqual('description-UPDATED', namespace['description']) - self.assertEqual('private', namespace['visibility']) - self.assertTrue(namespace['protected']) - self.assertEqual(TENANT2, namespace['owner']) - - # Deletion should not work on protected namespaces - path = self._url('/v2/metadefs/namespaces/%s' % namespace_name) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.FORBIDDEN, response.status_code) - - # Unprotect namespace for deletion - path = self._url('/v2/metadefs/namespaces/%s' % namespace_name) - media_type = 'application/json' - headers = self._headers({'content-type': media_type}) - doc = { - "namespace": namespace_name, - "display_name": "My User Friendly Namespace", - "description": "My description", - "visibility": "public", - "protected": False, - "owner": TENANT2 - } - data = jsonutils.dumps(doc) - response = requests.put(path, headers=headers, 
data=data) - self.assertEqual(http.OK, response.status_code, response.text) - - # Deletion should work. Deleting namespace MyNamespace - path = self._url('/v2/metadefs/namespaces/%s' % namespace_name) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # Namespace should not exist - path = self._url('/v2/metadefs/namespaces/MyNamespace') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - def test_metadef_dont_accept_illegal_bodies(self): - # Namespace should not exist - path = self._url('/v2/metadefs/namespaces/bodytest') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Create a namespace - path = self._url('/v2/metadefs/namespaces') - headers = self._headers({'content-type': 'application/json'}) - namespace_name = 'bodytest' - data = jsonutils.dumps({ - "namespace": namespace_name, - "display_name": "My User Friendly Namespace", - "description": "My description" - } - ) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Test all the urls that supply data - data_urls = [ - '/v2/schemas/metadefs/namespace', - '/v2/schemas/metadefs/namespaces', - '/v2/schemas/metadefs/resource_type', - '/v2/schemas/metadefs/resource_types', - '/v2/schemas/metadefs/property', - '/v2/schemas/metadefs/properties', - '/v2/schemas/metadefs/object', - '/v2/schemas/metadefs/objects', - '/v2/schemas/metadefs/tag', - '/v2/schemas/metadefs/tags', - '/v2/metadefs/resource_types', - ] - for value in data_urls: - path = self._url(value) - data = jsonutils.dumps(["body"]) - response = requests.get(path, headers=self._headers(), data=data) - self.assertEqual(http.BAD_REQUEST, response.status_code) - - # Put the namespace into the url - test_urls = [ - ('/v2/metadefs/namespaces/%s/resource_types', 'get'), - 
('/v2/metadefs/namespaces/%s/resource_types/type', 'delete'), - ('/v2/metadefs/namespaces/%s', 'get'), - ('/v2/metadefs/namespaces/%s', 'delete'), - ('/v2/metadefs/namespaces/%s/objects/name', 'get'), - ('/v2/metadefs/namespaces/%s/objects/name', 'delete'), - ('/v2/metadefs/namespaces/%s/properties', 'get'), - ('/v2/metadefs/namespaces/%s/tags/test', 'get'), - ('/v2/metadefs/namespaces/%s/tags/test', 'post'), - ('/v2/metadefs/namespaces/%s/tags/test', 'delete'), - ] - - for link, method in test_urls: - path = self._url(link % namespace_name) - data = jsonutils.dumps(["body"]) - response = getattr(requests, method)( - path, headers=self._headers(), data=data) - self.assertEqual(http.BAD_REQUEST, response.status_code) diff --git a/glance/tests/functional/v2/test_metadef_objects.py b/glance/tests/functional/v2/test_metadef_objects.py deleted file mode 100644 index 98a94cb6..00000000 --- a/glance/tests/functional/v2/test_metadef_objects.py +++ /dev/null @@ -1,273 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import uuid - -from oslo_serialization import jsonutils -import requests -from six.moves import http_client as http - -from glance.tests import functional - -TENANT1 = str(uuid.uuid4()) - - -class TestMetadefObjects(functional.FunctionalTest): - - def setUp(self): - super(TestMetadefObjects, self).setUp() - self.cleanup() - self.api_server.deployment_flavor = 'noauth' - self.start_servers(**self.__dict__.copy()) - - def _url(self, path): - return 'http://127.0.0.1:%d%s' % (self.api_port, path) - - def _headers(self, custom_headers=None): - base_headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': TENANT1, - 'X-Roles': 'admin', - } - base_headers.update(custom_headers or {}) - return base_headers - - def test_metadata_objects_lifecycle(self): - # Namespace should not exist - path = self._url('/v2/metadefs/namespaces/MyNamespace') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Create a namespace - path = self._url('/v2/metadefs/namespaces') - headers = self._headers({'content-type': 'application/json'}) - namespace_name = 'MyNamespace' - data = jsonutils.dumps({ - "namespace": namespace_name, - "display_name": "My User Friendly Namespace", - "description": "My description", - "visibility": "public", - "protected": False, - "owner": "The Test Owner" - } - ) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Metadata objects should not exist - path = self._url('/v2/metadefs/namespaces/MyNamespace/objects/object1') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Create a object - path = self._url('/v2/metadefs/namespaces/MyNamespace/objects') - headers = self._headers({'content-type': 'application/json'}) - metadata_object_name = 
"object1" - data = jsonutils.dumps( - { - "name": metadata_object_name, - "description": "object1 description.", - "required": [ - "property1" - ], - "properties": { - "property1": { - "type": "integer", - "title": "property1", - "description": "property1 description", - "operators": [""], - "default": 100, - "minimum": 100, - "maximum": 30000369 - }, - "property2": { - "type": "string", - "title": "property2", - "description": "property2 description ", - "default": "value2", - "minLength": 2, - "maxLength": 50 - } - } - } - ) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Attempt to insert a duplicate - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CONFLICT, response.status_code) - - # Get the metadata object created above - path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % - (namespace_name, metadata_object_name)) - response = requests.get(path, - headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - metadata_object = jsonutils.loads(response.text) - self.assertEqual("object1", metadata_object['name']) - - # Returned object should match the created object - metadata_object = jsonutils.loads(response.text) - checked_keys = set([ - u'name', - u'description', - u'properties', - u'required', - u'self', - u'schema', - u'created_at', - u'updated_at' - ]) - self.assertEqual(set(metadata_object.keys()), checked_keys) - expected_metadata_object = { - "name": metadata_object_name, - "description": "object1 description.", - "required": [ - "property1" - ], - "properties": { - 'property1': { - 'type': 'integer', - "title": "property1", - 'description': 'property1 description', - 'operators': [''], - 'default': 100, - 'minimum': 100, - 'maximum': 30000369 - }, - "property2": { - "type": "string", - "title": "property2", - "description": "property2 description ", - "default": "value2", - "minLength": 2, - "maxLength": 50 - } - }, - 
"self": "/v2/metadefs/namespaces/%(" - "namespace)s/objects/%(object)s" % - {'namespace': namespace_name, - 'object': metadata_object_name}, - "schema": "v2/schemas/metadefs/object" - } - - # Simple key values - checked_values = set([ - u'name', - u'description', - ]) - for key, value in expected_metadata_object.items(): - if(key in checked_values): - self.assertEqual(metadata_object[key], value, key) - # Complex key values - properties - for key, value in ( - expected_metadata_object["properties"]['property2'].items()): - self.assertEqual( - metadata_object["properties"]["property2"][key], - value, key - ) - - # The metadata_object should be mutable - path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % - (namespace_name, metadata_object_name)) - media_type = 'application/json' - headers = self._headers({'content-type': media_type}) - metadata_object_name = "object1-UPDATED" - data = jsonutils.dumps( - { - "name": metadata_object_name, - "description": "desc-UPDATED", - "required": [ - "property2" - ], - "properties": { - 'property1': { - 'type': 'integer', - "title": "property1", - 'description': 'p1 desc-UPDATED', - 'default': 500, - 'minimum': 500, - 'maximum': 1369 - }, - "property2": { - "type": "string", - "title": "property2", - "description": "p2 desc-UPDATED", - 'operators': [''], - "default": "value2-UPDATED", - "minLength": 5, - "maxLength": 150 - } - } - } - ) - response = requests.put(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - - # Returned metadata_object should reflect the changes - metadata_object = jsonutils.loads(response.text) - self.assertEqual('object1-UPDATED', metadata_object['name']) - self.assertEqual('desc-UPDATED', metadata_object['description']) - self.assertEqual('property2', metadata_object['required'][0]) - updated_property1 = metadata_object['properties']['property1'] - updated_property2 = metadata_object['properties']['property2'] - self.assertEqual('integer', 
updated_property1['type']) - self.assertEqual('p1 desc-UPDATED', updated_property1['description']) - self.assertEqual('500', updated_property1['default']) - self.assertEqual(500, updated_property1['minimum']) - self.assertEqual(1369, updated_property1['maximum']) - self.assertEqual([''], updated_property2['operators']) - self.assertEqual('string', updated_property2['type']) - self.assertEqual('p2 desc-UPDATED', updated_property2['description']) - self.assertEqual('value2-UPDATED', updated_property2['default']) - self.assertEqual(5, updated_property2['minLength']) - self.assertEqual(150, updated_property2['maxLength']) - - # Updates should persist across requests - path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % - (namespace_name, metadata_object_name)) - response = requests.get(path, headers=self._headers()) - self.assertEqual(200, response.status_code) - self.assertEqual('object1-UPDATED', metadata_object['name']) - self.assertEqual('desc-UPDATED', metadata_object['description']) - self.assertEqual('property2', metadata_object['required'][0]) - updated_property1 = metadata_object['properties']['property1'] - updated_property2 = metadata_object['properties']['property2'] - self.assertEqual('integer', updated_property1['type']) - self.assertEqual('p1 desc-UPDATED', updated_property1['description']) - self.assertEqual('500', updated_property1['default']) - self.assertEqual(500, updated_property1['minimum']) - self.assertEqual(1369, updated_property1['maximum']) - self.assertEqual([''], updated_property2['operators']) - self.assertEqual('string', updated_property2['type']) - self.assertEqual('p2 desc-UPDATED', updated_property2['description']) - self.assertEqual('value2-UPDATED', updated_property2['default']) - self.assertEqual(5, updated_property2['minLength']) - self.assertEqual(150, updated_property2['maxLength']) - - # Deletion of metadata_object object1 - path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % - (namespace_name, 
metadata_object_name)) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # metadata_object object1 should not exist - path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % - (namespace_name, metadata_object_name)) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) diff --git a/glance/tests/functional/v2/test_metadef_properties.py b/glance/tests/functional/v2/test_metadef_properties.py deleted file mode 100644 index 3d55eaeb..00000000 --- a/glance/tests/functional/v2/test_metadef_properties.py +++ /dev/null @@ -1,225 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import uuid - -from oslo_serialization import jsonutils -import requests -from six.moves import http_client as http - -from glance.tests import functional - -TENANT1 = str(uuid.uuid4()) - - -class TestNamespaceProperties(functional.FunctionalTest): - - def setUp(self): - super(TestNamespaceProperties, self).setUp() - self.cleanup() - self.api_server.deployment_flavor = 'noauth' - self.start_servers(**self.__dict__.copy()) - - def _url(self, path): - return 'http://127.0.0.1:%d%s' % (self.api_port, path) - - def _headers(self, custom_headers=None): - base_headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': TENANT1, - 'X-Roles': 'admin', - } - base_headers.update(custom_headers or {}) - return base_headers - - def test_properties_lifecycle(self): - # Namespace should not exist - path = self._url('/v2/metadefs/namespaces/MyNamespace') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Create a namespace - path = self._url('/v2/metadefs/namespaces') - headers = self._headers({'content-type': 'application/json'}) - namespace_name = 'MyNamespace' - resource_type_name = 'MyResourceType' - resource_type_prefix = 'MyPrefix' - data = jsonutils.dumps({ - "namespace": namespace_name, - "display_name": "My User Friendly Namespace", - "description": "My description", - "visibility": "public", - "protected": False, - "owner": "The Test Owner", - "resource_type_associations": [ - { - "name": resource_type_name, - "prefix": resource_type_prefix - } - ] - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Property1 should not exist - path = self._url('/v2/metadefs/namespaces/MyNamespace/properties' - '/property1') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, 
response.status_code) - - # Create a property - path = self._url('/v2/metadefs/namespaces/MyNamespace/properties') - headers = self._headers({'content-type': 'application/json'}) - property_name = "property1" - data = jsonutils.dumps( - { - "name": property_name, - "type": "integer", - "title": "property1", - "description": "property1 description", - "default": 100, - "minimum": 100, - "maximum": 30000369, - "readonly": False, - } - ) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Attempt to insert a duplicate - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CONFLICT, response.status_code) - - # Get the property created above - path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % - (namespace_name, property_name)) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - property_object = jsonutils.loads(response.text) - self.assertEqual("integer", property_object['type']) - self.assertEqual("property1", property_object['title']) - self.assertEqual("property1 description", property_object[ - 'description']) - self.assertEqual('100', property_object['default']) - self.assertEqual(100, property_object['minimum']) - self.assertEqual(30000369, property_object['maximum']) - - # Get the property with specific resource type association - path = self._url('/v2/metadefs/namespaces/%s/properties/%s%s' % ( - namespace_name, property_name, '='.join(['?resource_type', - resource_type_name]))) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Get the property with prefix and specific resource type association - property_name_with_prefix = ''.join([resource_type_prefix, - property_name]) - path = self._url('/v2/metadefs/namespaces/%s/properties/%s%s' % ( - namespace_name, property_name_with_prefix, '='.join([ - '?resource_type', 
resource_type_name]))) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - property_object = jsonutils.loads(response.text) - self.assertEqual("integer", property_object['type']) - self.assertEqual("property1", property_object['title']) - self.assertEqual("property1 description", property_object[ - 'description']) - self.assertEqual('100', property_object['default']) - self.assertEqual(100, property_object['minimum']) - self.assertEqual(30000369, property_object['maximum']) - self.assertFalse(property_object['readonly']) - - # Returned property should match the created property - property_object = jsonutils.loads(response.text) - checked_keys = set([ - u'name', - u'type', - u'title', - u'description', - u'default', - u'minimum', - u'maximum', - u'readonly', - ]) - self.assertEqual(set(property_object.keys()), checked_keys) - expected_metadata_property = { - "type": "integer", - "title": "property1", - "description": "property1 description", - "default": '100', - "minimum": 100, - "maximum": 30000369, - "readonly": False, - } - - for key, value in expected_metadata_property.items(): - self.assertEqual(property_object[key], value, key) - - # The property should be mutable - path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % - (namespace_name, property_name)) - media_type = 'application/json' - headers = self._headers({'content-type': media_type}) - property_name = "property1-UPDATED" - data = jsonutils.dumps( - { - "name": property_name, - "type": "string", - "title": "string property", - "description": "desc-UPDATED", - "operators": [""], - "default": "value-UPDATED", - "minLength": 5, - "maxLength": 10, - "readonly": True, - } - ) - response = requests.put(path, headers=headers, data=data) - self.assertEqual(http.OK, response.status_code, response.text) - - # Returned property should reflect the changes - property_object = jsonutils.loads(response.text) - self.assertEqual('string', 
property_object['type']) - self.assertEqual('desc-UPDATED', property_object['description']) - self.assertEqual('value-UPDATED', property_object['default']) - self.assertEqual([""], property_object['operators']) - self.assertEqual(5, property_object['minLength']) - self.assertEqual(10, property_object['maxLength']) - self.assertTrue(property_object['readonly']) - - # Updates should persist across requests - path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % - (namespace_name, property_name)) - response = requests.get(path, headers=self._headers()) - self.assertEqual('string', property_object['type']) - self.assertEqual('desc-UPDATED', property_object['description']) - self.assertEqual('value-UPDATED', property_object['default']) - self.assertEqual([""], property_object['operators']) - self.assertEqual(5, property_object['minLength']) - self.assertEqual(10, property_object['maxLength']) - - # Deletion of property property1 - path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % - (namespace_name, property_name)) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # property1 should not exist - path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % - (namespace_name, property_name)) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) diff --git a/glance/tests/functional/v2/test_metadef_resourcetypes.py b/glance/tests/functional/v2/test_metadef_resourcetypes.py deleted file mode 100644 index cc047d61..00000000 --- a/glance/tests/functional/v2/test_metadef_resourcetypes.py +++ /dev/null @@ -1,265 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -import six -from six.moves import http_client as http -import webob.exc -from wsme.rest import json - -from glance.api import policy -from glance.api.v2.model.metadef_resource_type import ResourceType -from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation -from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociations -from glance.api.v2.model.metadef_resource_type import ResourceTypes -from glance.common import exception -from glance.common import wsgi -import glance.db -import glance.gateway -from glance.i18n import _, _LE -import glance.notifier -import glance.schema - -LOG = logging.getLogger(__name__) - - -class ResourceTypeController(object): - def __init__(self, db_api=None, policy_enforcer=None): - self.db_api = db_api or glance.db.get_api() - self.policy = policy_enforcer or policy.Enforcer() - self.gateway = glance.gateway.Gateway(db_api=self.db_api, - policy_enforcer=self.policy) - - def index(self, req): - try: - filters = {'namespace': None} - rs_type_repo = self.gateway.get_metadef_resource_type_repo( - req.context) - db_resource_type_list = rs_type_repo.list(filters=filters) - resource_type_list = [ResourceType.to_wsme_model( - resource_type) for resource_type in db_resource_type_list] - resource_types = ResourceTypes() - resource_types.resource_types = resource_type_list - except exception.Forbidden as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound 
as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(e) - raise webob.exc.HTTPInternalServerError(e) - return resource_types - - def show(self, req, namespace): - try: - filters = {'namespace': namespace} - rs_type_repo = self.gateway.get_metadef_resource_type_repo( - req.context) - db_resource_type_list = rs_type_repo.list(filters=filters) - resource_type_list = [ResourceTypeAssociation.to_wsme_model( - resource_type) for resource_type in db_resource_type_list] - resource_types = ResourceTypeAssociations() - resource_types.resource_type_associations = resource_type_list - except exception.Forbidden as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except Exception as e: - LOG.error(e) - raise webob.exc.HTTPInternalServerError(e) - return resource_types - - def create(self, req, resource_type, namespace): - rs_type_factory = self.gateway.get_metadef_resource_type_factory( - req.context) - rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context) - try: - new_resource_type = rs_type_factory.new_resource_type( - namespace=namespace, **resource_type.to_dict()) - rs_type_repo.add(new_resource_type) - - except exception.Forbidden as e: - msg = (_LE("Forbidden to create resource type. 
" - "Reason: %(reason)s") - % {'reason': encodeutils.exception_to_unicode(e)}) - LOG.error(msg) - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Duplicate as e: - raise webob.exc.HTTPConflict(explanation=e.msg) - except Exception as e: - LOG.error(e) - raise webob.exc.HTTPInternalServerError() - return ResourceTypeAssociation.to_wsme_model(new_resource_type) - - def delete(self, req, namespace, resource_type): - rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context) - try: - filters = {} - found = False - filters['namespace'] = namespace - db_resource_type_list = rs_type_repo.list(filters=filters) - for db_resource_type in db_resource_type_list: - if db_resource_type.name == resource_type: - db_resource_type.delete() - rs_type_repo.remove(db_resource_type) - found = True - if not found: - raise exception.NotFound() - except exception.Forbidden as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - msg = (_("Failed to find resource type %(resourcetype)s to " - "delete") % {'resourcetype': resource_type}) - LOG.error(msg) - raise webob.exc.HTTPNotFound(explanation=msg) - except Exception as e: - LOG.error(e) - raise webob.exc.HTTPInternalServerError() - - -class RequestDeserializer(wsgi.JSONRequestDeserializer): - _disallowed_properties = ['created_at', 'updated_at'] - - def __init__(self, schema=None): - super(RequestDeserializer, self).__init__() - self.schema = schema or get_schema() - - def _get_request_body(self, request): - output = super(RequestDeserializer, self).default(request) - if 'body' not in output: - msg = _('Body expected in request.') - raise webob.exc.HTTPBadRequest(explanation=msg) - return output['body'] - - @classmethod - def _check_allowed(cls, image): - for key in cls._disallowed_properties: - if key in image: - msg = _("Attribute '%s' is read-only.") % key - raise 
webob.exc.HTTPForbidden( - explanation=encodeutils.exception_to_unicode(msg)) - - def create(self, request): - body = self._get_request_body(request) - self._check_allowed(body) - try: - self.schema.validate(body) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - resource_type = json.fromjson(ResourceTypeAssociation, body) - return dict(resource_type=resource_type) - - -class ResponseSerializer(wsgi.JSONResponseSerializer): - def __init__(self, schema=None): - super(ResponseSerializer, self).__init__() - self.schema = schema - - def show(self, response, result): - resource_type_json = json.tojson(ResourceTypeAssociations, result) - body = jsonutils.dumps(resource_type_json, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def index(self, response, result): - resource_type_json = json.tojson(ResourceTypes, result) - body = jsonutils.dumps(resource_type_json, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def create(self, response, result): - resource_type_json = json.tojson(ResourceTypeAssociation, result) - response.status_int = http.CREATED - body = jsonutils.dumps(resource_type_json, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def delete(self, response, result): - response.status_int = http.NO_CONTENT - - -def _get_base_properties(): - return { - 'name': { - 'type': 'string', - 'description': _('Resource type names should be aligned with Heat ' - 'resource types whenever possible: ' - 'http://docs.openstack.org/developer/heat/' - 'template_guide/openstack.html'), - 'maxLength': 80, - }, - 'prefix': { - 'type': 'string', - 'description': _('Specifies the prefix to use for the given ' - 'resource type. 
Any properties in the namespace ' - 'should be prefixed with this prefix when being ' - 'applied to the specified resource type. Must ' - 'include prefix separator (e.g. a colon :).'), - 'maxLength': 80, - }, - 'properties_target': { - 'type': 'string', - 'description': _('Some resource types allow more than one key / ' - 'value pair per instance. For example, Cinder ' - 'allows user and image metadata on volumes. Only ' - 'the image properties metadata is evaluated by ' - 'Nova (scheduling or drivers). This property ' - 'allows a namespace target to remove the ' - 'ambiguity.'), - 'maxLength': 80, - }, - "created_at": { - "type": "string", - "readOnly": True, - "description": _("Date and time of resource type association"), - "format": "date-time" - }, - "updated_at": { - "type": "string", - "readOnly": True, - "description": _("Date and time of the last resource type " - "association modification"), - "format": "date-time" - } - } - - -def get_schema(): - properties = _get_base_properties() - mandatory_attrs = ResourceTypeAssociation.get_mandatory_attrs() - schema = glance.schema.Schema( - 'resource_type_association', - properties, - required=mandatory_attrs, - ) - return schema - - -def get_collection_schema(): - resource_type_schema = get_schema() - return glance.schema.CollectionSchema('resource_type_associations', - resource_type_schema) - - -def create_resource(): - """ResourceTypeAssociation resource factory method""" - schema = get_schema() - deserializer = RequestDeserializer(schema) - serializer = ResponseSerializer(schema) - controller = ResourceTypeController() - return wsgi.Resource(controller, deserializer, serializer) diff --git a/glance/tests/functional/v2/test_metadef_tags.py b/glance/tests/functional/v2/test_metadef_tags.py deleted file mode 100644 index 520a1ce1..00000000 --- a/glance/tests/functional/v2/test_metadef_tags.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import uuid - -from oslo_serialization import jsonutils -import requests -from six.moves import http_client as http - -from glance.tests import functional - -TENANT1 = str(uuid.uuid4()) - - -class TestMetadefTags(functional.FunctionalTest): - - def setUp(self): - super(TestMetadefTags, self).setUp() - self.cleanup() - self.api_server.deployment_flavor = 'noauth' - self.start_servers(**self.__dict__.copy()) - - def _url(self, path): - return 'http://127.0.0.1:%d%s' % (self.api_port, path) - - def _headers(self, custom_headers=None): - base_headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': TENANT1, - 'X-Roles': 'admin', - } - base_headers.update(custom_headers or {}) - return base_headers - - def test_metadata_tags_lifecycle(self): - # Namespace should not exist - path = self._url('/v2/metadefs/namespaces/MyNamespace') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Create a namespace - path = self._url('/v2/metadefs/namespaces') - headers = self._headers({'content-type': 'application/json'}) - namespace_name = 'MyNamespace' - data = jsonutils.dumps({ - "namespace": namespace_name, - "display_name": "My User Friendly Namespace", - "description": "My description", - "visibility": "public", - "protected": False, - "owner": 
"The Test Owner"} - ) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Metadata tag should not exist - metadata_tag_name = "tag1" - path = self._url('/v2/metadefs/namespaces/%s/tags/%s' % - (namespace_name, metadata_tag_name)) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Create the metadata tag - headers = self._headers({'content-type': 'application/json'}) - response = requests.post(path, headers=headers) - self.assertEqual(http.CREATED, response.status_code) - - # Get the metadata tag created above - response = requests.get(path, - headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - metadata_tag = jsonutils.loads(response.text) - self.assertEqual(metadata_tag_name, metadata_tag['name']) - - # Returned tag should match the created tag - metadata_tag = jsonutils.loads(response.text) - checked_keys = set([ - u'name', - u'created_at', - u'updated_at' - ]) - self.assertEqual(checked_keys, set(metadata_tag.keys())) - expected_metadata_tag = { - "name": metadata_tag_name - } - - # Simple key values - checked_values = set([ - u'name' - ]) - for key, value in expected_metadata_tag.items(): - if(key in checked_values): - self.assertEqual(metadata_tag[key], value, key) - - # Try to create a duplicate metadata tag - headers = self._headers({'content-type': 'application/json'}) - response = requests.post(path, headers=headers) - self.assertEqual(http.CONFLICT, response.status_code) - - # The metadata_tag should be mutable - path = self._url('/v2/metadefs/namespaces/%s/tags/%s' % - (namespace_name, metadata_tag_name)) - media_type = 'application/json' - headers = self._headers({'content-type': media_type}) - metadata_tag_name = "tag1-UPDATED" - data = jsonutils.dumps( - { - "name": metadata_tag_name - } - ) - response = requests.put(path, headers=headers, data=data) - self.assertEqual(http.OK, 
response.status_code, response.text) - - # Returned metadata_tag should reflect the changes - metadata_tag = jsonutils.loads(response.text) - self.assertEqual('tag1-UPDATED', metadata_tag['name']) - - # Updates should persist across requests - path = self._url('/v2/metadefs/namespaces/%s/tags/%s' % - (namespace_name, metadata_tag_name)) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - self.assertEqual('tag1-UPDATED', metadata_tag['name']) - - # Deletion of metadata_tag_name - path = self._url('/v2/metadefs/namespaces/%s/tags/%s' % - (namespace_name, metadata_tag_name)) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.NO_CONTENT, response.status_code) - - # metadata_tag_name should not exist - path = self._url('/v2/metadefs/namespaces/%s/tags/%s' % - (namespace_name, metadata_tag_name)) - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.NOT_FOUND, response.status_code) - - # Create multiple tags. - path = self._url('/v2/metadefs/namespaces/%s/tags' % - (namespace_name)) - headers = self._headers({'content-type': 'application/json'}) - data = jsonutils.dumps( - {"tags": [{"name": "tag1"}, {"name": "tag2"}, {"name": "tag3"}]} - ) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # List out the three new tags. 
- response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - tags = jsonutils.loads(response.text)['tags'] - self.assertEqual(3, len(tags)) - - # Attempt to create bogus duplicate tag4 - data = jsonutils.dumps( - {"tags": [{"name": "tag4"}, {"name": "tag5"}, {"name": "tag4"}]} - ) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CONFLICT, response.status_code) - - # Verify the previous 3 still exist - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - tags = jsonutils.loads(response.text)['tags'] - self.assertEqual(3, len(tags)) diff --git a/glance/tests/functional/v2/test_schemas.py b/glance/tests/functional/v2/test_schemas.py deleted file mode 100644 index 2257e95d..00000000 --- a/glance/tests/functional/v2/test_schemas.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_serialization import jsonutils -import requests -from six.moves import http_client as http - -from glance.tests import functional - - -class TestSchemas(functional.FunctionalTest): - - def setUp(self): - super(TestSchemas, self).setUp() - self.cleanup() - self.start_servers(**self.__dict__.copy()) - - def test_resource(self): - # Ensure the image link works and custom properties are loaded - path = 'http://%s:%d/v2/schemas/image' % ('127.0.0.1', self.api_port) - response = requests.get(path) - self.assertEqual(http.OK, response.status_code) - image_schema = jsonutils.loads(response.text) - expected = set([ - 'id', - 'name', - 'visibility', - 'checksum', - 'created_at', - 'updated_at', - 'tags', - 'size', - 'virtual_size', - 'owner', - 'container_format', - 'disk_format', - 'self', - 'file', - 'status', - 'schema', - 'direct_url', - 'locations', - 'min_ram', - 'min_disk', - 'protected', - ]) - self.assertEqual(expected, set(image_schema['properties'].keys())) - - # Ensure the images link works and agrees with the image schema - path = 'http://%s:%d/v2/schemas/images' % ('127.0.0.1', self.api_port) - response = requests.get(path) - self.assertEqual(http.OK, response.status_code) - images_schema = jsonutils.loads(response.text) - item_schema = images_schema['properties']['images']['items'] - self.assertEqual(item_schema, image_schema) - - self.stop_servers() diff --git a/glance/tests/functional/v2/test_tasks.py b/glance/tests/functional/v2/test_tasks.py deleted file mode 100644 index b04340b6..00000000 --- a/glance/tests/functional/v2/test_tasks.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from oslo_serialization import jsonutils -import requests -from six.moves import http_client as http - -from glance.tests import functional - - -TENANT1 = str(uuid.uuid4()) -TENANT2 = str(uuid.uuid4()) -TENANT3 = str(uuid.uuid4()) -TENANT4 = str(uuid.uuid4()) - - -class TestTasks(functional.FunctionalTest): - - def setUp(self): - super(TestTasks, self).setUp() - self.cleanup() - self.api_server.deployment_flavor = 'noauth' - - def _url(self, path): - return 'http://127.0.0.1:%d%s' % (self.api_port, path) - - def _headers(self, custom_headers=None): - base_headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': TENANT1, - 'X-Roles': 'admin', - } - base_headers.update(custom_headers or {}) - return base_headers - - def test_task_not_allowed_non_admin(self): - self.start_servers(**self.__dict__.copy()) - roles = {'X-Roles': 'member'} - # Task list should be empty - path = self._url('/v2/tasks') - response = requests.get(path, headers=self._headers(roles)) - self.assertEqual(http.FORBIDDEN, response.status_code) - - def test_task_lifecycle(self): - self.start_servers(**self.__dict__.copy()) - # Task list should be empty - path = self._url('/v2/tasks') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - tasks = jsonutils.loads(response.text)['tasks'] - self.assertEqual(0, len(tasks)) - - # Create a task - path = self._url('/v2/tasks') - headers = 
self._headers({'content-type': 'application/json'}) - - data = jsonutils.dumps({ - "type": "import", - "input": { - "import_from": "http://example.com", - "import_from_format": "qcow2", - "image_properties": { - 'disk_format': 'vhd', - 'container_format': 'ovf' - } - } - }) - response = requests.post(path, headers=headers, data=data) - self.assertEqual(http.CREATED, response.status_code) - - # Returned task entity should have a generated id and status - task = jsonutils.loads(response.text) - task_id = task['id'] - - self.assertIn('Location', response.headers) - self.assertEqual(path + '/' + task_id, response.headers['Location']) - - checked_keys = set([u'created_at', - u'id', - u'input', - u'message', - u'owner', - u'schema', - u'self', - u'status', - u'type', - u'result', - u'updated_at']) - self.assertEqual(checked_keys, set(task.keys())) - expected_task = { - 'status': 'pending', - 'type': 'import', - 'input': { - "import_from": "http://example.com", - "import_from_format": "qcow2", - "image_properties": { - 'disk_format': 'vhd', - 'container_format': 'ovf' - }}, - 'schema': '/v2/schemas/task', - } - for key, value in expected_task.items(): - self.assertEqual(value, task[key], key) - - # Tasks list should now have one entry - path = self._url('/v2/tasks') - response = requests.get(path, headers=self._headers()) - self.assertEqual(http.OK, response.status_code) - tasks = jsonutils.loads(response.text)['tasks'] - self.assertEqual(1, len(tasks)) - self.assertEqual(task_id, tasks[0]['id']) - - # Attempt to delete a task - path = self._url('/v2/tasks/%s' % tasks[0]['id']) - response = requests.delete(path, headers=self._headers()) - self.assertEqual(http.METHOD_NOT_ALLOWED, response.status_code) - self.assertIsNotNone(response.headers.get('Allow')) - self.assertEqual('GET', response.headers.get('Allow')) - - self.stop_servers() - - -class TestTasksWithRegistry(TestTasks): - def setUp(self): - super(TestTasksWithRegistry, self).setUp() - self.api_server.data_api = ( 
- 'glance.tests.functional.v2.registry_data_api') - self.registry_server.deployment_flavor = 'trusted-auth' - self.include_scrubber = False diff --git a/glance/tests/integration/__init__.py b/glance/tests/integration/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/integration/legacy_functional/__init__.py b/glance/tests/integration/legacy_functional/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/integration/legacy_functional/base.py b/glance/tests/integration/legacy_functional/base.py deleted file mode 100644 index f37bdfa0..00000000 --- a/glance/tests/integration/legacy_functional/base.py +++ /dev/null @@ -1,222 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import atexit -import os.path -import tempfile - -import fixtures -import glance_store -from oslo_config import cfg -from oslo_db import options - -import glance.common.client -from glance.common import config -import glance.db.sqlalchemy.api -import glance.registry.client.v1.client -from glance import tests as glance_tests -from glance.tests import utils as test_utils - - -TESTING_API_PASTE_CONF = """ -[pipeline:glance-api] -pipeline = versionnegotiation gzip unauthenticated-context rootapp - -[pipeline:glance-api-caching] -pipeline = versionnegotiation gzip unauthenticated-context cache rootapp - -[pipeline:glance-api-cachemanagement] -pipeline = - versionnegotiation - gzip - unauthenticated-context - cache - cache_manage - rootapp - -[pipeline:glance-api-fakeauth] -pipeline = versionnegotiation gzip fakeauth context rootapp - -[pipeline:glance-api-noauth] -pipeline = versionnegotiation gzip context rootapp - -[composite:rootapp] -paste.composite_factory = glance.api:root_app_factory -/: apiversions -/v1: apiv1app -/v2: apiv2app - -[app:apiversions] -paste.app_factory = glance.api.versions:create_resource - -[app:apiv1app] -paste.app_factory = glance.api.v1.router:API.factory - -[app:apiv2app] -paste.app_factory = glance.api.v2.router:API.factory - -[filter:versionnegotiation] -paste.filter_factory = - glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory - -[filter:gzip] -paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory - -[filter:cache] -paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory - -[filter:cache_manage] -paste.filter_factory = - glance.api.middleware.cache_manage:CacheManageFilter.factory - -[filter:context] -paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory - -[filter:unauthenticated-context] -paste.filter_factory = - glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - -[filter:fakeauth] -paste.filter_factory = 
glance.tests.utils:FakeAuthMiddleware.factory -""" - -TESTING_REGISTRY_PASTE_CONF = """ -[pipeline:glance-registry] -pipeline = unauthenticated-context registryapp - -[pipeline:glance-registry-fakeauth] -pipeline = fakeauth context registryapp - -[app:registryapp] -paste.app_factory = glance.registry.api.v1:API.factory - -[filter:context] -paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory - -[filter:unauthenticated-context] -paste.filter_factory = - glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - -[filter:fakeauth] -paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory -""" - -CONF = cfg.CONF - - -class ApiTest(test_utils.BaseTestCase): - def setUp(self): - super(ApiTest, self).setUp() - self.init() - - def init(self): - self.test_dir = self.useFixture(fixtures.TempDir()).path - self._configure_logging() - self._configure_policy() - self._setup_database() - self._setup_stores() - self._setup_property_protection() - self.glance_registry_app = self._load_paste_app( - 'glance-registry', - flavor=getattr(self, 'registry_flavor', ''), - conf=getattr(self, 'registry_paste_conf', - TESTING_REGISTRY_PASTE_CONF), - ) - self._connect_registry_client() - self.glance_api_app = self._load_paste_app( - 'glance-api', - flavor=getattr(self, 'api_flavor', ''), - conf=getattr(self, 'api_paste_conf', TESTING_API_PASTE_CONF), - ) - self.http = test_utils.Httplib2WsgiAdapter(self.glance_api_app) - - def _setup_property_protection(self): - self._copy_data_file('property-protections.conf', self.test_dir) - self.property_file = os.path.join(self.test_dir, - 'property-protections.conf') - - def _configure_policy(self): - policy_file = self._copy_data_file('policy.json', self.test_dir) - self.config(policy_file=policy_file, group='oslo_policy') - - def _configure_logging(self): - self.config(default_log_levels=[ - 'amqplib=WARN', - 'sqlalchemy=WARN', - 'boto=WARN', - 'suds=INFO', - 'keystone=INFO', - 
'eventlet.wsgi.server=DEBUG' - ]) - - def _setup_database(self): - sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir - options.set_defaults(CONF, connection=sql_connection) - glance.db.sqlalchemy.api.clear_db_env() - glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE' - if glance_db_env in os.environ: - # use the empty db created and cached as a tempfile - # instead of spending the time creating a new one - db_location = os.environ[glance_db_env] - test_utils.execute('cp %s %s/tests.sqlite' - % (db_location, self.test_dir)) - else: - test_utils.db_sync() - - # copy the clean db to a temp location so that it - # can be reused for future tests - (osf, db_location) = tempfile.mkstemp() - os.close(osf) - test_utils.execute('cp %s/tests.sqlite %s' - % (self.test_dir, db_location)) - os.environ[glance_db_env] = db_location - - # cleanup the temp file when the test suite is - # complete - def _delete_cached_db(): - try: - os.remove(os.environ[glance_db_env]) - except Exception: - glance_tests.logger.exception( - "Error cleaning up the file %s" % - os.environ[glance_db_env]) - atexit.register(_delete_cached_db) - - def _setup_stores(self): - glance_store.register_opts(CONF) - - image_dir = os.path.join(self.test_dir, "images") - self.config(group='glance_store', - filesystem_store_datadir=image_dir) - - glance_store.create_stores() - - def _load_paste_app(self, name, flavor, conf): - conf_file_path = os.path.join(self.test_dir, '%s-paste.ini' % name) - with open(conf_file_path, 'w') as conf_file: - conf_file.write(conf) - conf_file.flush() - return config.load_paste_app(name, flavor=flavor, - conf_file=conf_file_path) - - def _connect_registry_client(self): - def get_connection_type(self2): - def wrapped(*args, **kwargs): - return test_utils.HttplibWsgiAdapter(self.glance_registry_app) - return wrapped - - self.stubs.Set(glance.common.client.BaseClient, - 'get_connection_type', get_connection_type) - - def tearDown(self): - glance.db.sqlalchemy.api.clear_db_env() - 
super(ApiTest, self).tearDown() diff --git a/glance/tests/integration/legacy_functional/test_v1_api.py b/glance/tests/integration/legacy_functional/test_v1_api.py deleted file mode 100644 index a5895917..00000000 --- a/glance/tests/integration/legacy_functional/test_v1_api.py +++ /dev/null @@ -1,1735 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import hashlib -import os -import tempfile - -from oslo_serialization import jsonutils -from oslo_utils import units -from six.moves import http_client -import testtools - -from glance.common import timeutils -from glance.tests.integration.legacy_functional import base -from glance.tests.utils import minimal_headers - -FIVE_KB = 5 * units.Ki -FIVE_GB = 5 * units.Gi - - -class TestApi(base.ApiTest): - def test_get_head_simple_post(self): - # 0. GET /images - # Verify no public images - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('{"images": []}', content) - - # 1. GET /images/detail - # Verify no public images - path = "/v1/images/detail" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('{"images": []}', content) - - # 2. POST /images with public image named Image1 - # attribute and no custom properties. 
Verify a 200 OK is returned - image_data = b"*" * FIVE_KB - headers = minimal_headers('Image1') - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers, - body=image_data) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['image']['id'] - self.assertEqual(hashlib.md5(image_data).hexdigest(), - data['image']['checksum']) - self.assertEqual(FIVE_KB, data['image']['size']) - self.assertEqual("Image1", data['image']['name']) - self.assertTrue(data['image']['is_public']) - - # 3. HEAD image - # Verify image found now - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual("Image1", response['x-image-meta-name']) - - # 4. GET image - # Verify all information on image we just added is correct - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - expected_image_headers = { - 'x-image-meta-id': image_id, - 'x-image-meta-name': 'Image1', - 'x-image-meta-is_public': 'True', - 'x-image-meta-status': 'active', - 'x-image-meta-disk_format': 'raw', - 'x-image-meta-container_format': 'ovf', - 'x-image-meta-size': str(FIVE_KB)} - - expected_std_headers = { - 'content-length': str(FIVE_KB), - 'content-type': 'application/octet-stream'} - - for expected_key, expected_value in expected_image_headers.items(): - self.assertEqual(expected_value, response[expected_key], - "For key '%s' expected header value '%s'. " - "Got '%s'" % (expected_key, - expected_value, - response[expected_key])) - - for expected_key, expected_value in expected_std_headers.items(): - self.assertEqual(expected_value, response[expected_key], - "For key '%s' expected header value '%s'. 
" - "Got '%s'" % (expected_key, - expected_value, - response[expected_key])) - - content = content.encode('utf-8') - self.assertEqual(image_data, content) - self.assertEqual(hashlib.md5(image_data).hexdigest(), - hashlib.md5(content).hexdigest()) - - # 5. GET /images - # Verify no public images - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - expected_result = {"images": [ - {"container_format": "ovf", - "disk_format": "raw", - "id": image_id, - "name": "Image1", - "checksum": "c2e5db72bd7fd153f53ede5da5a06de3", - "size": 5120}]} - self.assertEqual(expected_result, jsonutils.loads(content)) - - # 6. GET /images/detail - # Verify image and all its metadata - path = "/v1/images/detail" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - expected_image = { - "status": "active", - "name": "Image1", - "deleted": False, - "container_format": "ovf", - "disk_format": "raw", - "id": image_id, - "is_public": True, - "deleted_at": None, - "properties": {}, - "size": 5120} - - image = jsonutils.loads(content) - - for expected_key, expected_value in expected_image.items(): - self.assertEqual(expected_value, image['images'][0][expected_key], - "For key '%s' expected header value '%s'. " - "Got '%s'" % (expected_key, - expected_value, - image['images'][0][expected_key])) - - # 7. PUT image with custom properties of "distro" and "arch" - # Verify 200 returned - headers = {'X-Image-Meta-Property-Distro': 'Ubuntu', - 'X-Image-Meta-Property-Arch': 'x86_64'} - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', headers=headers) - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual("x86_64", data['image']['properties']['arch']) - self.assertEqual("Ubuntu", data['image']['properties']['distro']) - - # 8. 
GET /images/detail - # Verify image and all its metadata - path = "/v1/images/detail" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - expected_image = { - "status": "active", - "name": "Image1", - "deleted": False, - "container_format": "ovf", - "disk_format": "raw", - "id": image_id, - "is_public": True, - "deleted_at": None, - "properties": {'distro': 'Ubuntu', 'arch': 'x86_64'}, - "size": 5120} - - image = jsonutils.loads(content) - - for expected_key, expected_value in expected_image.items(): - self.assertEqual(expected_value, image['images'][0][expected_key], - "For key '%s' expected header value '%s'. " - "Got '%s'" % (expected_key, - expected_value, - image['images'][0][expected_key])) - - # 9. PUT image and remove a previously existing property. - headers = {'X-Image-Meta-Property-Arch': 'x86_64'} - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', headers=headers) - self.assertEqual(http_client.OK, response.status) - - path = "/v1/images/detail" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content)['images'][0] - self.assertEqual(1, len(data['properties'])) - self.assertEqual("x86_64", data['properties']['arch']) - - # 10. PUT image and add a previously deleted property. 
- headers = {'X-Image-Meta-Property-Distro': 'Ubuntu', - 'X-Image-Meta-Property-Arch': 'x86_64'} - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', headers=headers) - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - - path = "/v1/images/detail" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content)['images'][0] - self.assertEqual(2, len(data['properties'])) - self.assertEqual("x86_64", data['properties']['arch']) - self.assertEqual("Ubuntu", data['properties']['distro']) - self.assertNotEqual(data['created_at'], data['updated_at']) - - # DELETE image - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - def test_queued_process_flow(self): - """ - We test the process flow where a user registers an image - with Glance but does not immediately upload an image file. - Later, the user uploads an image file using a PUT operation. - We track the changing of image status throughout this process. - - 0. GET /images - - Verify no public images - 1. POST /images with public image named Image1 with no location - attribute and no image data. - - Verify 201 returned - 2. GET /images - - Verify one public image - 3. HEAD image - - Verify image now in queued status - 4. PUT image with image data - - Verify 200 returned - 5. HEAD images - - Verify image now in active status - 6. GET /images - - Verify one public image - """ - - # 0. GET /images - # Verify no public images - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('{"images": []}', content) - - # 1. 
POST /images with public image named Image1 - # with no location or image data - headers = minimal_headers('Image1') - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - self.assertIsNone(data['image']['checksum']) - self.assertEqual(0, data['image']['size']) - self.assertEqual('ovf', data['image']['container_format']) - self.assertEqual('raw', data['image']['disk_format']) - self.assertEqual("Image1", data['image']['name']) - self.assertTrue(data['image']['is_public']) - - image_id = data['image']['id'] - - # 2. GET /images - # Verify 1 public image - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(image_id, data['images'][0]['id']) - self.assertIsNone(data['images'][0]['checksum']) - self.assertEqual(0, data['images'][0]['size']) - self.assertEqual('ovf', data['images'][0]['container_format']) - self.assertEqual('raw', data['images'][0]['disk_format']) - self.assertEqual("Image1", data['images'][0]['name']) - - # 3. HEAD /images - # Verify status is in queued - path = "/v1/images/%s" % (image_id) - response, content = self.http.request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual("Image1", response['x-image-meta-name']) - self.assertEqual("queued", response['x-image-meta-status']) - self.assertEqual('0', response['x-image-meta-size']) - self.assertEqual(image_id, response['x-image-meta-id']) - - # 4. 
PUT image with image data, verify 200 returned - image_data = b"*" * FIVE_KB - headers = {'Content-Type': 'application/octet-stream'} - path = "/v1/images/%s" % (image_id) - response, content = self.http.request(path, 'PUT', headers=headers, - body=image_data) - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(hashlib.md5(image_data).hexdigest(), - data['image']['checksum']) - self.assertEqual(FIVE_KB, data['image']['size']) - self.assertEqual("Image1", data['image']['name']) - self.assertTrue(data['image']['is_public']) - - # 5. HEAD /images - # Verify status is in active - path = "/v1/images/%s" % (image_id) - response, content = self.http.request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual("Image1", response['x-image-meta-name']) - self.assertEqual("active", response['x-image-meta-status']) - - # 6. GET /images - # Verify 1 public image still... - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(hashlib.md5(image_data).hexdigest(), - data['images'][0]['checksum']) - self.assertEqual(image_id, data['images'][0]['id']) - self.assertEqual(FIVE_KB, data['images'][0]['size']) - self.assertEqual('ovf', data['images'][0]['container_format']) - self.assertEqual('raw', data['images'][0]['disk_format']) - self.assertEqual("Image1", data['images'][0]['name']) - - # DELETE image - path = "/v1/images/%s" % (image_id) - response, content = self.http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - def test_v1_not_enabled(self): - self.config(enable_v1_api=False) - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) - - def test_v1_enabled(self): - self.config(enable_v1_api=True) - path = "/v1/images" - response, content = 
self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - - def test_zero_initial_size(self): - """ - A test to ensure that an image with size explicitly set to zero - has status that immediately transitions to active. - """ - # 1. POST /images with public image named Image1 - # attribute and a size of zero. - # Verify a 201 OK is returned - headers = {'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Size': '0', - 'X-Image-Meta-Name': 'Image1', - 'X-Image-Meta-disk_format': 'raw', - 'X-image-Meta-container_format': 'ovf', - 'X-Image-Meta-Is-Public': 'True'} - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status) - image = jsonutils.loads(content)['image'] - self.assertEqual('active', image['status']) - - # 2. HEAD image-location - # Verify image size is zero and the status is active - path = response.get('location') - response, content = self.http.request(path, 'HEAD') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('0', response['x-image-meta-size']) - self.assertEqual('active', response['x-image-meta-status']) - - # 3. GET image-location - # Verify image content is empty - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual(0, len(content)) - - def test_traceback_not_consumed(self): - """ - A test that errors coming from the POST API do not - get consumed and print the actual error message, and - not something like <traceback object at 0x1918d40> - - :see https://bugs.launchpad.net/glance/+bug/755912 - """ - # POST /images with binary data, but not setting - # Content-Type to application/octet-stream, verify a - # 400 returned and that the error is readable. 
- with tempfile.NamedTemporaryFile() as test_data_file: - test_data_file.write(b"XXX") - test_data_file.flush() - path = "/v1/images" - headers = minimal_headers('Image1') - headers['Content-Type'] = 'not octet-stream' - response, content = self.http.request(path, 'POST', - body=test_data_file.name, - headers=headers) - self.assertEqual(http_client.BAD_REQUEST, response.status) - expected = "Content-Type must be application/octet-stream" - self.assertIn(expected, content, - "Could not find '%s' in '%s'" % (expected, content)) - - def test_filtered_images(self): - """ - Set up four test images and ensure each query param filter works - """ - - # 0. GET /images - # Verify no public images - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('{"images": []}', content) - - image_ids = [] - - # 1. POST /images with three public images, and one private image - # with various attributes - headers = {'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Name': 'Image1', - 'X-Image-Meta-Status': 'active', - 'X-Image-Meta-Container-Format': 'ovf', - 'X-Image-Meta-Disk-Format': 'vdi', - 'X-Image-Meta-Size': '19', - 'X-Image-Meta-Is-Public': 'True', - 'X-Image-Meta-Protected': 'True', - 'X-Image-Meta-Property-pants': 'are on'} - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - self.assertEqual("are on", data['image']['properties']['pants']) - self.assertTrue(data['image']['is_public']) - image_ids.append(data['image']['id']) - - headers = {'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Name': 'My Image!', - 'X-Image-Meta-Status': 'active', - 'X-Image-Meta-Container-Format': 'ovf', - 'X-Image-Meta-Disk-Format': 'vhd', - 'X-Image-Meta-Size': '20', - 'X-Image-Meta-Is-Public': 'True', - 'X-Image-Meta-Protected': 'False', - 
'X-Image-Meta-Property-pants': 'are on'} - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - self.assertEqual("are on", data['image']['properties']['pants']) - self.assertTrue(data['image']['is_public']) - image_ids.append(data['image']['id']) - - headers = {'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Name': 'My Image!', - 'X-Image-Meta-Status': 'saving', - 'X-Image-Meta-Container-Format': 'ami', - 'X-Image-Meta-Disk-Format': 'ami', - 'X-Image-Meta-Size': '21', - 'X-Image-Meta-Is-Public': 'True', - 'X-Image-Meta-Protected': 'False', - 'X-Image-Meta-Property-pants': 'are off'} - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - self.assertEqual("are off", data['image']['properties']['pants']) - self.assertTrue(data['image']['is_public']) - image_ids.append(data['image']['id']) - - headers = {'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Name': 'My Private Image', - 'X-Image-Meta-Status': 'active', - 'X-Image-Meta-Container-Format': 'ami', - 'X-Image-Meta-Disk-Format': 'ami', - 'X-Image-Meta-Size': '22', - 'X-Image-Meta-Is-Public': 'False', - 'X-Image-Meta-Protected': 'False'} - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - self.assertFalse(data['image']['is_public']) - image_ids.append(data['image']['id']) - - # 2. GET /images - # Verify three public images - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(3, len(data['images'])) - - # 3. 
GET /images with name filter - # Verify correct images returned with name - params = "name=My%20Image!" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(2, len(data['images'])) - for image in data['images']: - self.assertEqual("My Image!", image['name']) - - # 4. GET /images with status filter - # Verify correct images returned with status - params = "status=queued" - path = "/v1/images/detail?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(3, len(data['images'])) - for image in data['images']: - self.assertEqual("queued", image['status']) - - params = "status=active" - path = "/v1/images/detail?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(0, len(data['images'])) - - # 5. GET /images with container_format filter - # Verify correct images returned with container_format - params = "container_format=ovf" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(2, len(data['images'])) - for image in data['images']: - self.assertEqual("ovf", image['container_format']) - - # 6. GET /images with disk_format filter - # Verify correct images returned with disk_format - params = "disk_format=vdi" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(1, len(data['images'])) - for image in data['images']: - self.assertEqual("vdi", image['disk_format']) - - # 7. 
GET /images with size_max filter - # Verify correct images returned with size <= expected - params = "size_max=20" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(2, len(data['images'])) - for image in data['images']: - self.assertLessEqual(image['size'], 20) - - # 8. GET /images with size_min filter - # Verify correct images returned with size >= expected - params = "size_min=20" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(2, len(data['images'])) - for image in data['images']: - self.assertGreaterEqual(image['size'], 20) - - # 9. Get /images with is_public=None filter - # Verify correct images returned with property - # Bug lp:803656 Support is_public in filtering - params = "is_public=None" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(4, len(data['images'])) - - # 10. Get /images with is_public=False filter - # Verify correct images returned with property - # Bug lp:803656 Support is_public in filtering - params = "is_public=False" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(1, len(data['images'])) - for image in data['images']: - self.assertEqual("My Private Image", image['name']) - - # 11. 
Get /images with is_public=True filter - # Verify correct images returned with property - # Bug lp:803656 Support is_public in filtering - params = "is_public=True" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(3, len(data['images'])) - for image in data['images']: - self.assertNotEqual(image['name'], "My Private Image") - - # 12. Get /images with protected=False filter - # Verify correct images returned with property - params = "protected=False" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(2, len(data['images'])) - for image in data['images']: - self.assertNotEqual(image['name'], "Image1") - - # 13. Get /images with protected=True filter - # Verify correct images returned with property - params = "protected=True" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(1, len(data['images'])) - for image in data['images']: - self.assertEqual("Image1", image['name']) - - # 14. GET /images with property filter - # Verify correct images returned with property - params = "property-pants=are%20on" - path = "/v1/images/detail?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(2, len(data['images'])) - for image in data['images']: - self.assertEqual("are on", image['properties']['pants']) - - # 15. GET /images with property filter and name filter - # Verify correct images returned with property and name - # Make sure you quote the url when using more than one param! 
- params = "name=My%20Image!&property-pants=are%20on" - path = "/v1/images/detail?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(1, len(data['images'])) - for image in data['images']: - self.assertEqual("are on", image['properties']['pants']) - self.assertEqual("My Image!", image['name']) - - # 16. GET /images with past changes-since filter - yesterday = timeutils.isotime(timeutils.utcnow() - - datetime.timedelta(1)) - params = "changes-since=%s" % yesterday - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(3, len(data['images'])) - - # one timezone west of Greenwich equates to an hour ago - # taking care to pre-urlencode '+' as '%2B', otherwise the timezone - # '+' is wrongly decoded as a space - # TODO(eglynn): investigate '+' --> decoding, an artifact - # of WSGI/webob dispatch? - now = timeutils.utcnow() - hour_ago = now.strftime('%Y-%m-%dT%H:%M:%S%%2B01:00') - params = "changes-since=%s" % hour_ago - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(3, len(data['images'])) - - # 17. 
GET /images with future changes-since filter - tomorrow = timeutils.isotime(timeutils.utcnow() + - datetime.timedelta(1)) - params = "changes-since=%s" % tomorrow - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(0, len(data['images'])) - - # one timezone east of Greenwich equates to an hour from now - now = timeutils.utcnow() - hour_hence = now.strftime('%Y-%m-%dT%H:%M:%S-01:00') - params = "changes-since=%s" % hour_hence - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(0, len(data['images'])) - - # 18. GET /images with size_min filter - # Verify correct images returned with size >= expected - params = "size_min=-1" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.BAD_REQUEST, response.status) - self.assertIn("filter size_min got -1", content) - - # 19. GET /images with size_min filter - # Verify correct images returned with size >= expected - params = "size_max=-1" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.BAD_REQUEST, response.status) - self.assertIn("filter size_max got -1", content) - - # 20. GET /images with size_min filter - # Verify correct images returned with size >= expected - params = "min_ram=-1" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.BAD_REQUEST, response.status) - self.assertIn("Bad value passed to filter min_ram got -1", content) - - # 21. 
GET /images with size_min filter - # Verify correct images returned with size >= expected - params = "protected=imalittleteapot" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.BAD_REQUEST, response.status) - self.assertIn("protected got imalittleteapot", content) - - # 22. GET /images with size_min filter - # Verify correct images returned with size >= expected - params = "is_public=imalittleteapot" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.BAD_REQUEST, response.status) - self.assertIn("is_public got imalittleteapot", content) - - def test_limited_images(self): - """ - Ensure marker and limit query params work - """ - - # 0. GET /images - # Verify no public images - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('{"images": []}', content) - - image_ids = [] - - # 1. POST /images with three public images with various attributes - headers = minimal_headers('Image1') - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status) - image_ids.append(jsonutils.loads(content)['image']['id']) - - headers = minimal_headers('Image2') - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status) - image_ids.append(jsonutils.loads(content)['image']['id']) - - headers = minimal_headers('Image3') - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status) - image_ids.append(jsonutils.loads(content)['image']['id']) - - # 2. 
GET /images with all images - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - images = jsonutils.loads(content)['images'] - self.assertEqual(3, len(images)) - - # 3. GET /images with limit of 2 - # Verify only two images were returned - params = "limit=2" - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content)['images'] - self.assertEqual(2, len(data)) - self.assertEqual(images[0]['id'], data[0]['id']) - self.assertEqual(images[1]['id'], data[1]['id']) - - # 4. GET /images with marker - # Verify only two images were returned - params = "marker=%s" % images[0]['id'] - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content)['images'] - self.assertEqual(2, len(data)) - self.assertEqual(images[1]['id'], data[0]['id']) - self.assertEqual(images[2]['id'], data[1]['id']) - - # 5. GET /images with marker and limit - # Verify only one image was returned with the correct id - params = "limit=1&marker=%s" % images[1]['id'] - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content)['images'] - self.assertEqual(1, len(data)) - self.assertEqual(images[2]['id'], data[0]['id']) - - # 6. 
GET /images/detail with marker and limit - # Verify only one image was returned with the correct id - params = "limit=1&marker=%s" % images[1]['id'] - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content)['images'] - self.assertEqual(1, len(data)) - self.assertEqual(images[2]['id'], data[0]['id']) - - # DELETE images - for image_id in image_ids: - path = "/v1/images/%s" % (image_id) - response, content = self.http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - def test_ordered_images(self): - """ - Set up three test images and ensure each query param filter works - """ - # 0. GET /images - # Verify no public images - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('{"images": []}', content) - - # 1. POST /images with three public images with various attributes - image_ids = [] - headers = {'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Name': 'Image1', - 'X-Image-Meta-Status': 'active', - 'X-Image-Meta-Container-Format': 'ovf', - 'X-Image-Meta-Disk-Format': 'vdi', - 'X-Image-Meta-Size': '19', - 'X-Image-Meta-Is-Public': 'True'} - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status) - image_ids.append(jsonutils.loads(content)['image']['id']) - - headers = {'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Name': 'ASDF', - 'X-Image-Meta-Status': 'active', - 'X-Image-Meta-Container-Format': 'bare', - 'X-Image-Meta-Disk-Format': 'iso', - 'X-Image-Meta-Size': '2', - 'X-Image-Meta-Is-Public': 'True'} - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status) - image_ids.append(jsonutils.loads(content)['image']['id']) - - 
headers = {'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Name': 'XYZ', - 'X-Image-Meta-Status': 'saving', - 'X-Image-Meta-Container-Format': 'ami', - 'X-Image-Meta-Disk-Format': 'ami', - 'X-Image-Meta-Size': '5', - 'X-Image-Meta-Is-Public': 'True'} - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status) - image_ids.append(jsonutils.loads(content)['image']['id']) - - # 2. GET /images with no query params - # Verify three public images sorted by created_at desc - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(3, len(data['images'])) - self.assertEqual(image_ids[2], data['images'][0]['id']) - self.assertEqual(image_ids[1], data['images'][1]['id']) - self.assertEqual(image_ids[0], data['images'][2]['id']) - - # 3. GET /images sorted by name asc - params = 'sort_key=name&sort_dir=asc' - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(3, len(data['images'])) - self.assertEqual(image_ids[1], data['images'][0]['id']) - self.assertEqual(image_ids[0], data['images'][1]['id']) - self.assertEqual(image_ids[2], data['images'][2]['id']) - - # 4. GET /images sorted by size desc - params = 'sort_key=size&sort_dir=desc' - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(3, len(data['images'])) - self.assertEqual(image_ids[0], data['images'][0]['id']) - self.assertEqual(image_ids[2], data['images'][1]['id']) - self.assertEqual(image_ids[1], data['images'][2]['id']) - - # 5. 
GET /images sorted by size desc with a marker - params = 'sort_key=size&sort_dir=desc&marker=%s' % image_ids[0] - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(2, len(data['images'])) - self.assertEqual(image_ids[2], data['images'][0]['id']) - self.assertEqual(image_ids[1], data['images'][1]['id']) - - # 6. GET /images sorted by name asc with a marker - params = 'sort_key=name&sort_dir=asc&marker=%s' % image_ids[2] - path = "/v1/images?%s" % (params) - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - data = jsonutils.loads(content) - self.assertEqual(0, len(data['images'])) - - # DELETE images - for image_id in image_ids: - path = "/v1/images/%s" % (image_id) - response, content = self.http.request(path, 'DELETE') - self.assertEqual(http_client.OK, response.status) - - def test_duplicate_image_upload(self): - """ - Upload initial image, then attempt to upload duplicate image - """ - # 0. GET /images - # Verify no public images - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('{"images": []}', content) - - # 1. POST /images with public image named Image1 - headers = {'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Name': 'Image1', - 'X-Image-Meta-Status': 'active', - 'X-Image-Meta-Container-Format': 'ovf', - 'X-Image-Meta-Disk-Format': 'vdi', - 'X-Image-Meta-Size': '19', - 'X-Image-Meta-Is-Public': 'True'} - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status) - - image = jsonutils.loads(content)['image'] - - # 2. 
POST /images with public image named Image1, and ID: 1 - headers = {'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Name': 'Image1 Update', - 'X-Image-Meta-Status': 'active', - 'X-Image-Meta-Container-Format': 'ovf', - 'X-Image-Meta-Disk-Format': 'vdi', - 'X-Image-Meta-Size': '19', - 'X-Image-Meta-Id': image['id'], - 'X-Image-Meta-Is-Public': 'True'} - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CONFLICT, response.status) - - def test_delete_not_existing(self): - """ - We test the following: - - 0. GET /images/1 - - Verify 404 - 1. DELETE /images/1 - - Verify 404 - """ - - # 0. GET /images - # Verify no public images - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - self.assertEqual('{"images": []}', content) - - # 1. DELETE /images/1 - # Verify 404 returned - path = "/v1/images/1" - response, content = self.http.request(path, 'DELETE') - self.assertEqual(http_client.NOT_FOUND, response.status) - - def _do_test_post_image_content_bad_format(self, format): - """ - We test that missing container/disk format fails with 400 "Bad Request" - - :see https://bugs.launchpad.net/glance/+bug/933702 - """ - - # Verify no public images - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - images = jsonutils.loads(content)['images'] - self.assertEqual(0, len(images)) - - path = "/v1/images" - - # POST /images without given format being specified - headers = minimal_headers('Image1') - headers['X-Image-Meta-' + format] = 'bad_value' - with tempfile.NamedTemporaryFile() as test_data_file: - test_data_file.write(b"XXX") - test_data_file.flush() - response, content = self.http.request(path, 'POST', - headers=headers, - body=test_data_file.name) - self.assertEqual(http_client.BAD_REQUEST, response.status) - type = format.replace('_format', '') 
- expected = "Invalid %s format 'bad_value' for image" % type - self.assertIn(expected, content, - "Could not find '%s' in '%s'" % (expected, content)) - - # make sure the image was not created - # Verify no public images - path = "/v1/images" - response, content = self.http.request(path, 'GET') - self.assertEqual(http_client.OK, response.status) - images = jsonutils.loads(content)['images'] - self.assertEqual(0, len(images)) - - def test_post_image_content_bad_container_format(self): - self._do_test_post_image_content_bad_format('container_format') - - def test_post_image_content_bad_disk_format(self): - self._do_test_post_image_content_bad_format('disk_format') - - def _do_test_put_image_content_missing_format(self, format): - """ - We test that missing container/disk format only fails with - 400 "Bad Request" when the image content is PUT (i.e. not - on the original POST of a queued image). - - :see https://bugs.launchpad.net/glance/+bug/937216 - """ - - # POST queued image - path = "/v1/images" - headers = { - 'X-Image-Meta-Name': 'Image1', - 'X-Image-Meta-Is-Public': 'True', - } - response, content = self.http.request(path, 'POST', headers=headers) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['image']['id'] - self.addDetail('image_data', testtools.content.json_content(data)) - - # PUT image content images without given format being specified - path = "/v1/images/%s" % (image_id) - headers = minimal_headers('Image1') - del headers['X-Image-Meta-' + format] - with tempfile.NamedTemporaryFile() as test_data_file: - test_data_file.write(b"XXX") - test_data_file.flush() - response, content = self.http.request(path, 'PUT', - headers=headers, - body=test_data_file.name) - self.assertEqual(http_client.BAD_REQUEST, response.status) - type = format.replace('_format', '').capitalize() - expected = "%s format is not specified" % type - self.assertIn(expected, content, - "Could not find '%s' in '%s'" % 
(expected, content)) - - def test_put_image_content_bad_container_format(self): - self._do_test_put_image_content_missing_format('container_format') - - def test_put_image_content_bad_disk_format(self): - self._do_test_put_image_content_missing_format('disk_format') - - def _do_test_mismatched_attribute(self, attribute, value): - """ - Test mismatched attribute. - """ - - image_data = "*" * FIVE_KB - headers = minimal_headers('Image1') - headers[attribute] = value - path = "/v1/images" - response, content = self.http.request(path, 'POST', headers=headers, - body=image_data) - self.assertEqual(http_client.BAD_REQUEST, response.status) - - images_dir = os.path.join(self.test_dir, 'images') - image_count = len([name for name in os.listdir(images_dir) - if os.path.isfile(os.path.join(images_dir, name))]) - self.assertEqual(0, image_count) - - def test_mismatched_size(self): - """ - Test mismatched size. - """ - self._do_test_mismatched_attribute('x-image-meta-size', - str(FIVE_KB + 1)) - - def test_mismatched_checksum(self): - """ - Test mismatched checksum. 
- """ - self._do_test_mismatched_attribute('x-image-meta-checksum', - 'foobar') - - -class TestApiWithFakeAuth(base.ApiTest): - def __init__(self, *args, **kwargs): - super(TestApiWithFakeAuth, self).__init__(*args, **kwargs) - self.api_flavor = 'fakeauth' - self.registry_flavor = 'fakeauth' - - def test_ownership(self): - # Add an image with admin privileges and ensure the owner - # can be set to something other than what was used to authenticate - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:admin', - } - - create_headers = { - 'X-Image-Meta-Name': 'MyImage', - 'X-Image-Meta-disk_format': 'raw', - 'X-Image-Meta-container_format': 'ovf', - 'X-Image-Meta-Is-Public': 'True', - 'X-Image-Meta-Owner': 'tenant2', - } - create_headers.update(auth_headers) - - path = "/v1/images" - response, content = self.http.request(path, 'POST', - headers=create_headers) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['image']['id'] - - path = "/v1/images/%s" % (image_id) - response, content = self.http.request(path, 'HEAD', - headers=auth_headers) - self.assertEqual(http_client.OK, response.status) - self.assertEqual('tenant2', response['x-image-meta-owner']) - - # Now add an image without admin privileges and ensure the owner - # cannot be set to something other than what was used to authenticate - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:role1', - } - create_headers.update(auth_headers) - - path = "/v1/images" - response, content = self.http.request(path, 'POST', - headers=create_headers) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['image']['id'] - - # We have to be admin to see the owner - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:admin', - } - create_headers.update(auth_headers) - - path = "/v1/images/%s" % (image_id) - response, content = self.http.request(path, 'HEAD', - headers=auth_headers) - self.assertEqual(http_client.OK, 
response.status) - self.assertEqual('tenant1', response['x-image-meta-owner']) - - # Make sure the non-privileged user can't update their owner either - update_headers = { - 'X-Image-Meta-Name': 'MyImage2', - 'X-Image-Meta-Owner': 'tenant2', - 'X-Auth-Token': 'user1:tenant1:role1', - } - - path = "/v1/images/%s" % (image_id) - response, content = self.http.request(path, 'PUT', - headers=update_headers) - self.assertEqual(http_client.OK, response.status) - - # We have to be admin to see the owner - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:admin', - } - - path = "/v1/images/%s" % (image_id) - response, content = self.http.request(path, 'HEAD', - headers=auth_headers) - self.assertEqual(http_client.OK, response.status) - self.assertEqual('tenant1', response['x-image-meta-owner']) - - # An admin user should be able to update the owner - auth_headers = { - 'X-Auth-Token': 'user1:tenant3:admin', - } - - update_headers = { - 'X-Image-Meta-Name': 'MyImage2', - 'X-Image-Meta-Owner': 'tenant2', - } - update_headers.update(auth_headers) - - path = "/v1/images/%s" % (image_id) - response, content = self.http.request(path, 'PUT', - headers=update_headers) - self.assertEqual(http_client.OK, response.status) - - path = "/v1/images/%s" % (image_id) - response, content = self.http.request(path, 'HEAD', - headers=auth_headers) - self.assertEqual(http_client.OK, response.status) - self.assertEqual('tenant2', response['x-image-meta-owner']) - - def test_image_visibility_to_different_users(self): - owners = ['admin', 'tenant1', 'tenant2', 'none'] - visibilities = {'public': 'True', 'private': 'False'} - image_ids = {} - - for owner in owners: - for visibility, is_public in visibilities.items(): - name = '%s-%s' % (owner, visibility) - headers = { - 'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Name': name, - 'X-Image-Meta-Status': 'active', - 'X-Image-Meta-Is-Public': is_public, - 'X-Image-Meta-Owner': owner, - 'X-Auth-Token': 'createuser:createtenant:admin', - } 
- path = "/v1/images" - response, content = self.http.request(path, 'POST', - headers=headers) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_ids[name] = data['image']['id'] - - def list_images(tenant, role='', is_public=None): - auth_token = 'user:%s:%s' % (tenant, role) - headers = {'X-Auth-Token': auth_token} - path = "/v1/images/detail" - if is_public is not None: - path += '?is_public=%s' % is_public - response, content = self.http.request(path, 'GET', headers=headers) - self.assertEqual(http_client.OK, response.status) - return jsonutils.loads(content)['images'] - - # 1. Known user sees public and their own images - images = list_images('tenant1') - self.assertEqual(5, len(images)) - for image in images: - self.assertTrue(image['is_public'] or image['owner'] == 'tenant1') - - # 2. Unknown user sees only public images - images = list_images('none') - self.assertEqual(4, len(images)) - for image in images: - self.assertTrue(image['is_public']) - - # 3. Unknown admin sees only public images - images = list_images('none', role='admin') - self.assertEqual(4, len(images)) - for image in images: - self.assertTrue(image['is_public']) - - # 4. Unknown admin, is_public=none, shows all images - images = list_images('none', role='admin', is_public='none') - self.assertEqual(8, len(images)) - - # 5. Unknown admin, is_public=true, shows only public images - images = list_images('none', role='admin', is_public='true') - self.assertEqual(4, len(images)) - for image in images: - self.assertTrue(image['is_public']) - - # 6. Unknown admin, is_public=false, sees only private images - images = list_images('none', role='admin', is_public='false') - self.assertEqual(4, len(images)) - for image in images: - self.assertFalse(image['is_public']) - - # 7. 
Known admin sees public and their own images - images = list_images('admin', role='admin') - self.assertEqual(5, len(images)) - for image in images: - self.assertTrue(image['is_public'] or image['owner'] == 'admin') - - # 8. Known admin, is_public=none, shows all images - images = list_images('admin', role='admin', is_public='none') - self.assertEqual(8, len(images)) - - # 9. Known admin, is_public=true, sees all public and their images - images = list_images('admin', role='admin', is_public='true') - self.assertEqual(5, len(images)) - for image in images: - self.assertTrue(image['is_public'] or image['owner'] == 'admin') - - # 10. Known admin, is_public=false, sees all private images - images = list_images('admin', role='admin', is_public='false') - self.assertEqual(4, len(images)) - for image in images: - self.assertFalse(image['is_public']) - - def test_property_protections(self): - # Enable property protection - self.config(property_protection_file=self.property_file) - self.init() - - CREATE_HEADERS = { - 'X-Image-Meta-Name': 'MyImage', - 'X-Image-Meta-disk_format': 'raw', - 'X-Image-Meta-container_format': 'ovf', - 'X-Image-Meta-Is-Public': 'True', - 'X-Image-Meta-Owner': 'tenant2', - } - - # Create an image for role member with extra properties - # Raises 403 since user is not allowed to create 'foo' - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:member', - } - custom_props = { - 'x-image-meta-property-foo': 'bar' - } - auth_headers.update(custom_props) - auth_headers.update(CREATE_HEADERS) - path = "/v1/images" - response, content = self.http.request(path, 'POST', - headers=auth_headers) - self.assertEqual(http_client.FORBIDDEN, response.status) - - # Create an image for role member without 'foo' - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:member', - } - custom_props = { - 'x-image-meta-property-x_owner_foo': 'o_s_bar', - } - auth_headers.update(custom_props) - auth_headers.update(CREATE_HEADERS) - path = "/v1/images" - response, content = 
self.http.request(path, 'POST', - headers=auth_headers) - self.assertEqual(http_client.CREATED, response.status) - - # Returned image entity should have 'x_owner_foo' - data = jsonutils.loads(content) - self.assertEqual('o_s_bar', - data['image']['properties']['x_owner_foo']) - - # Create an image for role spl_role with extra properties - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:spl_role', - } - custom_props = { - 'X-Image-Meta-Property-spl_create_prop': 'create_bar', - 'X-Image-Meta-Property-spl_read_prop': 'read_bar', - 'X-Image-Meta-Property-spl_update_prop': 'update_bar', - 'X-Image-Meta-Property-spl_delete_prop': 'delete_bar' - } - auth_headers.update(custom_props) - auth_headers.update(CREATE_HEADERS) - path = "/v1/images" - response, content = self.http.request(path, 'POST', - headers=auth_headers) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['image']['id'] - - # Attempt to update two properties, one protected(spl_read_prop), the - # other not(spl_update_prop). Request should be forbidden. 
- auth_headers = { - 'X-Auth-Token': 'user1:tenant1:spl_role', - } - custom_props = { - 'X-Image-Meta-Property-spl_read_prop': 'r', - 'X-Image-Meta-Property-spl_update_prop': 'u', - 'X-Glance-Registry-Purge-Props': 'False' - } - auth_headers.update(auth_headers) - auth_headers.update(custom_props) - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', - headers=auth_headers) - self.assertEqual(http_client.FORBIDDEN, response.status) - - # Attempt to create properties which are forbidden - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:spl_role', - } - custom_props = { - 'X-Image-Meta-Property-spl_new_prop': 'new', - 'X-Glance-Registry-Purge-Props': 'True' - } - auth_headers.update(auth_headers) - auth_headers.update(custom_props) - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', - headers=auth_headers) - self.assertEqual(http_client.FORBIDDEN, response.status) - - # Attempt to update, create and delete properties - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:spl_role', - } - custom_props = { - 'X-Image-Meta-Property-spl_create_prop': 'create_bar', - 'X-Image-Meta-Property-spl_read_prop': 'read_bar', - 'X-Image-Meta-Property-spl_update_prop': 'u', - 'X-Glance-Registry-Purge-Props': 'True' - } - auth_headers.update(auth_headers) - auth_headers.update(custom_props) - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', - headers=auth_headers) - self.assertEqual(http_client.OK, response.status) - - # Returned image entity should reflect the changes - image = jsonutils.loads(content) - - # 'spl_update_prop' has update permission for spl_role - # hence the value has changed - self.assertEqual('u', image['image']['properties']['spl_update_prop']) - - # 'spl_delete_prop' has delete permission for spl_role - # hence the property has been deleted - self.assertNotIn('spl_delete_prop', image['image']['properties']) - - # 'spl_create_prop' has create permission 
for spl_role - # hence the property has been created - self.assertEqual('create_bar', - image['image']['properties']['spl_create_prop']) - - # Image Deletion should work - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:spl_role', - } - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'DELETE', - headers=auth_headers) - self.assertEqual(http_client.OK, response.status) - - # This image should be no longer be directly accessible - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:spl_role', - } - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'HEAD', - headers=auth_headers) - self.assertEqual(http_client.NOT_FOUND, response.status) - - def test_property_protections_special_chars(self): - # Enable property protection - self.config(property_protection_file=self.property_file) - self.init() - - CREATE_HEADERS = { - 'X-Image-Meta-Name': 'MyImage', - 'X-Image-Meta-disk_format': 'raw', - 'X-Image-Meta-container_format': 'ovf', - 'X-Image-Meta-Is-Public': 'True', - 'X-Image-Meta-Owner': 'tenant2', - 'X-Image-Meta-Size': '0', - } - - # Create an image - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:member', - } - auth_headers.update(CREATE_HEADERS) - path = "/v1/images" - response, content = self.http.request(path, 'POST', - headers=auth_headers) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['image']['id'] - - # Verify both admin and unknown role can create properties marked with - # '@' - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:admin', - } - custom_props = { - 'X-Image-Meta-Property-x_all_permitted_admin': '1' - } - auth_headers.update(custom_props) - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', - headers=auth_headers) - self.assertEqual(http_client.OK, response.status) - image = jsonutils.loads(content) - self.assertEqual('1', - image['image']['properties']['x_all_permitted_admin']) - 
auth_headers = { - 'X-Auth-Token': 'user1:tenant1:joe_soap', - } - custom_props = { - 'X-Image-Meta-Property-x_all_permitted_joe_soap': '1', - 'X-Glance-Registry-Purge-Props': 'False' - } - auth_headers.update(custom_props) - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', - headers=auth_headers) - self.assertEqual(http_client.OK, response.status) - image = jsonutils.loads(content) - self.assertEqual( - '1', image['image']['properties']['x_all_permitted_joe_soap']) - - # Verify both admin and unknown role can read properties marked with - # '@' - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:admin', - } - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'HEAD', - headers=auth_headers) - self.assertEqual(http_client.OK, response.status) - self.assertEqual('1', response.get( - 'x-image-meta-property-x_all_permitted_admin')) - self.assertEqual('1', response.get( - 'x-image-meta-property-x_all_permitted_joe_soap')) - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:joe_soap', - } - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'HEAD', - headers=auth_headers) - self.assertEqual(http_client.OK, response.status) - self.assertEqual('1', response.get( - 'x-image-meta-property-x_all_permitted_admin')) - self.assertEqual('1', response.get( - 'x-image-meta-property-x_all_permitted_joe_soap')) - - # Verify both admin and unknown role can update properties marked with - # '@' - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:admin', - } - custom_props = { - 'X-Image-Meta-Property-x_all_permitted_admin': '2', - 'X-Glance-Registry-Purge-Props': 'False' - } - auth_headers.update(custom_props) - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', - headers=auth_headers) - self.assertEqual(http_client.OK, response.status) - image = jsonutils.loads(content) - self.assertEqual('2', - image['image']['properties']['x_all_permitted_admin']) - 
auth_headers = { - 'X-Auth-Token': 'user1:tenant1:joe_soap', - } - custom_props = { - 'X-Image-Meta-Property-x_all_permitted_joe_soap': '2', - 'X-Glance-Registry-Purge-Props': 'False' - } - auth_headers.update(custom_props) - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', - headers=auth_headers) - self.assertEqual(http_client.OK, response.status) - image = jsonutils.loads(content) - self.assertEqual( - '2', image['image']['properties']['x_all_permitted_joe_soap']) - - # Verify both admin and unknown role can delete properties marked with - # '@' - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:admin', - } - custom_props = { - 'X-Image-Meta-Property-x_all_permitted_joe_soap': '2', - 'X-Glance-Registry-Purge-Props': 'True' - } - auth_headers.update(custom_props) - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', - headers=auth_headers) - self.assertEqual(http_client.OK, response.status) - image = jsonutils.loads(content) - self.assertNotIn('x_all_permitted_admin', image['image']['properties']) - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:joe_soap', - } - custom_props = { - 'X-Glance-Registry-Purge-Props': 'True' - } - auth_headers.update(custom_props) - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', - headers=auth_headers) - self.assertEqual(http_client.OK, response.status) - image = jsonutils.loads(content) - self.assertNotIn('x_all_permitted_joe_soap', - image['image']['properties']) - - # Verify neither admin nor unknown role can create a property protected - # with '!' 
- auth_headers = { - 'X-Auth-Token': 'user1:tenant1:admin', - } - custom_props = { - 'X-Image-Meta-Property-x_none_permitted_admin': '1' - } - auth_headers.update(custom_props) - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', - headers=auth_headers) - self.assertEqual(http_client.FORBIDDEN, response.status) - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:joe_soap', - } - custom_props = { - 'X-Image-Meta-Property-x_none_permitted_joe_soap': '1' - } - auth_headers.update(custom_props) - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', - headers=auth_headers) - self.assertEqual(http_client.FORBIDDEN, response.status) - - # Verify neither admin nor unknown role can read properties marked with - # '!' - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:admin', - } - custom_props = { - 'X-Image-Meta-Property-x_none_read': '1' - } - auth_headers.update(custom_props) - auth_headers.update(CREATE_HEADERS) - path = "/v1/images" - response, content = self.http.request(path, 'POST', - headers=auth_headers) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['image']['id'] - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:admin', - } - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'HEAD', - headers=auth_headers) - self.assertEqual(http_client.OK, response.status) - self.assertRaises(KeyError, - response.get, 'X-Image-Meta-Property-x_none_read') - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:joe_soap', - } - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'HEAD', - headers=auth_headers) - self.assertEqual(http_client.OK, response.status) - self.assertRaises(KeyError, - response.get, 'X-Image-Meta-Property-x_none_read') - - # Verify neither admin nor unknown role can update properties marked - # with '!' 
- auth_headers = { - 'X-Auth-Token': 'user1:tenant1:admin', - } - custom_props = { - 'X-Image-Meta-Property-x_none_update': '1' - } - auth_headers.update(custom_props) - auth_headers.update(CREATE_HEADERS) - path = "/v1/images" - response, content = self.http.request(path, 'POST', - headers=auth_headers) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['image']['id'] - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:admin', - } - custom_props = { - 'X-Image-Meta-Property-x_none_update': '2' - } - auth_headers.update(custom_props) - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', - headers=auth_headers) - self.assertEqual(http_client.FORBIDDEN, response.status) - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:joe_soap', - } - custom_props = { - 'X-Image-Meta-Property-x_none_update': '2' - } - auth_headers.update(custom_props) - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', - headers=auth_headers) - self.assertEqual(http_client.FORBIDDEN, response.status) - - # Verify neither admin nor unknown role can delete properties marked - # with '!' 
- auth_headers = { - 'X-Auth-Token': 'user1:tenant1:admin', - } - custom_props = { - 'X-Image-Meta-Property-x_none_delete': '1' - } - auth_headers.update(custom_props) - auth_headers.update(CREATE_HEADERS) - path = "/v1/images" - response, content = self.http.request(path, 'POST', - headers=auth_headers) - self.assertEqual(http_client.CREATED, response.status) - data = jsonutils.loads(content) - image_id = data['image']['id'] - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:admin', - } - custom_props = { - 'X-Glance-Registry-Purge-Props': 'True' - } - auth_headers.update(custom_props) - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', - headers=auth_headers) - self.assertEqual(http_client.FORBIDDEN, response.status) - auth_headers = { - 'X-Auth-Token': 'user1:tenant1:joe_soap', - } - custom_props = { - 'X-Glance-Registry-Purge-Props': 'True' - } - auth_headers.update(custom_props) - path = "/v1/images/%s" % image_id - response, content = self.http.request(path, 'PUT', - headers=auth_headers) - self.assertEqual(http_client.FORBIDDEN, response.status) diff --git a/glance/tests/integration/v2/__init__.py b/glance/tests/integration/v2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/integration/v2/base.py b/glance/tests/integration/v2/base.py deleted file mode 100644 index f38c4e57..00000000 --- a/glance/tests/integration/v2/base.py +++ /dev/null @@ -1,217 +0,0 @@ -# Copyright 2013 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import atexit -import os.path -import tempfile - -import fixtures -import glance_store -from oslo_config import cfg -from oslo_db import options - -import glance.common.client -from glance.common import config -import glance.db.sqlalchemy.api -import glance.registry.client.v1.client -from glance import tests as glance_tests -from glance.tests import utils as test_utils - - -TESTING_API_PASTE_CONF = """ -[pipeline:glance-api] -pipeline = versionnegotiation gzip unauthenticated-context rootapp - -[pipeline:glance-api-caching] -pipeline = versionnegotiation gzip unauthenticated-context cache rootapp - -[pipeline:glance-api-cachemanagement] -pipeline = - versionnegotiation - gzip - unauthenticated-context - cache - cache_manage - rootapp - -[pipeline:glance-api-fakeauth] -pipeline = versionnegotiation gzip fakeauth context rootapp - -[pipeline:glance-api-noauth] -pipeline = versionnegotiation gzip context rootapp - -[composite:rootapp] -paste.composite_factory = glance.api:root_app_factory -/: apiversions -/v1: apiv1app -/v2: apiv2app - -[app:apiversions] -paste.app_factory = glance.api.versions:create_resource - -[app:apiv1app] -paste.app_factory = glance.api.v1.router:API.factory - -[app:apiv2app] -paste.app_factory = glance.api.v2.router:API.factory - -[filter:versionnegotiation] -paste.filter_factory = - glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory - -[filter:gzip] -paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory - -[filter:cache] -paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory - -[filter:cache_manage] -paste.filter_factory = - glance.api.middleware.cache_manage:CacheManageFilter.factory - -[filter:context] -paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory - -[filter:unauthenticated-context] -paste.filter_factory = - 
glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - -[filter:fakeauth] -paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory -""" - -TESTING_REGISTRY_PASTE_CONF = """ -[pipeline:glance-registry] -pipeline = unauthenticated-context registryapp - -[pipeline:glance-registry-fakeauth] -pipeline = fakeauth context registryapp - -[app:registryapp] -paste.app_factory = glance.registry.api.v1:API.factory - -[filter:context] -paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory - -[filter:unauthenticated-context] -paste.filter_factory = - glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - -[filter:fakeauth] -paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory -""" - -CONF = cfg.CONF - - -class ApiTest(test_utils.BaseTestCase): - def setUp(self): - super(ApiTest, self).setUp() - self.test_dir = self.useFixture(fixtures.TempDir()).path - self._configure_logging() - self._setup_database() - self._setup_stores() - self._setup_property_protection() - self.glance_registry_app = self._load_paste_app( - 'glance-registry', - flavor=getattr(self, 'registry_flavor', ''), - conf=getattr(self, 'registry_paste_conf', - TESTING_REGISTRY_PASTE_CONF), - ) - self._connect_registry_client() - self.glance_api_app = self._load_paste_app( - 'glance-api', - flavor=getattr(self, 'api_flavor', ''), - conf=getattr(self, 'api_paste_conf', TESTING_API_PASTE_CONF), - ) - self.http = test_utils.Httplib2WsgiAdapter(self.glance_api_app) - - def _setup_property_protection(self): - self._copy_data_file('property-protections.conf', self.test_dir) - self.property_file = os.path.join(self.test_dir, - 'property-protections.conf') - - def _configure_logging(self): - self.config(default_log_levels=[ - 'amqplib=WARN', - 'sqlalchemy=WARN', - 'boto=WARN', - 'suds=INFO', - 'keystone=INFO', - 'eventlet.wsgi.server=DEBUG' - ]) - - def _setup_database(self): - sql_connection = 'sqlite:////%s/tests.sqlite' % 
self.test_dir - options.set_defaults(CONF, connection=sql_connection) - glance.db.sqlalchemy.api.clear_db_env() - glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE' - if glance_db_env in os.environ: - # use the empty db created and cached as a tempfile - # instead of spending the time creating a new one - db_location = os.environ[glance_db_env] - test_utils.execute('cp %s %s/tests.sqlite' - % (db_location, self.test_dir)) - else: - test_utils.db_sync() - - # copy the clean db to a temp location so that it - # can be reused for future tests - (osf, db_location) = tempfile.mkstemp() - os.close(osf) - test_utils.execute('cp %s/tests.sqlite %s' - % (self.test_dir, db_location)) - os.environ[glance_db_env] = db_location - - # cleanup the temp file when the test suite is - # complete - def _delete_cached_db(): - try: - os.remove(os.environ[glance_db_env]) - except Exception: - glance_tests.logger.exception( - "Error cleaning up the file %s" % - os.environ[glance_db_env]) - atexit.register(_delete_cached_db) - - def _setup_stores(self): - glance_store.register_opts(CONF) - - image_dir = os.path.join(self.test_dir, "images") - self.config(group='glance_store', - filesystem_store_datadir=image_dir) - - glance_store.create_stores() - - def _load_paste_app(self, name, flavor, conf): - conf_file_path = os.path.join(self.test_dir, '%s-paste.ini' % name) - with open(conf_file_path, 'w') as conf_file: - conf_file.write(conf) - conf_file.flush() - return config.load_paste_app(name, flavor=flavor, - conf_file=conf_file_path) - - def _connect_registry_client(self): - def get_connection_type(self2): - def wrapped(*args, **kwargs): - return test_utils.HttplibWsgiAdapter(self.glance_registry_app) - return wrapped - - self.stubs.Set(glance.common.client.BaseClient, - 'get_connection_type', get_connection_type) - - def tearDown(self): - glance.db.sqlalchemy.api.clear_db_env() - super(ApiTest, self).tearDown() diff --git a/glance/tests/integration/v2/test_property_quota_violations.py 
b/glance/tests/integration/v2/test_property_quota_violations.py deleted file mode 100644 index b955009f..00000000 --- a/glance/tests/integration/v2/test_property_quota_violations.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_serialization import jsonutils -from six.moves import http_client -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from glance.tests.integration.v2 import base - -CONF = cfg.CONF - - -class TestPropertyQuotaViolations(base.ApiTest): - def __init__(self, *args, **kwargs): - super(TestPropertyQuotaViolations, self).__init__(*args, **kwargs) - self.api_flavor = 'noauth' - self.registry_flavor = 'fakeauth' - - def _headers(self, custom_headers=None): - base_headers = { - 'X-Identity-Status': 'Confirmed', - 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', - 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', - 'X-Tenant-Id': "foo", - 'X-Roles': 'member', - } - base_headers.update(custom_headers or {}) - return base_headers - - def _get(self, image_id=""): - path = ('/v2/images/%s' % image_id).rstrip('/') - rsp, content = self.http.request(path, 'GET', headers=self._headers()) - self.assertEqual(http_client.OK, rsp.status) - content = jsonutils.loads(content) - return content - - def _create_image(self, body): - path = '/v2/images' - headers = 
self._headers({'content-type': 'application/json'}) - rsp, content = self.http.request(path, 'POST', headers=headers, - body=jsonutils.dumps(body)) - self.assertEqual(http_client.CREATED, rsp.status) - return jsonutils.loads(content) - - def _patch(self, image_id, body, expected_status): - path = '/v2/images/%s' % image_id - media_type = 'application/openstack-images-v2.1-json-patch' - headers = self._headers({'content-type': media_type}) - rsp, content = self.http.request(path, 'PATCH', headers=headers, - body=jsonutils.dumps(body)) - self.assertEqual(expected_status, rsp.status, content) - return content - - def test_property_ops_when_quota_violated(self): - # Image list must be empty to begin with - image_list = self._get()['images'] - self.assertEqual(0, len(image_list)) - - orig_property_quota = 10 - CONF.set_override('image_property_quota', orig_property_quota) - - # Create an image (with deployer-defined properties) - req_body = {'name': 'testimg', - 'disk_format': 'aki', - 'container_format': 'aki'} - for i in range(orig_property_quota): - req_body['k_%d' % i] = 'v_%d' % i - image = self._create_image(req_body) - image_id = image['id'] - for i in range(orig_property_quota): - self.assertEqual('v_%d' % i, image['k_%d' % i]) - - # Now reduce property quota. 
We should be allowed to modify/delete - # existing properties (even if the result still exceeds property quota) - # but not add new properties nor replace existing properties with new - # properties (as long as we're over the quota) - self.config(image_property_quota=2) - - patch_body = [{'op': 'replace', 'path': '/k_4', 'value': 'v_4.new'}] - image = jsonutils.loads(self._patch(image_id, patch_body, - http_client.OK)) - self.assertEqual('v_4.new', image['k_4']) - - patch_body = [{'op': 'remove', 'path': '/k_7'}] - image = jsonutils.loads(self._patch(image_id, patch_body, - http_client.OK)) - self.assertNotIn('k_7', image) - - patch_body = [{'op': 'add', 'path': '/k_100', 'value': 'v_100'}] - self._patch(image_id, patch_body, http_client.REQUEST_ENTITY_TOO_LARGE) - image = self._get(image_id) - self.assertNotIn('k_100', image) - - patch_body = [ - {'op': 'remove', 'path': '/k_5'}, - {'op': 'add', 'path': '/k_100', 'value': 'v_100'}, - ] - self._patch(image_id, patch_body, http_client.REQUEST_ENTITY_TOO_LARGE) - image = self._get(image_id) - self.assertNotIn('k_100', image) - self.assertIn('k_5', image) - - # temporary violations to property quota should be allowed as long as - # it's within one PATCH request and the end result does not violate - # quotas. 
- patch_body = [{'op': 'add', 'path': '/k_100', 'value': 'v_100'}, - {'op': 'add', 'path': '/k_99', 'value': 'v_99'}] - to_rm = ['k_%d' % i for i in range(orig_property_quota) if i != 7] - patch_body.extend([{'op': 'remove', 'path': '/%s' % k} for k in to_rm]) - image = jsonutils.loads(self._patch(image_id, patch_body, - http_client.OK)) - self.assertEqual('v_99', image['k_99']) - self.assertEqual('v_100', image['k_100']) - for k in to_rm: - self.assertNotIn(k, image) diff --git a/glance/tests/integration/v2/test_tasks_api.py b/glance/tests/integration/v2/test_tasks_api.py deleted file mode 100644 index 9bb5f9a9..00000000 --- a/glance/tests/integration/v2/test_tasks_api.py +++ /dev/null @@ -1,557 +0,0 @@ -# Copyright 2013 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import eventlet -from oslo_serialization import jsonutils as json -from six.moves import http_client - -from glance.api.v2 import tasks -from glance.common import timeutils -from glance.tests.integration.v2 import base - -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' -TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' -TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' -TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' - - -def minimal_task_headers(owner='tenant1'): - headers = { - 'X-Auth-Token': 'user1:%s:admin' % owner, - 'Content-Type': 'application/json', - } - return headers - - -def _new_task_fixture(**kwargs): - task_data = { - "type": "import", - "input": { - "import_from": "http://example.com", - "import_from_format": "qcow2", - "image_properties": { - 'disk_format': 'vhd', - 'container_format': 'ovf' - } - } - } - task_data.update(kwargs) - return task_data - - -class TestTasksApi(base.ApiTest): - - def __init__(self, *args, **kwargs): - super(TestTasksApi, self).__init__(*args, **kwargs) - self.api_flavor = 'fakeauth' - self.registry_flavor = 'fakeauth' - - def _wait_on_task_execution(self, max_wait=5): - """Wait until all the tasks have finished execution and are in - state of success or failure. - """ - - start = timeutils.utcnow() - - # wait for maximum of seconds defined by max_wait - while timeutils.delta_seconds(start, timeutils.utcnow()) < max_wait: - wait = False - # Verify that no task is in status of pending or processing - path = "/v2/tasks" - res, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - content_dict = json.loads(content) - - self.assertEqual(http_client.OK, res.status) - res_tasks = content_dict['tasks'] - if len(res_tasks) != 0: - for task in res_tasks: - if task['status'] in ('pending', 'processing'): - wait = True - break - - if wait: - # Bug #1541487: we must give time to the server to execute the - # task, but the server is run in the same process than the - # test. 
Use eventlet to give the control to the pending server - # task. - eventlet.sleep(0.05) - continue - else: - break - - def _post_new_task(self, **kwargs): - task_owner = kwargs.get('owner') - headers = minimal_task_headers(task_owner) - - task_data = _new_task_fixture() - task_data['input']['import_from'] = "http://example.com" - body_content = json.dumps(task_data) - - path = "/v2/tasks" - response, content = self.http.request(path, 'POST', - headers=headers, - body=body_content) - - self.assertEqual(http_client.CREATED, response.status) - - task = json.loads(content) - task_id = task['id'] - - self.assertIsNotNone(task_id) - self.assertEqual(task_owner, task['owner']) - self.assertEqual(task_data['type'], task['type']) - self.assertEqual(task_data['input'], task['input']) - self.assertEqual("http://localhost" + path + "/" + task_id, - response.webob_resp.headers['Location']) - - return task, task_data - - def test_all_task_api(self): - # 0. GET /tasks - # Verify no tasks - path = "/v2/tasks" - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - content_dict = json.loads(content) - - self.assertEqual(http_client.OK, response.status) - self.assertFalse(content_dict['tasks']) - - # 1. GET /tasks/{task_id} - # Verify non-existent task - task_id = 'NON_EXISTENT_TASK' - path = "/v2/tasks/%s" % task_id - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - - self.assertEqual(http_client.NOT_FOUND, response.status) - - # 2. POST /tasks - # Create a new task - task_owner = 'tenant1' - data, req_input = self._post_new_task(owner=task_owner) - - # 3. GET /tasks/{task_id} - # Get an existing task - task_id = data['id'] - path = "/v2/tasks/%s" % task_id - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - self.assertEqual(http_client.OK, response.status) - - # NOTE(sabari): wait for all task executions to finish before checking - # task status. 
- self._wait_on_task_execution(max_wait=10) - - # 4. GET /tasks - # Get all tasks (not deleted) - path = "/v2/tasks" - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - - self.assertEqual(http_client.OK, response.status) - self.assertIsNotNone(content) - - data = json.loads(content) - self.assertIsNotNone(data) - self.assertEqual(1, len(data['tasks'])) - # NOTE(venkatesh) find a way to get expected_keys from tasks controller - expected_keys = set(['id', 'expires_at', 'type', 'owner', 'status', - 'created_at', 'updated_at', 'self', 'schema']) - task = data['tasks'][0] - self.assertEqual(expected_keys, set(task.keys())) - self.assertEqual(req_input['type'], task['type']) - self.assertEqual(task_owner, task['owner']) - self.assertEqual('success', task['status']) - self.assertIsNotNone(task['created_at']) - self.assertIsNotNone(task['updated_at']) - - def test_task_schema_api(self): - # 0. GET /schemas/task - # Verify schema for task - path = "/v2/schemas/task" - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - self.assertEqual(http_client.OK, response.status) - - schema = tasks.get_task_schema() - expected_schema = schema.minimal() - data = json.loads(content) - self.assertIsNotNone(data) - self.assertEqual(expected_schema, data) - - # 1. GET /schemas/tasks - # Verify schema for tasks - path = "/v2/schemas/tasks" - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - self.assertEqual(http_client.OK, response.status) - - schema = tasks.get_collection_schema() - expected_schema = schema.minimal() - data = json.loads(content) - self.assertIsNotNone(data) - self.assertEqual(expected_schema, data) - - # NOTE(nikhil): wait for all task executions to finish before exiting - # else there is a risk of running into deadlock - self._wait_on_task_execution() - - def test_create_new_task(self): - # 0. 
POST /tasks - # Create a new task with valid input and type - task_data = _new_task_fixture() - task_owner = 'tenant1' - body_content = json.dumps(task_data) - - path = "/v2/tasks" - response, content = self.http.request( - path, 'POST', headers=minimal_task_headers(task_owner), - body=body_content) - self.assertEqual(http_client.CREATED, response.status) - - data = json.loads(content) - task_id = data['id'] - - self.assertIsNotNone(task_id) - self.assertEqual(task_owner, data['owner']) - self.assertEqual(task_data['type'], data['type']) - self.assertEqual(task_data['input'], data['input']) - - # 1. POST /tasks - # Create a new task with invalid type - # Expect BadRequest(400) Error as response - task_data = _new_task_fixture(type='invalid') - task_owner = 'tenant1' - body_content = json.dumps(task_data) - - path = "/v2/tasks" - response, content = self.http.request( - path, 'POST', headers=minimal_task_headers(task_owner), - body=body_content) - self.assertEqual(http_client.BAD_REQUEST, response.status) - - # 1. POST /tasks - # Create a new task with invalid input for type 'import' - # Expect BadRequest(400) Error as response - task_data = _new_task_fixture(task_input='{something: invalid}') - task_owner = 'tenant1' - body_content = json.dumps(task_data) - - path = "/v2/tasks" - response, content = self.http.request( - path, 'POST', headers=minimal_task_headers(task_owner), - body=body_content) - self.assertEqual(http_client.BAD_REQUEST, response.status) - - # NOTE(nikhil): wait for all task executions to finish before exiting - # else there is a risk of running into deadlock - self._wait_on_task_execution() - - def test_tasks_with_filter(self): - - # 0. GET /v2/tasks - # Verify no tasks - path = "/v2/tasks" - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - - self.assertEqual(http_client.OK, response.status) - - content_dict = json.loads(content) - self.assertFalse(content_dict['tasks']) - - task_ids = [] - - # 1. 
Make 2 POST requests on /tasks with various attributes - task_owner = TENANT1 - data, req_input1 = self._post_new_task(owner=task_owner) - task_ids.append(data['id']) - - task_owner = TENANT2 - data, req_input2 = self._post_new_task(owner=task_owner) - task_ids.append(data['id']) - - # 2. GET /tasks - # Verify two import tasks - path = "/v2/tasks" - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - - self.assertEqual(http_client.OK, response.status) - - content_dict = json.loads(content) - self.assertEqual(2, len(content_dict['tasks'])) - - # 3. GET /tasks with owner filter - # Verify correct task returned with owner - params = "owner=%s" % TENANT1 - path = "/v2/tasks?%s" % params - - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - - self.assertEqual(http_client.OK, response.status) - - content_dict = json.loads(content) - self.assertEqual(1, len(content_dict['tasks'])) - self.assertEqual(TENANT1, content_dict['tasks'][0]['owner']) - - # Check the same for different owner. - params = "owner=%s" % TENANT2 - path = "/v2/tasks?%s" % params - - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - - self.assertEqual(http_client.OK, response.status) - - content_dict = json.loads(content) - self.assertEqual(1, len(content_dict['tasks'])) - self.assertEqual(TENANT2, content_dict['tasks'][0]['owner']) - - # 4. 
GET /tasks with type filter - # Verify correct task returned with type - params = "type=import" - path = "/v2/tasks?%s" % params - - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - self.assertEqual(http_client.OK, response.status) - - content_dict = json.loads(content) - self.assertEqual(2, len(content_dict['tasks'])) - - actual_task_ids = [task['id'] for task in content_dict['tasks']] - self.assertEqual(set(task_ids), set(actual_task_ids)) - - # NOTE(nikhil): wait for all task executions to finish before exiting - # else there is a risk of running into deadlock - self._wait_on_task_execution() - - def test_limited_tasks(self): - """ - Ensure marker and limit query params work - """ - - # 0. GET /tasks - # Verify no tasks - path = "/v2/tasks" - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - self.assertEqual(http_client.OK, response.status) - tasks = json.loads(content) - self.assertFalse(tasks['tasks']) - - task_ids = [] - - # 1. POST /tasks with three tasks with various attributes - - task, _ = self._post_new_task(owner=TENANT1) - task_ids.append(task['id']) - - task, _ = self._post_new_task(owner=TENANT2) - task_ids.append(task['id']) - - task, _ = self._post_new_task(owner=TENANT3) - task_ids.append(task['id']) - - # 2. GET /tasks - # Verify 3 tasks are returned - path = "/v2/tasks" - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - - self.assertEqual(http_client.OK, response.status) - - tasks = json.loads(content)['tasks'] - - self.assertEqual(3, len(tasks)) - - # 3. 
GET /tasks with limit of 2 - # Verify only two tasks were returned - params = "limit=2" - path = "/v2/tasks?%s" % params - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - - self.assertEqual(http_client.OK, response.status) - - actual_tasks = json.loads(content)['tasks'] - - self.assertEqual(2, len(actual_tasks)) - self.assertEqual(tasks[0]['id'], actual_tasks[0]['id']) - self.assertEqual(tasks[1]['id'], actual_tasks[1]['id']) - - # 4. GET /tasks with marker - # Verify only two tasks were returned - params = "marker=%s" % tasks[0]['id'] - path = "/v2/tasks?%s" % params - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - - self.assertEqual(http_client.OK, response.status) - - actual_tasks = json.loads(content)['tasks'] - - self.assertEqual(2, len(actual_tasks)) - self.assertEqual(tasks[1]['id'], actual_tasks[0]['id']) - self.assertEqual(tasks[2]['id'], actual_tasks[1]['id']) - - # 5. GET /tasks with marker and limit - # Verify only one task was returned with the correct id - params = "limit=1&marker=%s" % tasks[1]['id'] - path = "/v2/tasks?%s" % params - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - - self.assertEqual(http_client.OK, response.status) - - actual_tasks = json.loads(content)['tasks'] - - self.assertEqual(1, len(actual_tasks)) - self.assertEqual(tasks[2]['id'], actual_tasks[0]['id']) - - # NOTE(nikhil): wait for all task executions to finish before exiting - # else there is a risk of running into deadlock - self._wait_on_task_execution() - - def test_ordered_tasks(self): - # 0. GET /tasks - # Verify no tasks - path = "/v2/tasks" - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - self.assertEqual(http_client.OK, response.status) - tasks = json.loads(content) - self.assertFalse(tasks['tasks']) - - task_ids = [] - - # 1. 
POST /tasks with three tasks with various attributes - task, _ = self._post_new_task(owner=TENANT1) - task_ids.append(task['id']) - - task, _ = self._post_new_task(owner=TENANT2) - task_ids.append(task['id']) - - task, _ = self._post_new_task(owner=TENANT3) - task_ids.append(task['id']) - - # 2. GET /tasks with no query params - # Verify three tasks sorted by created_at desc - # 2. GET /tasks - # Verify 3 tasks are returned - path = "/v2/tasks" - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - - self.assertEqual(http_client.OK, response.status) - - actual_tasks = json.loads(content)['tasks'] - - self.assertEqual(3, len(actual_tasks)) - self.assertEqual(task_ids[2], actual_tasks[0]['id']) - self.assertEqual(task_ids[1], actual_tasks[1]['id']) - self.assertEqual(task_ids[0], actual_tasks[2]['id']) - - # 3. GET /tasks sorted by owner asc - params = 'sort_key=owner&sort_dir=asc' - path = '/v2/tasks?%s' % params - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - - self.assertEqual(http_client.OK, response.status) - - expected_task_owners = [TENANT1, TENANT2, TENANT3] - expected_task_owners.sort() - - actual_tasks = json.loads(content)['tasks'] - self.assertEqual(3, len(actual_tasks)) - self.assertEqual(expected_task_owners, - [t['owner'] for t in actual_tasks]) - - # 4. GET /tasks sorted by owner desc with a marker - params = 'sort_key=owner&sort_dir=desc&marker=%s' % task_ids[0] - path = '/v2/tasks?%s' % params - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - - self.assertEqual(http_client.OK, response.status) - - actual_tasks = json.loads(content)['tasks'] - self.assertEqual(2, len(actual_tasks)) - self.assertEqual(task_ids[2], actual_tasks[0]['id']) - self.assertEqual(task_ids[1], actual_tasks[1]['id']) - self.assertEqual(TENANT3, actual_tasks[0]['owner']) - self.assertEqual(TENANT2, actual_tasks[1]['owner']) - - # 5. 
GET /tasks sorted by owner asc with a marker - params = 'sort_key=owner&sort_dir=asc&marker=%s' % task_ids[0] - path = '/v2/tasks?%s' % params - - response, content = self.http.request(path, 'GET', - headers=minimal_task_headers()) - - self.assertEqual(http_client.OK, response.status) - - actual_tasks = json.loads(content)['tasks'] - - self.assertEqual(0, len(actual_tasks)) - - # NOTE(nikhil): wait for all task executions to finish before exiting - # else there is a risk of running into deadlock - self._wait_on_task_execution() - - def test_delete_task(self): - # 0. POST /tasks - # Create a new task with valid input and type - task_data = _new_task_fixture() - task_owner = 'tenant1' - body_content = json.dumps(task_data) - - path = "/v2/tasks" - response, content = self.http.request( - path, 'POST', headers=minimal_task_headers(task_owner), - body=body_content) - self.assertEqual(http_client.CREATED, response.status) - - data = json.loads(content) - task_id = data['id'] - - # 1. DELETE on /tasks/{task_id} - # Attempt to delete a task - path = "/v2/tasks/%s" % task_id - response, content = self.http.request(path, - 'DELETE', - headers=minimal_task_headers()) - self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status) - self.assertEqual('GET', response.webob_resp.headers.get('Allow')) - self.assertEqual(('GET',), response.webob_resp.allow) - self.assertEqual(('GET',), response.allow) - - # 2. 
GET /tasks/{task_id} - # Ensure that methods mentioned in the Allow header work - path = "/v2/tasks/%s" % task_id - response, content = self.http.request(path, - 'GET', - headers=minimal_task_headers()) - self.assertEqual(http_client.OK, response.status) - self.assertIsNotNone(content) - - # NOTE(nikhil): wait for all task executions to finish before exiting - # else there is a risk of running into deadlock - self._wait_on_task_execution() diff --git a/glance/tests/stubs.py b/glance/tests/stubs.py deleted file mode 100644 index 80bf5c84..00000000 --- a/glance/tests/stubs.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Stubouts, mocks and fixtures for the test suite""" - -import os - -try: - import sendfile - SENDFILE_SUPPORTED = True -except ImportError: - SENDFILE_SUPPORTED = False - -import routes -import webob - -from glance.api.middleware import context -from glance.api.v1 import router -import glance.common.client -from glance.registry.api import v1 as rserver -from glance.tests import utils - - -DEBUG = False - - -class FakeRegistryConnection(object): - - def __init__(self, registry=None): - self.registry = registry or rserver - - def __call__(self, *args, **kwargs): - # NOTE(flaper87): This method takes - # __init__'s place in the chain. 
- return self - - def connect(self): - return True - - def close(self): - return True - - def request(self, method, url, body=None, headers=None): - self.req = webob.Request.blank("/" + url.lstrip("/")) - self.req.method = method - if headers: - self.req.headers = headers - if body: - self.req.body = body - - def getresponse(self): - mapper = routes.Mapper() - server = self.registry.API(mapper) - # NOTE(markwash): we need to pass through context auth information if - # we have it. - if 'X-Auth-Token' in self.req.headers: - api = utils.FakeAuthMiddleware(server) - else: - api = context.UnauthenticatedContextMiddleware(server) - webob_res = self.req.get_response(api) - - return utils.FakeHTTPResponse(status=webob_res.status_int, - headers=webob_res.headers, - data=webob_res.body) - - -def stub_out_registry_and_store_server(stubs, base_dir, **kwargs): - """Mocks calls to 127.0.0.1 on 9191 and 9292 for testing. - - Done so that a real Glance server does not need to be up and - running - """ - - class FakeSocket(object): - - def __init__(self, *args, **kwargs): - pass - - def fileno(self): - return 42 - - class FakeSendFile(object): - - def __init__(self, req): - self.req = req - - def sendfile(self, o, i, offset, nbytes): - os.lseek(i, offset, os.SEEK_SET) - prev_len = len(self.req.body) - self.req.body += os.read(i, nbytes) - return len(self.req.body) - prev_len - - class FakeGlanceConnection(object): - - def __init__(self, *args, **kwargs): - self.sock = FakeSocket() - self.stub_force_sendfile = kwargs.get('stub_force_sendfile', - SENDFILE_SUPPORTED) - - def connect(self): - return True - - def close(self): - return True - - def _clean_url(self, url): - # TODO(bcwaldon): Fix the hack that strips off v1 - return url.replace('/v1', '', 1) if url.startswith('/v1') else url - - def putrequest(self, method, url): - self.req = webob.Request.blank(self._clean_url(url)) - if self.stub_force_sendfile: - fake_sendfile = FakeSendFile(self.req) - stubs.Set(sendfile, 'sendfile', 
fake_sendfile.sendfile) - self.req.method = method - - def putheader(self, key, value): - self.req.headers[key] = value - - def endheaders(self): - hl = [i.lower() for i in self.req.headers.keys()] - assert not ('content-length' in hl and - 'transfer-encoding' in hl), ( - 'Content-Length and Transfer-Encoding are mutually exclusive') - - def send(self, data): - # send() is called during chunked-transfer encoding, and - # data is of the form %x\r\n%s\r\n. Strip off the %x and - # only write the actual data in tests. - self.req.body += data.split("\r\n")[1] - - def request(self, method, url, body=None, headers=None): - self.req = webob.Request.blank(self._clean_url(url)) - self.req.method = method - if headers: - self.req.headers = headers - if body: - self.req.body = body - - def getresponse(self): - mapper = routes.Mapper() - api = context.UnauthenticatedContextMiddleware(router.API(mapper)) - res = self.req.get_response(api) - - # httplib.Response has a read() method...fake it out - def fake_reader(): - return res.body - - setattr(res, 'read', fake_reader) - return res - - def fake_get_connection_type(client): - """Returns the proper connection type.""" - DEFAULT_REGISTRY_PORT = 9191 - DEFAULT_API_PORT = 9292 - - if (client.port == DEFAULT_API_PORT and - client.host == '0.0.0.0'): - return FakeGlanceConnection - elif (client.port == DEFAULT_REGISTRY_PORT and - client.host == '0.0.0.0'): - rserver = kwargs.get("registry") - return FakeRegistryConnection(registry=rserver) - - def fake_image_iter(self): - for i in self.source.app_iter: - yield i - - def fake_sendable(self, body): - force = getattr(self, 'stub_force_sendfile', None) - if force is None: - return self._stub_orig_sendable(body) - else: - if force: - assert glance.common.client.SENDFILE_SUPPORTED - return force - - stubs.Set(glance.common.client.BaseClient, 'get_connection_type', - fake_get_connection_type) - setattr(glance.common.client.BaseClient, '_stub_orig_sendable', - 
glance.common.client.BaseClient._sendable) - stubs.Set(glance.common.client.BaseClient, '_sendable', - fake_sendable) - - -def stub_out_registry_server(stubs, **kwargs): - """Mocks calls to 127.0.0.1 on 9191 for testing. - - Done so that a real Glance Registry server does not need to be up and - running. - """ - def fake_get_connection_type(client): - """Returns the proper connection type.""" - DEFAULT_REGISTRY_PORT = 9191 - - if (client.port == DEFAULT_REGISTRY_PORT and - client.host == '0.0.0.0'): - rserver = kwargs.pop("registry", None) - return FakeRegistryConnection(registry=rserver) - - def fake_image_iter(self): - for i in self.response.app_iter: - yield i - - stubs.Set(glance.common.client.BaseClient, 'get_connection_type', - fake_get_connection_type) diff --git a/glance/tests/test_hacking.py b/glance/tests/test_hacking.py deleted file mode 100644 index 0568014f..00000000 --- a/glance/tests/test_hacking.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from glance.hacking import checks -from glance.tests import utils - - -class HackingTestCase(utils.BaseTestCase): - def test_assert_true_instance(self): - self.assertEqual(1, len(list(checks.assert_true_instance( - "self.assertTrue(isinstance(e, " - "exception.BuildAbortException))")))) - - self.assertEqual( - 0, len(list(checks.assert_true_instance("self.assertTrue()")))) - - def test_assert_equal_type(self): - self.assertEqual(1, len(list(checks.assert_equal_type( - "self.assertEqual(type(als['QuicAssist']), list)")))) - - self.assertEqual( - 0, len(list(checks.assert_equal_type("self.assertTrue()")))) - - def test_assert_equal_none(self): - self.assertEqual(1, len(list(checks.assert_equal_none( - "self.assertEqual(A, None)")))) - - self.assertEqual(1, len(list(checks.assert_equal_none( - "self.assertEqual(None, A)")))) - - self.assertEqual( - 0, len(list(checks.assert_equal_none("self.assertIsNone()")))) - - def test_no_translate_debug_logs(self): - self.assertEqual(1, len(list(checks.no_translate_debug_logs( - "LOG.debug(_('foo'))", "glance/store/foo.py")))) - - self.assertEqual(0, len(list(checks.no_translate_debug_logs( - "LOG.debug('foo')", "glance/store/foo.py")))) - - self.assertEqual(0, len(list(checks.no_translate_debug_logs( - "LOG.info(_('foo'))", "glance/store/foo.py")))) - - def test_no_direct_use_of_unicode_function(self): - self.assertEqual(1, len(list(checks.no_direct_use_of_unicode_function( - "unicode('the party dont start til the unicode walks in')")))) - self.assertEqual(1, len(list(checks.no_direct_use_of_unicode_function( - """unicode('something ' - 'something else""")))) - self.assertEqual(0, len(list(checks.no_direct_use_of_unicode_function( - "six.text_type('party over')")))) - self.assertEqual(0, len(list(checks.no_direct_use_of_unicode_function( - "not_actually_unicode('something completely different')")))) - - def test_no_contextlib_nested(self): - self.assertEqual(1, len(list(checks.check_no_contextlib_nested( - "with 
contextlib.nested(")))) - - self.assertEqual(1, len(list(checks.check_no_contextlib_nested( - "with nested(")))) - - self.assertEqual(0, len(list(checks.check_no_contextlib_nested( - "with foo as bar")))) - - def test_dict_constructor_with_list_copy(self): - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " dict([(i, connect_info[i])")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " attrs = dict([(k, _from_json(v))")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " type_names = dict((value, key) for key, value in")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " dict((value, key) for key, value in")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - "foo(param=dict((k, v) for k, v in bar.items()))")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " dict([[i,i] for i in range(3)])")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " dd = dict([i,i] for i in range(3))")))) - - self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( - " create_kwargs = dict(snapshot=snapshot,")))) - - self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( - " self._render_dict(xml, data_el, data.__dict__)")))) - - def test_check_python3_xrange(self): - func = checks.check_python3_xrange - self.assertEqual(1, len(list(func('for i in xrange(10)')))) - self.assertEqual(1, len(list(func('for i in xrange (10)')))) - self.assertEqual(0, len(list(func('for i in range(10)')))) - self.assertEqual(0, len(list(func('for i in six.moves.range(10)')))) - self.assertEqual(0, len(list(func('testxrange(10)')))) - - def test_dict_iteritems(self): - self.assertEqual(1, len(list(checks.check_python3_no_iteritems( - "obj.iteritems()")))) - - self.assertEqual(0, len(list(checks.check_python3_no_iteritems( - "six.iteritems(obj)")))) - - self.assertEqual(0, 
len(list(checks.check_python3_no_iteritems( - "obj.items()")))) - - def test_dict_iterkeys(self): - self.assertEqual(1, len(list(checks.check_python3_no_iterkeys( - "obj.iterkeys()")))) - - self.assertEqual(0, len(list(checks.check_python3_no_iterkeys( - "six.iterkeys(obj)")))) - - self.assertEqual(0, len(list(checks.check_python3_no_iterkeys( - "obj.keys()")))) - - def test_dict_itervalues(self): - self.assertEqual(1, len(list(checks.check_python3_no_itervalues( - "obj.itervalues()")))) - - self.assertEqual(0, len(list(checks.check_python3_no_itervalues( - "six.itervalues(ob)")))) - - self.assertEqual(0, len(list(checks.check_python3_no_itervalues( - "obj.values()")))) diff --git a/glance/tests/unit/__init__.py b/glance/tests/unit/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/unit/api/__init__.py b/glance/tests/unit/api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/unit/api/middleware/__init__.py b/glance/tests/unit/api/middleware/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/unit/api/middleware/test_cache_manage.py b/glance/tests/unit/api/middleware/test_cache_manage.py deleted file mode 100644 index d66aacb8..00000000 --- a/glance/tests/unit/api/middleware/test_cache_manage.py +++ /dev/null @@ -1,171 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from glance.api import cached_images -from glance.api.middleware import cache_manage -import glance.common.config -import glance.common.wsgi -import glance.image_cache -from glance.tests import utils as test_utils - -import mock -import webob - - -class TestCacheManageFilter(test_utils.BaseTestCase): - @mock.patch.object(glance.image_cache.ImageCache, "init_driver") - def setUp(self, mock_init_driver): - super(TestCacheManageFilter, self).setUp() - self.stub_application_name = "stubApplication" - self.stub_value = "Stub value" - self.image_id = "image_id_stub" - - mock_init_driver.return_value = None - - self.cache_manage_filter = cache_manage.CacheManageFilter( - self.stub_application_name) - - def test_bogus_request(self): - # prepare - bogus_request = webob.Request.blank("/bogus/") - - # call - resource = self.cache_manage_filter.process_request(bogus_request) - - # check - self.assertIsNone(resource) - - @mock.patch.object(cached_images.Controller, "get_cached_images") - def test_get_cached_images(self, - mock_get_cached_images): - # setup - mock_get_cached_images.return_value = self.stub_value - - # prepare - request = webob.Request.blank("/v1/cached_images") - - # call - resource = self.cache_manage_filter.process_request(request) - - # check - mock_get_cached_images.assert_called_with(request) - self.assertEqual('"' + self.stub_value + '"', - resource.body.decode('utf-8')) - - @mock.patch.object(cached_images.Controller, "delete_cached_image") - def test_delete_cached_image(self, - mock_delete_cached_image): - # setup - mock_delete_cached_image.return_value = self.stub_value - - # prepare - request = webob.Request.blank("/v1/cached_images/" + self.image_id, - environ={'REQUEST_METHOD': "DELETE"}) - - # call - resource = self.cache_manage_filter.process_request(request) - - # check - mock_delete_cached_image.assert_called_with(request, - image_id=self.image_id) - self.assertEqual('"' + self.stub_value + '"', - resource.body.decode('utf-8')) - - 
@mock.patch.object(cached_images.Controller, "delete_cached_images") - def test_delete_cached_images(self, - mock_delete_cached_images): - # setup - mock_delete_cached_images.return_value = self.stub_value - - # prepare - request = webob.Request.blank("/v1/cached_images", - environ={'REQUEST_METHOD': "DELETE"}) - - # call - resource = self.cache_manage_filter.process_request(request) - - # check - mock_delete_cached_images.assert_called_with(request) - self.assertEqual('"' + self.stub_value + '"', - resource.body.decode('utf-8')) - - @mock.patch.object(cached_images.Controller, "queue_image") - def test_put_queued_image(self, - mock_queue_image): - # setup - mock_queue_image.return_value = self.stub_value - - # prepare - request = webob.Request.blank("/v1/queued_images/" + self.image_id, - environ={'REQUEST_METHOD': "PUT"}) - - # call - resource = self.cache_manage_filter.process_request(request) - - # check - mock_queue_image.assert_called_with(request, image_id=self.image_id) - self.assertEqual('"' + self.stub_value + '"', - resource.body.decode('utf-8')) - - @mock.patch.object(cached_images.Controller, "get_queued_images") - def test_get_queued_images(self, - mock_get_queued_images): - # setup - mock_get_queued_images.return_value = self.stub_value - - # prepare - request = webob.Request.blank("/v1/queued_images") - - # call - resource = self.cache_manage_filter.process_request(request) - - # check - mock_get_queued_images.assert_called_with(request) - self.assertEqual('"' + self.stub_value + '"', - resource.body.decode('utf-8')) - - @mock.patch.object(cached_images.Controller, "delete_queued_image") - def test_delete_queued_image(self, - mock_delete_queued_image): - # setup - mock_delete_queued_image.return_value = self.stub_value - - # prepare - request = webob.Request.blank("/v1/queued_images/" + self.image_id, - environ={'REQUEST_METHOD': 'DELETE'}) - - # call - resource = self.cache_manage_filter.process_request(request) - - # check - 
mock_delete_queued_image.assert_called_with(request, - image_id=self.image_id) - self.assertEqual('"' + self.stub_value + '"', - resource.body.decode('utf-8')) - - @mock.patch.object(cached_images.Controller, "delete_queued_images") - def test_delete_queued_images(self, - mock_delete_queued_images): - # setup - mock_delete_queued_images.return_value = self.stub_value - - # prepare - request = webob.Request.blank("/v1/queued_images", - environ={'REQUEST_METHOD': 'DELETE'}) - - # call - resource = self.cache_manage_filter.process_request(request) - - # check - mock_delete_queued_images.assert_called_with(request) - self.assertEqual('"' + self.stub_value + '"', - resource.body.decode('utf-8')) diff --git a/glance/tests/unit/api/test_cmd.py b/glance/tests/unit/api/test_cmd.py deleted file mode 100644 index 3582da0f..00000000 --- a/glance/tests/unit/api/test_cmd.py +++ /dev/null @@ -1,134 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import sys - -import glance_store as store -import mock -from oslo_config import cfg -from oslo_log import log as logging -import six - -import glance.cmd.api -import glance.cmd.cache_cleaner -import glance.cmd.cache_pruner -import glance.common.config -from glance.common import exception as exc -import glance.common.wsgi -import glance.image_cache.cleaner -import glance.image_cache.pruner -from glance.tests import utils as test_utils - - -CONF = cfg.CONF - - -class TestGlanceApiCmd(test_utils.BaseTestCase): - - __argv_backup = None - - def _do_nothing(self, *args, **kwargs): - pass - - def _raise(self, exc): - def fake(*args, **kwargs): - raise exc - return fake - - def setUp(self): - super(TestGlanceApiCmd, self).setUp() - self.__argv_backup = sys.argv - sys.argv = ['glance-api'] - self.stderr = six.StringIO() - sys.stderr = self.stderr - - store.register_opts(CONF) - - self.stubs.Set(glance.common.config, 'load_paste_app', - self._do_nothing) - self.stubs.Set(glance.common.wsgi.Server, 'start', - self._do_nothing) - self.stubs.Set(glance.common.wsgi.Server, 'wait', - self._do_nothing) - - def tearDown(self): - sys.stderr = sys.__stderr__ - sys.argv = self.__argv_backup - super(TestGlanceApiCmd, self).tearDown() - - def test_supported_default_store(self): - self.config(group='glance_store', default_store='file') - glance.cmd.api.main() - - def test_worker_creation_failure(self): - failure = exc.WorkerCreationFailure(reason='test') - self.stubs.Set(glance.common.wsgi.Server, 'start', - self._raise(failure)) - exit = self.assertRaises(SystemExit, glance.cmd.api.main) - self.assertEqual(2, exit.code) - - @mock.patch.object(glance.common.config, 'parse_cache_args') - @mock.patch.object(logging, 'setup') - @mock.patch.object(glance.image_cache.ImageCache, 'init_driver') - @mock.patch.object(glance.image_cache.ImageCache, 'clean') - def test_cache_cleaner_main(self, mock_cache_clean, - mock_cache_init_driver, mock_log_setup, - mock_parse_config): - 
mock_cache_init_driver.return_value = None - - manager = mock.MagicMock() - manager.attach_mock(mock_log_setup, 'mock_log_setup') - manager.attach_mock(mock_parse_config, 'mock_parse_config') - manager.attach_mock(mock_cache_init_driver, 'mock_cache_init_driver') - manager.attach_mock(mock_cache_clean, 'mock_cache_clean') - glance.cmd.cache_cleaner.main() - expected_call_sequence = [mock.call.mock_parse_config(), - mock.call.mock_log_setup(CONF, 'glance'), - mock.call.mock_cache_init_driver(), - mock.call.mock_cache_clean()] - self.assertEqual(expected_call_sequence, manager.mock_calls) - - @mock.patch.object(glance.image_cache.base.CacheApp, '__init__') - def test_cache_cleaner_main_runtime_exception_handling(self, mock_cache): - mock_cache.return_value = None - self.stubs.Set(glance.image_cache.cleaner.Cleaner, 'run', - self._raise(RuntimeError)) - exit = self.assertRaises(SystemExit, glance.cmd.cache_cleaner.main) - self.assertEqual('ERROR: ', exit.code) - - @mock.patch.object(glance.common.config, 'parse_cache_args') - @mock.patch.object(logging, 'setup') - @mock.patch.object(glance.image_cache.ImageCache, 'init_driver') - @mock.patch.object(glance.image_cache.ImageCache, 'prune') - def test_cache_pruner_main(self, mock_cache_prune, - mock_cache_init_driver, mock_log_setup, - mock_parse_config): - mock_cache_init_driver.return_value = None - - manager = mock.MagicMock() - manager.attach_mock(mock_log_setup, 'mock_log_setup') - manager.attach_mock(mock_parse_config, 'mock_parse_config') - manager.attach_mock(mock_cache_init_driver, 'mock_cache_init_driver') - manager.attach_mock(mock_cache_prune, 'mock_cache_prune') - glance.cmd.cache_pruner.main() - expected_call_sequence = [mock.call.mock_parse_config(), - mock.call.mock_log_setup(CONF, 'glance'), - mock.call.mock_cache_init_driver(), - mock.call.mock_cache_prune()] - self.assertEqual(expected_call_sequence, manager.mock_calls) - - @mock.patch.object(glance.image_cache.base.CacheApp, '__init__') - def 
test_cache_pruner_main_runtime_exception_handling(self, mock_cache): - mock_cache.return_value = None - self.stubs.Set(glance.image_cache.pruner.Pruner, 'run', - self._raise(RuntimeError)) - exit = self.assertRaises(SystemExit, glance.cmd.cache_pruner.main) - self.assertEqual('ERROR: ', exit.code) diff --git a/glance/tests/unit/api/test_cmd_cache_manage.py b/glance/tests/unit/api/test_cmd_cache_manage.py deleted file mode 100644 index daf2a6e0..00000000 --- a/glance/tests/unit/api/test_cmd_cache_manage.py +++ /dev/null @@ -1,396 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import optparse -import sys - -import mock -import prettytable -from six.moves import StringIO - -from glance.cmd import cache_manage -from glance.common import exception -import glance.common.utils -import glance.image_cache.client -from glance.tests import utils as test_utils - - -@mock.patch('sys.stdout', mock.Mock()) -class TestGlanceCmdManage(test_utils.BaseTestCase): - - @mock.patch.object(optparse.OptionParser, 'print_help') - @mock.patch.object(optparse.OptionParser, 'parse_args') - def test_help(self, mock_parse_args, mock_print_help): - mock_parse_args.return_value = (optparse.Values(), ['help']) - oparser = optparse.OptionParser() - (options, command, args) = cache_manage.parse_options(oparser, - ['help']) - command(options, args) - self.assertEqual(1, mock_print_help.call_count) - - @mock.patch.object(optparse.OptionParser, 'parse_args') - def test_help_with_command(self, mock_parse_args): - mock_parse_args.return_value = (optparse.Values(), ['help', - 'list-cached']) - oparser = optparse.OptionParser() - (options, command, args) = cache_manage.parse_options(oparser, - ['help', - 'list-cached']) - command(options, args) - - @mock.patch.object(sys, 'exit') - @mock.patch.object(optparse.OptionParser, 'parse_args') - def test_help_with_redundant_command(self, mock_parse_args, mock_exit): - mock_parse_args.return_value = (optparse.Values(), ['help', - 'list-cached', - "1"]) - oparser = optparse.OptionParser() - (options, command, args) = cache_manage.parse_options(oparser, - ['help', - 'list-cached', - "1"]) - command(options, args) - self.assertEqual(1, mock_exit.call_count) - - @mock.patch.object(glance.image_cache.client.CacheClient, - 'get_cached_images') - @mock.patch.object(prettytable.PrettyTable, 'add_row') - def test_list_cached_images(self, mock_row_create, mock_images): - """ - Verify that list_cached() method correctly processes images with all - filled data and images with not filled 'last_accessed' field. 
- """ - - mock_images.return_value = [ - {'last_accessed': float(0), - 'last_modified': float(1378985797.124511), - 'image_id': '1', 'size': '128', 'hits': '1'}, - {'last_accessed': float(1378985797.124511), - 'last_modified': float(1378985797.124511), - 'image_id': '2', 'size': '255', 'hits': '2'}] - cache_manage.list_cached(mock.Mock(), '') - - self.assertEqual(len(mock_images.return_value), - mock_row_create.call_count) - - @mock.patch.object(glance.image_cache.client.CacheClient, - 'get_cached_images') - def test_list_cached_images_empty(self, mock_images): - """ - Verify that list_cached() method handles a case when no images are - cached without errors. - """ - - mock_images.return_value = [] - self.assertEqual(cache_manage.SUCCESS, - cache_manage.list_cached(mock.Mock(), '')) - - @mock.patch.object(glance.image_cache.client.CacheClient, - 'get_queued_images') - @mock.patch.object(prettytable.PrettyTable, 'add_row') - def test_list_queued_images(self, mock_row_create, mock_images): - """Verify that list_queued() method correctly processes images.""" - - mock_images.return_value = [ - {'image_id': '1'}, {'image_id': '2'}] - cache_manage.list_queued(mock.Mock(), '') - - self.assertEqual(len(mock_images.return_value), - mock_row_create.call_count) - - @mock.patch.object(glance.image_cache.client.CacheClient, - 'get_queued_images') - def test_list_queued_images_empty(self, mock_images): - """ - Verify that list_queued() method handles a case when no images were - queued without errors. 
- """ - - mock_images.return_value = [] - - self.assertEqual(cache_manage.SUCCESS, - cache_manage.list_queued(mock.Mock(), '')) - - def test_queue_image_without_index(self): - self.assertEqual(cache_manage.FAILURE, - cache_manage.queue_image(mock.Mock(), [])) - - @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') - @mock.patch.object(glance.cmd.cache_manage, 'get_client') - def test_queue_image_not_forced_not_confirmed(self, - mock_client, mock_confirm): - # options.forced set to False and queue confirmation set to False. - - mock_confirm.return_value = False - mock_options = mock.Mock() - mock_options.force = False - self.assertEqual(cache_manage.SUCCESS, - cache_manage.queue_image(mock_options, ['img_id'])) - self.assertFalse(mock_client.called) - - @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') - @mock.patch.object(glance.cmd.cache_manage, 'get_client') - def test_queue_image_not_forced_confirmed(self, mock_client, mock_confirm): - # options.forced set to False and queue confirmation set to True. - - mock_confirm.return_value = True - mock_options = mock.Mock() - mock_options.force = False - mock_options.verbose = True # to cover additional condition and line - manager = mock.MagicMock() - manager.attach_mock(mock_client, 'mock_client') - - self.assertEqual(cache_manage.SUCCESS, - cache_manage.queue_image(mock_options, ['img_id'])) - self.assertTrue(mock_client.called) - self.assertIn( - mock.call.mock_client().queue_image_for_caching('img_id'), - manager.mock_calls) - - def test_delete_cached_image_without_index(self): - self.assertEqual(cache_manage.FAILURE, - cache_manage.delete_cached_image(mock.Mock(), [])) - - @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') - @mock.patch.object(glance.cmd.cache_manage, 'get_client') - def test_delete_cached_image_not_forced_not_confirmed(self, - mock_client, - mock_confirm): - # options.forced set to False and delete confirmation set to False. 
- - mock_confirm.return_value = False - mock_options = mock.Mock() - mock_options.force = False - self.assertEqual( - cache_manage.SUCCESS, - cache_manage.delete_cached_image(mock_options, ['img_id'])) - self.assertFalse(mock_client.called) - - @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') - @mock.patch.object(glance.cmd.cache_manage, 'get_client') - def test_delete_cached_image_not_forced_confirmed(self, mock_client, - mock_confirm): - # options.forced set to False and delete confirmation set to True. - - mock_confirm.return_value = True - mock_options = mock.Mock() - mock_options.force = False - mock_options.verbose = True # to cover additional condition and line - manager = mock.MagicMock() - manager.attach_mock(mock_client, 'mock_client') - - self.assertEqual( - cache_manage.SUCCESS, - cache_manage.delete_cached_image(mock_options, ['img_id'])) - - self.assertIn( - mock.call.mock_client().delete_cached_image('img_id'), - manager.mock_calls) - - @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') - @mock.patch.object(glance.cmd.cache_manage, 'get_client') - def test_delete_cached_images_not_forced_not_confirmed(self, - mock_client, - mock_confirm): - # options.forced set to False and delete confirmation set to False. - - mock_confirm.return_value = False - mock_options = mock.Mock() - mock_options.force = False - self.assertEqual( - cache_manage.SUCCESS, - cache_manage.delete_all_cached_images(mock_options, None)) - self.assertFalse(mock_client.called) - - @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') - @mock.patch.object(glance.cmd.cache_manage, 'get_client') - def test_delete_cached_images_not_forced_confirmed(self, mock_client, - mock_confirm): - # options.forced set to False and delete confirmation set to True. 
- - mock_confirm.return_value = True - mock_options = mock.Mock() - mock_options.force = False - mock_options.verbose = True # to cover additional condition and line - manager = mock.MagicMock() - manager.attach_mock(mock_client, 'mock_client') - - self.assertEqual( - cache_manage.SUCCESS, - cache_manage.delete_all_cached_images(mock_options, None)) - self.assertTrue(mock_client.called) - self.assertIn( - mock.call.mock_client().delete_all_cached_images(), - manager.mock_calls) - - def test_delete_queued_image_without_index(self): - self.assertEqual(cache_manage.FAILURE, - cache_manage.delete_queued_image(mock.Mock(), [])) - - @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') - @mock.patch.object(glance.cmd.cache_manage, 'get_client') - def test_delete_queued_image_not_forced_not_confirmed(self, - mock_client, - mock_confirm): - # options.forced set to False and delete confirmation set to False. - - mock_confirm.return_value = False - mock_options = mock.Mock() - mock_options.force = False - self.assertEqual( - cache_manage.SUCCESS, - cache_manage.delete_queued_image(mock_options, ['img_id'])) - self.assertFalse(mock_client.called) - - @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') - @mock.patch.object(glance.cmd.cache_manage, 'get_client') - def test_delete_queued_image_not_forced_confirmed(self, mock_client, - mock_confirm): - # options.forced set to False and delete confirmation set to True. 
- - mock_confirm.return_value = True - mock_options = mock.Mock() - mock_options.force = False - mock_options.verbose = True # to cover additional condition and line - manager = mock.MagicMock() - manager.attach_mock(mock_client, 'mock_client') - - self.assertEqual( - cache_manage.SUCCESS, - cache_manage.delete_queued_image(mock_options, ['img_id'])) - self.assertTrue(mock_client.called) - self.assertIn( - mock.call.mock_client().delete_queued_image('img_id'), - manager.mock_calls) - - @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') - @mock.patch.object(glance.cmd.cache_manage, 'get_client') - def test_delete_queued_images_not_forced_not_confirmed(self, - mock_client, - mock_confirm): - # options.forced set to False and delete confirmation set to False. - - mock_confirm.return_value = False - mock_options = mock.Mock() - mock_options.force = False - self.assertEqual( - cache_manage.SUCCESS, - cache_manage.delete_all_queued_images(mock_options, None)) - self.assertFalse(mock_client.called) - - @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') - @mock.patch.object(glance.cmd.cache_manage, 'get_client') - def test_delete_queued_images_not_forced_confirmed(self, mock_client, - mock_confirm): - # options.forced set to False and delete confirmation set to True. 
- mock_confirm.return_value = True - mock_options = mock.Mock() - mock_options.force = False - mock_options.verbose = True # to cover additional condition and line - manager = mock.MagicMock() - manager.attach_mock(mock_client, 'mock_client') - - self.assertEqual( - cache_manage.SUCCESS, - cache_manage.delete_all_queued_images(mock_options, None)) - self.assertTrue(mock_client.called) - self.assertIn( - mock.call.mock_client().delete_all_queued_images(), - manager.mock_calls) - - @mock.patch.object(glance.cmd.cache_manage, 'get_client') - def test_catch_error_not_found(self, mock_function): - mock_function.side_effect = exception.NotFound() - - self.assertEqual(cache_manage.FAILURE, - cache_manage.list_cached(mock.Mock(), None)) - - @mock.patch.object(glance.cmd.cache_manage, 'get_client') - def test_catch_error_forbidden(self, mock_function): - mock_function.side_effect = exception.Forbidden() - - self.assertEqual(cache_manage.FAILURE, - cache_manage.list_cached(mock.Mock(), None)) - - @mock.patch.object(glance.cmd.cache_manage, 'get_client') - def test_catch_error_unhandled(self, mock_function): - mock_function.side_effect = exception.Duplicate() - my_mock = mock.Mock() - my_mock.debug = False - - self.assertEqual(cache_manage.FAILURE, - cache_manage.list_cached(my_mock, None)) - - @mock.patch.object(glance.cmd.cache_manage, 'get_client') - def test_catch_error_unhandled_debug_mode(self, mock_function): - mock_function.side_effect = exception.Duplicate() - my_mock = mock.Mock() - my_mock.debug = True - - self.assertRaises(exception.Duplicate, - cache_manage.list_cached, my_mock, None) - - def test_cache_manage_env(self): - def_value = 'sometext12345678900987654321' - self.assertNotEqual(def_value, - cache_manage.env('PATH', default=def_value)) - - def test_cache_manage_env_default(self): - def_value = 'sometext12345678900987654321' - self.assertEqual(def_value, - cache_manage.env('TMPVALUE1234567890', - default=def_value)) - - def test_create_option(self): - 
oparser = optparse.OptionParser() - cache_manage.create_options(oparser) - self.assertGreater(len(oparser.option_list), 0) - - @mock.patch.object(glance.cmd.cache_manage, 'lookup_command') - def test_parse_options_no_parameters(self, mock_lookup): - with mock.patch('sys.stdout', new_callable=StringIO): - oparser = optparse.OptionParser() - cache_manage.create_options(oparser) - - result = self.assertRaises(SystemExit, cache_manage.parse_options, - oparser, []) - self.assertEqual(0, result.code) - self.assertFalse(mock_lookup.called) - - @mock.patch.object(optparse.OptionParser, 'print_usage') - def test_parse_options_no_arguments(self, mock_printout): - oparser = optparse.OptionParser() - cache_manage.create_options(oparser) - - result = self.assertRaises(SystemExit, cache_manage.parse_options, - oparser, ['-p', '1212']) - self.assertEqual(0, result.code) - self.assertTrue(mock_printout.called) - - @mock.patch.object(glance.cmd.cache_manage, 'lookup_command') - def test_parse_options_retrieve_command(self, mock_lookup): - mock_lookup.return_value = True - oparser = optparse.OptionParser() - cache_manage.create_options(oparser) - (options, command, args) = cache_manage.parse_options(oparser, - ['-p', '1212', - 'list-cached']) - - self.assertTrue(command) - - def test_lookup_command_unsupported_command(self): - self.assertRaises(SystemExit, cache_manage.lookup_command, mock.Mock(), - 'unsupported_command') - - def test_lookup_command_supported_command(self): - command = cache_manage.lookup_command(mock.Mock(), 'list-cached') - self.assertEqual(cache_manage.list_cached, command) diff --git a/glance/tests/unit/api/test_common.py b/glance/tests/unit/api/test_common.py deleted file mode 100644 index 55a35e2b..00000000 --- a/glance/tests/unit/api/test_common.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools -import webob - -import glance.api.common -from glance.common import config -from glance.common import exception -from glance.tests import utils as test_utils - - -class SimpleIterator(object): - def __init__(self, file_object, chunk_size): - self.file_object = file_object - self.chunk_size = chunk_size - - def __iter__(self): - def read_chunk(): - return self.fobj.read(self.chunk_size) - - chunk = read_chunk() - while chunk: - yield chunk - chunk = read_chunk() - else: - raise StopIteration() - - -class TestSizeCheckedIter(testtools.TestCase): - def _get_image_metadata(self): - return {'id': 'e31cb99c-fe89-49fb-9cc5-f5104fffa636'} - - def _get_webob_response(self): - request = webob.Request.blank('/') - response = webob.Response() - response.request = request - return response - - def test_uniform_chunk_size(self): - resp = self._get_webob_response() - meta = self._get_image_metadata() - checked_image = glance.api.common.size_checked_iter( - resp, meta, 4, ['AB', 'CD'], None) - - self.assertEqual('AB', next(checked_image)) - self.assertEqual('CD', next(checked_image)) - self.assertRaises(StopIteration, next, checked_image) - - def test_small_last_chunk(self): - resp = self._get_webob_response() - meta = self._get_image_metadata() - checked_image = glance.api.common.size_checked_iter( - resp, meta, 3, ['AB', 'C'], None) - - self.assertEqual('AB', next(checked_image)) - self.assertEqual('C', 
next(checked_image)) - self.assertRaises(StopIteration, next, checked_image) - - def test_variable_chunk_size(self): - resp = self._get_webob_response() - meta = self._get_image_metadata() - checked_image = glance.api.common.size_checked_iter( - resp, meta, 6, ['AB', '', 'CDE', 'F'], None) - - self.assertEqual('AB', next(checked_image)) - self.assertEqual('', next(checked_image)) - self.assertEqual('CDE', next(checked_image)) - self.assertEqual('F', next(checked_image)) - self.assertRaises(StopIteration, next, checked_image) - - def test_too_many_chunks(self): - """An image should streamed regardless of expected_size""" - resp = self._get_webob_response() - meta = self._get_image_metadata() - checked_image = glance.api.common.size_checked_iter( - resp, meta, 4, ['AB', 'CD', 'EF'], None) - - self.assertEqual('AB', next(checked_image)) - self.assertEqual('CD', next(checked_image)) - self.assertEqual('EF', next(checked_image)) - self.assertRaises(exception.GlanceException, next, checked_image) - - def test_too_few_chunks(self): - resp = self._get_webob_response() - meta = self._get_image_metadata() - checked_image = glance.api.common.size_checked_iter(resp, meta, 6, - ['AB', 'CD'], - None) - - self.assertEqual('AB', next(checked_image)) - self.assertEqual('CD', next(checked_image)) - self.assertRaises(exception.GlanceException, next, checked_image) - - def test_too_much_data(self): - resp = self._get_webob_response() - meta = self._get_image_metadata() - checked_image = glance.api.common.size_checked_iter(resp, meta, 3, - ['AB', 'CD'], - None) - - self.assertEqual('AB', next(checked_image)) - self.assertEqual('CD', next(checked_image)) - self.assertRaises(exception.GlanceException, next, checked_image) - - def test_too_little_data(self): - resp = self._get_webob_response() - meta = self._get_image_metadata() - checked_image = glance.api.common.size_checked_iter(resp, meta, 6, - ['AB', 'CD', 'E'], - None) - - self.assertEqual('AB', next(checked_image)) - 
self.assertEqual('CD', next(checked_image)) - self.assertEqual('E', next(checked_image)) - self.assertRaises(exception.GlanceException, next, checked_image) - - -class TestMalformedRequest(test_utils.BaseTestCase): - def setUp(self): - """Establish a clean test environment""" - super(TestMalformedRequest, self).setUp() - self.config(flavor='', - group='paste_deploy', - config_file='etc/glance-api-paste.ini') - self.api = config.load_paste_app('glance-api') - - def test_redirect_incomplete_url(self): - """Test Glance redirects /v# to /v#/ with correct Location header""" - req = webob.Request.blank('/v1.1') - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPFound.code, res.status_int) - self.assertEqual('http://localhost/v1/', res.location) diff --git a/glance/tests/unit/api/test_property_protections.py b/glance/tests/unit/api/test_property_protections.py deleted file mode 100644 index d6b31626..00000000 --- a/glance/tests/unit/api/test_property_protections.py +++ /dev/null @@ -1,300 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from glance.api import policy -from glance.api import property_protections -from glance.common import exception -from glance.common import property_utils -import glance.domain -from glance.tests import utils - - -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' -TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' - - -class TestProtectedImageRepoProxy(utils.BaseTestCase): - - class ImageRepoStub(object): - def __init__(self, fixtures): - self.fixtures = fixtures - - def get(self, image_id): - for f in self.fixtures: - if f.image_id == image_id: - return f - else: - raise ValueError(image_id) - - def list(self, *args, **kwargs): - return self.fixtures - - def add(self, image): - self.fixtures.append(image) - - def setUp(self): - super(TestProtectedImageRepoProxy, self).setUp() - self.set_property_protections() - self.policy = policy.Enforcer() - self.property_rules = property_utils.PropertyRules(self.policy) - self.image_factory = glance.domain.ImageFactory() - extra_props = {'spl_create_prop': 'c', - 'spl_read_prop': 'r', - 'spl_update_prop': 'u', - 'spl_delete_prop': 'd', - 'forbidden': 'prop'} - extra_props_2 = {'spl_read_prop': 'r', 'forbidden': 'prop'} - self.fixtures = [ - self.image_factory.new_image(image_id='1', owner=TENANT1, - extra_properties=extra_props), - self.image_factory.new_image(owner=TENANT2, visibility='public'), - self.image_factory.new_image(image_id='3', owner=TENANT1, - extra_properties=extra_props_2), - ] - self.context = glance.context.RequestContext(roles=['spl_role']) - image_repo = self.ImageRepoStub(self.fixtures) - self.image_repo = property_protections.ProtectedImageRepoProxy( - image_repo, self.context, self.property_rules) - - def test_get_image(self): - image_id = '1' - result_image = self.image_repo.get(image_id) - result_extra_props = result_image.extra_properties - self.assertEqual('c', result_extra_props['spl_create_prop']) - self.assertEqual('r', result_extra_props['spl_read_prop']) - self.assertEqual('u', 
result_extra_props['spl_update_prop']) - self.assertEqual('d', result_extra_props['spl_delete_prop']) - self.assertNotIn('forbidden', result_extra_props.keys()) - - def test_list_image(self): - result_images = self.image_repo.list() - self.assertEqual(3, len(result_images)) - result_extra_props = result_images[0].extra_properties - self.assertEqual('c', result_extra_props['spl_create_prop']) - self.assertEqual('r', result_extra_props['spl_read_prop']) - self.assertEqual('u', result_extra_props['spl_update_prop']) - self.assertEqual('d', result_extra_props['spl_delete_prop']) - self.assertNotIn('forbidden', result_extra_props.keys()) - - result_extra_props = result_images[1].extra_properties - self.assertEqual({}, result_extra_props) - - result_extra_props = result_images[2].extra_properties - self.assertEqual('r', result_extra_props['spl_read_prop']) - self.assertNotIn('forbidden', result_extra_props.keys()) - - -class TestProtectedImageProxy(utils.BaseTestCase): - - def setUp(self): - super(TestProtectedImageProxy, self).setUp() - self.set_property_protections() - self.policy = policy.Enforcer() - self.property_rules = property_utils.PropertyRules(self.policy) - - class ImageStub(object): - def __init__(self, extra_prop): - self.extra_properties = extra_prop - - def test_read_image_with_extra_prop(self): - context = glance.context.RequestContext(roles=['spl_role']) - extra_prop = {'spl_read_prop': 'read', 'spl_fake_prop': 'prop'} - image = self.ImageStub(extra_prop) - result_image = property_protections.ProtectedImageProxy( - image, context, self.property_rules) - result_extra_props = result_image.extra_properties - self.assertEqual('read', result_extra_props['spl_read_prop']) - self.assertNotIn('spl_fake_prop', result_extra_props.keys()) - - -class TestExtraPropertiesProxy(utils.BaseTestCase): - - def setUp(self): - super(TestExtraPropertiesProxy, self).setUp() - self.set_property_protections() - self.policy = policy.Enforcer() - self.property_rules = 
property_utils.PropertyRules(self.policy) - - def test_read_extra_property_as_admin_role(self): - extra_properties = {'foo': 'bar', 'ping': 'pong'} - context = glance.context.RequestContext(roles=['admin']) - extra_prop_proxy = property_protections.ExtraPropertiesProxy( - context, extra_properties, self.property_rules) - test_result = extra_prop_proxy['foo'] - self.assertEqual('bar', test_result) - - def test_read_extra_property_as_unpermitted_role(self): - extra_properties = {'foo': 'bar', 'ping': 'pong'} - context = glance.context.RequestContext(roles=['unpermitted_role']) - extra_prop_proxy = property_protections.ExtraPropertiesProxy( - context, extra_properties, self.property_rules) - self.assertRaises(KeyError, extra_prop_proxy.__getitem__, 'foo') - - def test_update_extra_property_as_permitted_role_after_read(self): - extra_properties = {'foo': 'bar', 'ping': 'pong'} - context = glance.context.RequestContext(roles=['admin']) - extra_prop_proxy = property_protections.ExtraPropertiesProxy( - context, extra_properties, self.property_rules) - extra_prop_proxy['foo'] = 'par' - self.assertEqual('par', extra_prop_proxy['foo']) - - def test_update_extra_property_as_unpermitted_role_after_read(self): - extra_properties = {'spl_read_prop': 'bar'} - context = glance.context.RequestContext(roles=['spl_role']) - extra_prop_proxy = property_protections.ExtraPropertiesProxy( - context, extra_properties, self.property_rules) - self.assertRaises(exception.ReservedProperty, - extra_prop_proxy.__setitem__, - 'spl_read_prop', 'par') - - def test_update_reserved_extra_property(self): - extra_properties = {'spl_create_prop': 'bar'} - context = glance.context.RequestContext(roles=['spl_role']) - extra_prop_proxy = property_protections.ExtraPropertiesProxy( - context, extra_properties, self.property_rules) - self.assertRaises(exception.ReservedProperty, - extra_prop_proxy.__setitem__, 'spl_create_prop', - 'par') - - def test_update_empty_extra_property(self): - extra_properties = 
{'foo': ''} - context = glance.context.RequestContext(roles=['admin']) - extra_prop_proxy = property_protections.ExtraPropertiesProxy( - context, extra_properties, self.property_rules) - extra_prop_proxy['foo'] = 'bar' - self.assertEqual('bar', extra_prop_proxy['foo']) - - def test_create_extra_property_admin(self): - extra_properties = {} - context = glance.context.RequestContext(roles=['admin']) - extra_prop_proxy = property_protections.ExtraPropertiesProxy( - context, extra_properties, self.property_rules) - extra_prop_proxy['boo'] = 'doo' - self.assertEqual('doo', extra_prop_proxy['boo']) - - def test_create_reserved_extra_property(self): - extra_properties = {} - context = glance.context.RequestContext(roles=['spl_role']) - extra_prop_proxy = property_protections.ExtraPropertiesProxy( - context, extra_properties, self.property_rules) - self.assertRaises(exception.ReservedProperty, - extra_prop_proxy.__setitem__, 'boo', - 'doo') - - def test_delete_extra_property_as_admin_role(self): - extra_properties = {'foo': 'bar'} - context = glance.context.RequestContext(roles=['admin']) - extra_prop_proxy = property_protections.ExtraPropertiesProxy( - context, extra_properties, self.property_rules) - del extra_prop_proxy['foo'] - self.assertRaises(KeyError, extra_prop_proxy.__getitem__, 'foo') - - def test_delete_nonexistant_extra_property_as_admin_role(self): - extra_properties = {} - context = glance.context.RequestContext(roles=['admin']) - extra_prop_proxy = property_protections.ExtraPropertiesProxy( - context, extra_properties, self.property_rules) - self.assertRaises(KeyError, extra_prop_proxy.__delitem__, 'foo') - - def test_delete_reserved_extra_property(self): - extra_properties = {'spl_read_prop': 'r'} - context = glance.context.RequestContext(roles=['spl_role']) - extra_prop_proxy = property_protections.ExtraPropertiesProxy( - context, extra_properties, self.property_rules) - # Ensure property has been created and can be read - self.assertEqual('r', 
extra_prop_proxy['spl_read_prop']) - self.assertRaises(exception.ReservedProperty, - extra_prop_proxy.__delitem__, 'spl_read_prop') - - def test_delete_nonexistant_extra_property(self): - extra_properties = {} - roles = ['spl_role'] - extra_prop_proxy = property_protections.ExtraPropertiesProxy( - roles, extra_properties, self.property_rules) - self.assertRaises(KeyError, - extra_prop_proxy.__delitem__, 'spl_read_prop') - - def test_delete_empty_extra_property(self): - extra_properties = {'foo': ''} - context = glance.context.RequestContext(roles=['admin']) - extra_prop_proxy = property_protections.ExtraPropertiesProxy( - context, extra_properties, self.property_rules) - del extra_prop_proxy['foo'] - self.assertNotIn('foo', extra_prop_proxy) - - -class TestProtectedImageFactoryProxy(utils.BaseTestCase): - def setUp(self): - super(TestProtectedImageFactoryProxy, self).setUp() - self.set_property_protections() - self.policy = policy.Enforcer() - self.property_rules = property_utils.PropertyRules(self.policy) - self.factory = glance.domain.ImageFactory() - - def test_create_image_no_extra_prop(self): - self.context = glance.context.RequestContext(tenant=TENANT1, - roles=['spl_role']) - self.image_factory = property_protections.ProtectedImageFactoryProxy( - self.factory, self.context, - self.property_rules) - extra_props = {} - image = self.image_factory.new_image(extra_properties=extra_props) - expected_extra_props = {} - self.assertEqual(expected_extra_props, image.extra_properties) - - def test_create_image_extra_prop(self): - self.context = glance.context.RequestContext(tenant=TENANT1, - roles=['spl_role']) - self.image_factory = property_protections.ProtectedImageFactoryProxy( - self.factory, self.context, - self.property_rules) - extra_props = {'spl_create_prop': 'c'} - image = self.image_factory.new_image(extra_properties=extra_props) - expected_extra_props = {'spl_create_prop': 'c'} - self.assertEqual(expected_extra_props, image.extra_properties) - - def 
test_create_image_extra_prop_reserved_property(self): - self.context = glance.context.RequestContext(tenant=TENANT1, - roles=['spl_role']) - self.image_factory = property_protections.ProtectedImageFactoryProxy( - self.factory, self.context, - self.property_rules) - extra_props = {'foo': 'bar', 'spl_create_prop': 'c'} - # no reg ex for property 'foo' is mentioned for spl_role in config - self.assertRaises(exception.ReservedProperty, - self.image_factory.new_image, - extra_properties=extra_props) - - def test_create_image_extra_prop_admin(self): - self.context = glance.context.RequestContext(tenant=TENANT1, - roles=['admin']) - self.image_factory = property_protections.ProtectedImageFactoryProxy( - self.factory, self.context, - self.property_rules) - extra_props = {'foo': 'bar', 'spl_create_prop': 'c'} - image = self.image_factory.new_image(extra_properties=extra_props) - expected_extra_props = {'foo': 'bar', 'spl_create_prop': 'c'} - self.assertEqual(expected_extra_props, image.extra_properties) - - def test_create_image_extra_prop_invalid_role(self): - self.context = glance.context.RequestContext(tenant=TENANT1, - roles=['imaginary-role']) - self.image_factory = property_protections.ProtectedImageFactoryProxy( - self.factory, self.context, - self.property_rules) - extra_props = {'foo': 'bar', 'spl_create_prop': 'c'} - self.assertRaises(exception.ReservedProperty, - self.image_factory.new_image, - extra_properties=extra_props) diff --git a/glance/tests/unit/async/__init__.py b/glance/tests/unit/async/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/unit/async/flows/__init__.py b/glance/tests/unit/async/flows/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/unit/async/flows/test_convert.py b/glance/tests/unit/async/flows/test_convert.py deleted file mode 100644 index b3c3848b..00000000 --- a/glance/tests/unit/async/flows/test_convert.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright 2015 Red 
Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import mock -import os - -import glance_store -from oslo_concurrency import processutils -from oslo_config import cfg -import six - -from glance.async.flows import convert -from glance.async import taskflow_executor -from glance.common.scripts import utils as script_utils -from glance.common import utils -from glance import domain -from glance import gateway -import glance.tests.utils as test_utils - -CONF = cfg.CONF - -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' - - -class TestImportTask(test_utils.BaseTestCase): - - def setUp(self): - super(TestImportTask, self).setUp() - self.work_dir = os.path.join(self.test_dir, 'work_dir') - utils.safe_mkdirs(self.work_dir) - self.config(work_dir=self.work_dir, group='task') - - self.context = mock.MagicMock() - self.img_repo = mock.MagicMock() - self.task_repo = mock.MagicMock() - - self.gateway = gateway.Gateway() - self.task_factory = domain.TaskFactory() - self.img_factory = self.gateway.get_image_factory(self.context) - self.image = self.img_factory.new_image(image_id=UUID1, - disk_format='raw', - container_format='bare') - - task_input = { - "import_from": "http://cloud.foo/image.raw", - "import_from_format": "raw", - "image_properties": {'disk_format': 'qcow2', - 'container_format': 'bare'} - } - task_ttl = CONF.task.task_time_to_live - - self.task_type = 'import' - 
self.task = self.task_factory.new_task(self.task_type, TENANT1, - task_time_to_live=task_ttl, - task_input=task_input) - - glance_store.register_opts(CONF) - self.config(default_store='file', - stores=['file', 'http'], - filesystem_store_datadir=self.test_dir, - group="glance_store") - - self.config(conversion_format='qcow2', - group='taskflow_executor') - glance_store.create_stores(CONF) - - def test_convert_success(self): - image_convert = convert._Convert(self.task.task_id, - self.task_type, - self.img_repo) - - self.task_repo.get.return_value = self.task - image_id = mock.sentinel.image_id - image = mock.MagicMock(image_id=image_id, virtual_size=None) - self.img_repo.get.return_value = image - - with mock.patch.object(processutils, 'execute') as exc_mock: - exc_mock.return_value = ("", None) - with mock.patch.object(os, 'rename') as rm_mock: - rm_mock.return_value = None - image_convert.execute(image, 'file:///test/path.raw') - - # NOTE(hemanthm): Asserting that the source format is passed - # to qemu-utis to avoid inferring the image format. 
This - # shields us from an attack vector described at - # https://bugs.launchpad.net/glance/+bug/1449062/comments/72 - self.assertIn('-f', exc_mock.call_args[0]) - - def test_convert_revert_success(self): - image_convert = convert._Convert(self.task.task_id, - self.task_type, - self.img_repo) - - self.task_repo.get.return_value = self.task - image_id = mock.sentinel.image_id - image = mock.MagicMock(image_id=image_id, virtual_size=None) - self.img_repo.get.return_value = image - - with mock.patch.object(processutils, 'execute') as exc_mock: - exc_mock.return_value = ("", None) - with mock.patch.object(os, 'remove') as rmtree_mock: - rmtree_mock.return_value = None - image_convert.revert(image, 'file:///tmp/test') - - def test_import_flow_with_convert_and_introspect(self): - self.config(engine_mode='serial', - group='taskflow_executor') - - image = self.img_factory.new_image(image_id=UUID1, - disk_format='raw', - container_format='bare') - - img_factory = mock.MagicMock() - - executor = taskflow_executor.TaskExecutor( - self.context, - self.task_repo, - self.img_repo, - img_factory) - - self.task_repo.get.return_value = self.task - - def create_image(*args, **kwargs): - kwargs['image_id'] = UUID1 - return self.img_factory.new_image(*args, **kwargs) - - self.img_repo.get.return_value = image - img_factory.new_image.side_effect = create_image - - image_path = os.path.join(self.work_dir, image.image_id) - - def fake_execute(*args, **kwargs): - if 'info' in args: - # NOTE(flaper87): Make sure the file actually - # exists. Extra check to verify previous tasks did - # what they were supposed to do. 
- assert os.path.exists(args[3].split("file://")[-1]) - - return (json.dumps({ - "virtual-size": 10737418240, - "filename": "/tmp/image.qcow2", - "cluster-size": 65536, - "format": "qcow2", - "actual-size": 373030912, - "format-specific": { - "type": "qcow2", - "data": { - "compat": "0.10" - } - }, - "dirty-flag": False - }), None) - - open("%s.converted" % image_path, 'a').close() - return ("", None) - - with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: - dmock.return_value = six.BytesIO(b"TEST_IMAGE") - - with mock.patch.object(processutils, 'execute') as exc_mock: - exc_mock.side_effect = fake_execute - executor.begin_processing(self.task.task_id) - - # NOTE(flaper87): DeleteFromFS should've deleted this - # file. Make sure it doesn't exist. - self.assertFalse(os.path.exists(image_path)) - - # NOTE(flaper87): Workdir should be empty after all - # the tasks have been executed. - self.assertEqual([], os.listdir(self.work_dir)) - self.assertEqual('qcow2', image.disk_format) - self.assertEqual(10737418240, image.virtual_size) - - # NOTE(hemanthm): Asserting that the source format is passed - # to qemu-utis to avoid inferring the image format when - # converting. This shields us from an attack vector described - # at https://bugs.launchpad.net/glance/+bug/1449062/comments/72 - # - # A total of three calls will be made to 'execute': 'info', - # 'convert' and 'info' towards introspection, conversion and - # OVF packaging respectively. We care about the 'convert' call - # here, hence we fetch the 2nd set of args from the args list. - convert_call_args, _ = exc_mock.call_args_list[1] - self.assertIn('-f', convert_call_args) diff --git a/glance/tests/unit/async/flows/test_import.py b/glance/tests/unit/async/flows/test_import.py deleted file mode 100644 index bbc708d2..00000000 --- a/glance/tests/unit/async/flows/test_import.py +++ /dev/null @@ -1,439 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import mock -import os - -import glance_store -from oslo_concurrency import processutils as putils -from oslo_config import cfg -import six -from six.moves import urllib -from taskflow import task -from taskflow.types import failure - -import glance.async.flows.base_import as import_flow -from glance.async import taskflow_executor -from glance.async import utils as async_utils -from glance.common.scripts.image_import import main as image_import -from glance.common.scripts import utils as script_utils -from glance.common import utils -from glance import domain -from glance import gateway -import glance.tests.utils as test_utils - -CONF = cfg.CONF - -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' - - -class _ErrorTask(task.Task): - - def execute(self): - raise RuntimeError() - - -class TestImportTask(test_utils.BaseTestCase): - - def setUp(self): - super(TestImportTask, self).setUp() - - glance_store.register_opts(CONF) - self.config(default_store='file', - stores=['file', 'http'], - filesystem_store_datadir=self.test_dir, - group="glance_store") - glance_store.create_stores(CONF) - - self.work_dir = os.path.join(self.test_dir, 'work_dir') - utils.safe_mkdirs(self.work_dir) - self.config(work_dir=self.work_dir, group='task') - - self.context = mock.MagicMock() - self.img_repo = mock.MagicMock() - self.task_repo = mock.MagicMock() - - self.gateway = 
gateway.Gateway() - self.task_factory = domain.TaskFactory() - self.img_factory = self.gateway.get_image_factory(self.context) - self.image = self.img_factory.new_image(image_id=UUID1, - disk_format='qcow2', - container_format='bare') - - task_input = { - "import_from": "http://cloud.foo/image.qcow2", - "import_from_format": "qcow2", - "image_properties": {'disk_format': 'qcow2', - 'container_format': 'bare'} - } - task_ttl = CONF.task.task_time_to_live - - self.task_type = 'import' - self.task = self.task_factory.new_task(self.task_type, TENANT1, - task_time_to_live=task_ttl, - task_input=task_input) - - def _assert_qemu_process_limits(self, exec_mock): - # NOTE(hemanthm): Assert that process limits are being applied - # on "qemu-img info" calls. See bug #1449062 for more details. - kw_args = exec_mock.call_args[1] - self.assertIn('prlimit', kw_args) - self.assertEqual(async_utils.QEMU_IMG_PROC_LIMITS, - kw_args.get('prlimit')) - - def test_import_flow(self): - self.config(engine_mode='serial', - group='taskflow_executor') - - img_factory = mock.MagicMock() - - executor = taskflow_executor.TaskExecutor( - self.context, - self.task_repo, - self.img_repo, - img_factory) - - self.task_repo.get.return_value = self.task - - def create_image(*args, **kwargs): - kwargs['image_id'] = UUID1 - return self.img_factory.new_image(*args, **kwargs) - - self.img_repo.get.return_value = self.image - img_factory.new_image.side_effect = create_image - - with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: - dmock.return_value = six.BytesIO(b"TEST_IMAGE") - - with mock.patch.object(putils, 'trycmd') as tmock: - tmock.return_value = (json.dumps({ - 'format': 'qcow2', - }), None) - - executor.begin_processing(self.task.task_id) - image_path = os.path.join(self.test_dir, self.image.image_id) - tmp_image_path = os.path.join(self.work_dir, - "%s.tasks_import" % image_path) - - self.assertFalse(os.path.exists(tmp_image_path)) - self.assertTrue(os.path.exists(image_path)) - 
self.assertEqual(1, len(list(self.image.locations))) - self.assertEqual("file://%s/%s" % (self.test_dir, - self.image.image_id), - self.image.locations[0]['url']) - - self._assert_qemu_process_limits(tmock) - - def test_import_flow_missing_work_dir(self): - self.config(engine_mode='serial', group='taskflow_executor') - self.config(work_dir=None, group='task') - - img_factory = mock.MagicMock() - - executor = taskflow_executor.TaskExecutor( - self.context, - self.task_repo, - self.img_repo, - img_factory) - - self.task_repo.get.return_value = self.task - - def create_image(*args, **kwargs): - kwargs['image_id'] = UUID1 - return self.img_factory.new_image(*args, **kwargs) - - self.img_repo.get.return_value = self.image - img_factory.new_image.side_effect = create_image - - with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: - dmock.return_value = six.BytesIO(b"TEST_IMAGE") - - with mock.patch.object(import_flow._ImportToFS, 'execute') as emk: - executor.begin_processing(self.task.task_id) - self.assertFalse(emk.called) - - image_path = os.path.join(self.test_dir, self.image.image_id) - tmp_image_path = os.path.join(self.work_dir, - "%s.tasks_import" % image_path) - self.assertFalse(os.path.exists(tmp_image_path)) - self.assertTrue(os.path.exists(image_path)) - - def test_import_flow_revert_import_to_fs(self): - self.config(engine_mode='serial', group='taskflow_executor') - - img_factory = mock.MagicMock() - - executor = taskflow_executor.TaskExecutor( - self.context, - self.task_repo, - self.img_repo, - img_factory) - - self.task_repo.get.return_value = self.task - - def create_image(*args, **kwargs): - kwargs['image_id'] = UUID1 - return self.img_factory.new_image(*args, **kwargs) - - self.img_repo.get.return_value = self.image - img_factory.new_image.side_effect = create_image - - with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: - dmock.side_effect = RuntimeError - - with mock.patch.object(import_flow._ImportToFS, 'revert') as 
rmock: - self.assertRaises(RuntimeError, - executor.begin_processing, self.task.task_id) - self.assertTrue(rmock.called) - self.assertIsInstance(rmock.call_args[1]['result'], - failure.Failure) - - image_path = os.path.join(self.test_dir, self.image.image_id) - tmp_image_path = os.path.join(self.work_dir, - "%s.tasks_import" % image_path) - self.assertFalse(os.path.exists(tmp_image_path)) - # Note(sabari): The image should not have been uploaded to - # the store as the flow failed before ImportToStore Task. - self.assertFalse(os.path.exists(image_path)) - - def test_import_flow_backed_file_import_to_fs(self): - self.config(engine_mode='serial', group='taskflow_executor') - - img_factory = mock.MagicMock() - - executor = taskflow_executor.TaskExecutor( - self.context, - self.task_repo, - self.img_repo, - img_factory) - - self.task_repo.get.return_value = self.task - - def create_image(*args, **kwargs): - kwargs['image_id'] = UUID1 - return self.img_factory.new_image(*args, **kwargs) - - self.img_repo.get.return_value = self.image - img_factory.new_image.side_effect = create_image - - with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: - dmock.return_value = six.BytesIO(b"TEST_IMAGE") - - with mock.patch.object(putils, 'trycmd') as tmock: - tmock.return_value = (json.dumps({ - 'backing-filename': '/etc/password' - }), None) - - with mock.patch.object(import_flow._ImportToFS, - 'revert') as rmock: - self.assertRaises(RuntimeError, - executor.begin_processing, - self.task.task_id) - self.assertTrue(rmock.called) - self.assertIsInstance(rmock.call_args[1]['result'], - failure.Failure) - self._assert_qemu_process_limits(tmock) - - image_path = os.path.join(self.test_dir, - self.image.image_id) - - fname = "%s.tasks_import" % image_path - tmp_image_path = os.path.join(self.work_dir, fname) - - self.assertFalse(os.path.exists(tmp_image_path)) - # Note(sabari): The image should not have been uploaded to - # the store as the flow failed before ImportToStore 
Task. - self.assertFalse(os.path.exists(image_path)) - - def test_import_flow_revert(self): - self.config(engine_mode='serial', - group='taskflow_executor') - - img_factory = mock.MagicMock() - - executor = taskflow_executor.TaskExecutor( - self.context, - self.task_repo, - self.img_repo, - img_factory) - - self.task_repo.get.return_value = self.task - - def create_image(*args, **kwargs): - kwargs['image_id'] = UUID1 - return self.img_factory.new_image(*args, **kwargs) - - self.img_repo.get.return_value = self.image - img_factory.new_image.side_effect = create_image - - with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: - dmock.return_value = six.BytesIO(b"TEST_IMAGE") - - with mock.patch.object(putils, 'trycmd') as tmock: - tmock.return_value = (json.dumps({ - 'format': 'qcow2', - }), None) - - with mock.patch.object(import_flow, - "_get_import_flows") as imock: - imock.return_value = (x for x in [_ErrorTask()]) - self.assertRaises(RuntimeError, - executor.begin_processing, - self.task.task_id) - - self._assert_qemu_process_limits(tmock) - - image_path = os.path.join(self.test_dir, - self.image.image_id) - tmp_image_path = os.path.join(self.work_dir, - ("%s.tasks_import" % - image_path)) - self.assertFalse(os.path.exists(tmp_image_path)) - - # NOTE(flaper87): Eventually, we want this to be assertTrue - # The current issue is there's no way to tell taskflow to - # continue on failures. That is, revert the subflow but - # keep executing the parent flow. Under - # discussion/development. 
- self.assertFalse(os.path.exists(image_path)) - - def test_import_flow_no_import_flows(self): - self.config(engine_mode='serial', - group='taskflow_executor') - - img_factory = mock.MagicMock() - - executor = taskflow_executor.TaskExecutor( - self.context, - self.task_repo, - self.img_repo, - img_factory) - - self.task_repo.get.return_value = self.task - - def create_image(*args, **kwargs): - kwargs['image_id'] = UUID1 - return self.img_factory.new_image(*args, **kwargs) - - self.img_repo.get.return_value = self.image - img_factory.new_image.side_effect = create_image - - with mock.patch.object(urllib.request, 'urlopen') as umock: - content = b"TEST_IMAGE" - umock.return_value = six.BytesIO(content) - - with mock.patch.object(import_flow, "_get_import_flows") as imock: - imock.return_value = (x for x in []) - executor.begin_processing(self.task.task_id) - image_path = os.path.join(self.test_dir, self.image.image_id) - tmp_image_path = os.path.join(self.work_dir, - "%s.tasks_import" % image_path) - self.assertFalse(os.path.exists(tmp_image_path)) - self.assertTrue(os.path.exists(image_path)) - self.assertEqual(1, umock.call_count) - - with open(image_path, 'rb') as ifile: - self.assertEqual(content, ifile.read()) - - def test_create_image(self): - image_create = import_flow._CreateImage(self.task.task_id, - self.task_type, - self.task_repo, - self.img_repo, - self.img_factory) - - self.task_repo.get.return_value = self.task - with mock.patch.object(image_import, 'create_image') as ci_mock: - ci_mock.return_value = mock.Mock() - image_create.execute() - - ci_mock.assert_called_once_with(self.img_repo, - self.img_factory, - {'container_format': 'bare', - 'disk_format': 'qcow2'}, - self.task.task_id) - - def test_save_image(self): - save_image = import_flow._SaveImage(self.task.task_id, - self.task_type, - self.img_repo) - - with mock.patch.object(self.img_repo, 'get') as get_mock: - image_id = mock.sentinel.image_id - image = mock.MagicMock(image_id=image_id, 
status='saving') - get_mock.return_value = image - - with mock.patch.object(self.img_repo, 'save') as save_mock: - save_image.execute(image.image_id) - get_mock.assert_called_once_with(image_id) - save_mock.assert_called_once_with(image) - self.assertEqual('active', image.status) - - def test_import_to_fs(self): - import_fs = import_flow._ImportToFS(self.task.task_id, - self.task_type, - self.task_repo, - 'http://example.com/image.qcow2') - - with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: - content = b"test" - dmock.return_value = [content] - - with mock.patch.object(putils, 'trycmd') as tmock: - tmock.return_value = (json.dumps({ - 'format': 'qcow2', - }), None) - - image_id = UUID1 - path = import_fs.execute(image_id) - reader, size = glance_store.get_from_backend(path) - self.assertEqual(4, size) - self.assertEqual(content, b"".join(reader)) - - image_path = os.path.join(self.work_dir, image_id) - tmp_image_path = os.path.join(self.work_dir, image_path) - self.assertTrue(os.path.exists(tmp_image_path)) - self._assert_qemu_process_limits(tmock) - - def test_delete_from_fs(self): - delete_fs = import_flow._DeleteFromFS(self.task.task_id, - self.task_type) - - data = [b"test"] - - store = glance_store.get_store_from_scheme('file') - path = glance_store.store_add_to_backend(mock.sentinel.image_id, data, - mock.sentinel.image_size, - store, context=None)[0] - - path_wo_scheme = path.split("file://")[1] - self.assertTrue(os.path.exists(path_wo_scheme)) - delete_fs.execute(path) - self.assertFalse(os.path.exists(path_wo_scheme)) - - def test_complete_task(self): - complete_task = import_flow._CompleteTask(self.task.task_id, - self.task_type, - self.task_repo) - - image_id = mock.sentinel.image_id - image = mock.MagicMock(image_id=image_id) - - self.task_repo.get.return_value = self.task - with mock.patch.object(self.task, 'succeed') as succeed: - complete_task.execute(image.image_id) - succeed.assert_called_once_with({'image_id': image_id}) diff 
--git a/glance/tests/unit/async/flows/test_introspect.py b/glance/tests/unit/async/flows/test_introspect.py deleted file mode 100644 index 869f99de..00000000 --- a/glance/tests/unit/async/flows/test_introspect.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import mock - -import glance_store -from oslo_concurrency import processutils -from oslo_config import cfg - -from glance.async.flows import introspect -from glance.async import utils as async_utils -from glance import domain -import glance.tests.utils as test_utils - -CONF = cfg.CONF - -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' - - -class TestImportTask(test_utils.BaseTestCase): - - def setUp(self): - super(TestImportTask, self).setUp() - self.task_factory = domain.TaskFactory() - task_input = { - "import_from": "http://cloud.foo/image.qcow2", - "import_from_format": "qcow2", - "image_properties": mock.sentinel.image_properties - } - task_ttl = CONF.task.task_time_to_live - - self.task_type = 'import' - self.task = self.task_factory.new_task(self.task_type, TENANT1, - task_time_to_live=task_ttl, - task_input=task_input) - - self.context = mock.Mock() - self.img_repo = mock.Mock() - self.task_repo = mock.Mock() - self.img_factory = mock.Mock() - - glance_store.register_opts(CONF) - self.config(default_store='file', - stores=['file', 'http'], - 
filesystem_store_datadir=self.test_dir, - group="glance_store") - glance_store.create_stores(CONF) - - def test_introspect_success(self): - image_create = introspect._Introspect(self.task.task_id, - self.task_type, - self.img_repo) - - self.task_repo.get.return_value = self.task - image_id = mock.sentinel.image_id - image = mock.MagicMock(image_id=image_id) - self.img_repo.get.return_value = image - - with mock.patch.object(processutils, 'execute') as exc_mock: - result = json.dumps({ - "virtual-size": 10737418240, - "filename": "/tmp/image.qcow2", - "cluster-size": 65536, - "format": "qcow2", - "actual-size": 373030912, - "format-specific": { - "type": "qcow2", - "data": { - "compat": "0.10" - } - }, - "dirty-flag": False - }) - - exc_mock.return_value = (result, None) - image_create.execute(image, '/test/path.qcow2') - self.assertEqual(10737418240, image.virtual_size) - - # NOTE(hemanthm): Assert that process limits are being applied on - # "qemu-img info" calls. See bug #1449062 for more details. - kw_args = exc_mock.call_args[1] - self.assertIn('prlimit', kw_args) - self.assertEqual(async_utils.QEMU_IMG_PROC_LIMITS, - kw_args.get('prlimit')) - - def test_introspect_no_image(self): - image_create = introspect._Introspect(self.task.task_id, - self.task_type, - self.img_repo) - - self.task_repo.get.return_value = self.task - image_id = mock.sentinel.image_id - image = mock.MagicMock(image_id=image_id, virtual_size=None) - self.img_repo.get.return_value = image - - # NOTE(flaper87): Don't mock, test the error. - with mock.patch.object(processutils, 'execute') as exc_mock: - exc_mock.return_value = (None, "some error") - # NOTE(flaper87): Pls, read the `OptionalTask._catch_all` - # docs to know why this is commented. 
- # self.assertRaises(RuntimeError, - # image_create.execute, - # image, '/test/path.qcow2') - image_create.execute(image, '/test/path.qcow2') - self.assertIsNone(image.virtual_size) diff --git a/glance/tests/unit/async/flows/test_ovf_process.py b/glance/tests/unit/async/flows/test_ovf_process.py deleted file mode 100644 index a68a9578..00000000 --- a/glance/tests/unit/async/flows/test_ovf_process.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright 2015 Intel Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os.path -import shutil -import tarfile -import tempfile - -import mock -try: - from xml.etree.cElementTree import ParseError -except ImportError: - from xml.etree.ElementTree import ParseError - -from glance.async.flows import ovf_process -import glance.tests.utils as test_utils -from oslo_config import cfg - - -class TestOvfProcessTask(test_utils.BaseTestCase): - - def setUp(self): - super(TestOvfProcessTask, self).setUp() - # The glance/tests/var dir containing sample ova packages used - # by the tests in this class - self.test_ova_dir = os.path.abspath(os.path.join( - os.path.dirname(__file__), - '../../../', 'var')) - self.tempdir = tempfile.mkdtemp() - self.config(work_dir=self.tempdir, group="task") - - # These are the properties that we will extract from the ovf - # file contained in a ova package - interested_properties = ( - '{\n' - ' "cim_pasd": [\n' - ' "InstructionSetExtensionName",\n' - ' "ProcessorArchitecture"]\n' - '}\n') - self.config_file_name = os.path.join(self.tempdir, 'ovf-metadata.json') - with open(self.config_file_name, 'w') as config_file: - config_file.write(interested_properties) - - self.image = mock.Mock() - self.image.container_format = 'ova' - self.image.context.is_admin = True - - self.img_repo = mock.Mock() - self.img_repo.get.return_value = self.image - - def tearDown(self): - if os.path.exists(self.tempdir): - shutil.rmtree(self.tempdir) - - super(TestOvfProcessTask, self).tearDown() - - def _copy_ova_to_tmpdir(self, ova_name): - # Copies an ova package to the tempdir from which - # it will be read by the system-under-test - shutil.copy(os.path.join(self.test_ova_dir, ova_name), self.tempdir) - return os.path.join(self.tempdir, ova_name) - - @mock.patch.object(cfg.ConfigOpts, 'find_file') - def test_ovf_process_success(self, mock_find_file): - mock_find_file.return_value = self.config_file_name - - ova_file_path = self._copy_ova_to_tmpdir('testserver.ova') - ova_uri = 'file://' + ova_file_path - - oprocess = 
ovf_process._OVF_Process('task_id', 'ovf_proc', - self.img_repo) - self.assertEqual(ova_uri, oprocess.execute('test_image_id', ova_uri)) - - # Note that the extracted disk image is overwritten onto the input ova - # file - with open(ova_file_path, 'rb') as disk_image_file: - content = disk_image_file.read() - # b'ABCD' is the exact contents of the disk image file - # testserver-disk1.vmdk contained in the testserver.ova package used - # by this test - self.assertEqual(b'ABCD', content) - # 'DMTF:x86:VT-d' is the value in the testerver.ovf file in the - # testserver.ova package - self.image.extra_properties.update.assert_called_once_with( - {'cim_pasd_InstructionSetExtensionName': 'DMTF:x86:VT-d'}) - self.assertEqual('bare', self.image.container_format) - - @mock.patch.object(cfg.ConfigOpts, 'find_file') - def test_ovf_process_no_config_file(self, mock_find_file): - # Mimics a Glance deployment without the ovf-metadata.json file - mock_find_file.return_value = None - - ova_file_path = self._copy_ova_to_tmpdir('testserver.ova') - ova_uri = 'file://' + ova_file_path - - oprocess = ovf_process._OVF_Process('task_id', 'ovf_proc', - self.img_repo) - self.assertEqual(ova_uri, oprocess.execute('test_image_id', ova_uri)) - - # Note that the extracted disk image is overwritten onto the input - # ova file. 
- with open(ova_file_path, 'rb') as disk_image_file: - content = disk_image_file.read() - # b'ABCD' is the exact contents of the disk image file - # testserver-disk1.vmdk contained in the testserver.ova package used - # by this test - self.assertEqual(b'ABCD', content) - # No properties must be selected from the ovf file - self.image.extra_properties.update.assert_called_once_with({}) - self.assertEqual('bare', self.image.container_format) - - @mock.patch.object(cfg.ConfigOpts, 'find_file') - def test_ovf_process_not_admin(self, mock_find_file): - mock_find_file.return_value = self.config_file_name - - ova_file_path = self._copy_ova_to_tmpdir('testserver.ova') - ova_uri = 'file://' + ova_file_path - - self.image.context.is_admin = False - - oprocess = ovf_process._OVF_Process('task_id', 'ovf_proc', - self.img_repo) - self.assertRaises(RuntimeError, oprocess.execute, 'test_image_id', - ova_uri) - - def test_extract_ova_not_tar(self): - # testserver-not-tar.ova package is not in tar format - ova_file_path = os.path.join(self.test_ova_dir, - 'testserver-not-tar.ova') - iextractor = ovf_process.OVAImageExtractor() - with open(ova_file_path, 'rb') as ova_file: - self.assertRaises(tarfile.ReadError, iextractor.extract, ova_file) - - def test_extract_ova_no_disk(self): - # testserver-no-disk.ova package contains no disk image file - ova_file_path = os.path.join(self.test_ova_dir, - 'testserver-no-disk.ova') - iextractor = ovf_process.OVAImageExtractor() - with open(ova_file_path, 'rb') as ova_file: - self.assertRaises(KeyError, iextractor.extract, ova_file) - - def test_extract_ova_no_ovf(self): - # testserver-no-ovf.ova package contains no ovf file - ova_file_path = os.path.join(self.test_ova_dir, - 'testserver-no-ovf.ova') - iextractor = ovf_process.OVAImageExtractor() - with open(ova_file_path, 'rb') as ova_file: - self.assertRaises(RuntimeError, iextractor.extract, ova_file) - - def test_extract_ova_bad_ovf(self): - # testserver-bad-ovf.ova package has an ovf file 
that contains - # invalid xml - ova_file_path = os.path.join(self.test_ova_dir, - 'testserver-bad-ovf.ova') - iextractor = ovf_process.OVAImageExtractor() - with open(ova_file_path, 'rb') as ova_file: - self.assertRaises(ParseError, iextractor._parse_OVF, ova_file) diff --git a/glance/tests/unit/async/test_async.py b/glance/tests/unit/async/test_async.py deleted file mode 100644 index 8efa48a5..00000000 --- a/glance/tests/unit/async/test_async.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import mock - -import glance.async -import glance.tests.utils as test_utils - - -class TestTaskExecutor(test_utils.BaseTestCase): - - def setUp(self): - super(TestTaskExecutor, self).setUp() - self.context = mock.Mock() - self.task_repo = mock.Mock() - self.image_repo = mock.Mock() - self.image_factory = mock.Mock() - self.executor = glance.async.TaskExecutor(self.context, - self.task_repo, - self.image_repo, - self.image_factory) - - def test_begin_processing(self): - # setup - task_id = mock.ANY - task_type = mock.ANY - task = mock.Mock() - - with mock.patch.object( - glance.async.TaskExecutor, - '_run') as mock_run: - self.task_repo.get.return_value = task - self.executor.begin_processing(task_id) - - # assert the call - mock_run.assert_called_once_with(task_id, task_type) diff --git a/glance/tests/unit/async/test_taskflow_executor.py b/glance/tests/unit/async/test_taskflow_executor.py deleted file mode 100644 index 6b722714..00000000 --- a/glance/tests/unit/async/test_taskflow_executor.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -import glance_store -from oslo_config import cfg -from taskflow import engines - -from glance.async import taskflow_executor -from glance import domain -import glance.tests.utils as test_utils - - -CONF = cfg.CONF -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' - - -class TestTaskExecutor(test_utils.BaseTestCase): - - def setUp(self): - super(TestTaskExecutor, self).setUp() - - glance_store.register_opts(CONF) - self.config(default_store='file', - stores=['file', 'http'], - filesystem_store_datadir=self.test_dir, - group="glance_store") - glance_store.create_stores(CONF) - - self.config(engine_mode='serial', - group='taskflow_executor') - - self.context = mock.Mock() - self.task_repo = mock.Mock() - self.image_repo = mock.Mock() - self.image_factory = mock.Mock() - - task_input = { - "import_from": "http://cloud.foo/image.qcow2", - "import_from_format": "qcow2", - "image_properties": {'disk_format': 'qcow2', - 'container_format': 'bare'} - } - task_ttl = CONF.task.task_time_to_live - - self.task_type = 'import' - self.task_factory = domain.TaskFactory() - self.task = self.task_factory.new_task(self.task_type, TENANT1, - task_time_to_live=task_ttl, - task_input=task_input) - - self.executor = taskflow_executor.TaskExecutor( - self.context, - self.task_repo, - self.image_repo, - self.image_factory) - - def test_begin_processing(self): - with mock.patch.object(engines, 'load') as load_mock: - engine = mock.Mock() - load_mock.return_value = engine - self.task_repo.get.return_value = self.task - self.executor.begin_processing(self.task.task_id) - - # assert the call - self.assertEqual(1, load_mock.call_count) - self.assertEqual(1, engine.run.call_count) - - def test_task_fail(self): - with mock.patch.object(engines, 'load') as load_mock: - engine = mock.Mock() - load_mock.return_value = engine - engine.run.side_effect = RuntimeError - self.task_repo.get.return_value = self.task - self.assertRaises(RuntimeError, self.executor.begin_processing, - 
self.task.task_id) - self.assertEqual('failure', self.task.status) - self.task_repo.save.assert_called_with(self.task) diff --git a/glance/tests/unit/base.py b/glance/tests/unit/base.py deleted file mode 100644 index 2777a77f..00000000 --- a/glance/tests/unit/base.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import glance_store as store -from glance_store import location -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_db import options -from oslo_serialization import jsonutils - -from glance.tests import stubs -from glance.tests import utils as test_utils - -CONF = cfg.CONF - - -class StoreClearingUnitTest(test_utils.BaseTestCase): - - def setUp(self): - super(StoreClearingUnitTest, self).setUp() - # Ensure stores + locations cleared - location.SCHEME_TO_CLS_MAP = {} - - self._create_stores() - self.addCleanup(setattr, location, 'SCHEME_TO_CLS_MAP', dict()) - - def _create_stores(self, passing_config=True): - """Create known stores. Mock out sheepdog's subprocess dependency - on collie. - - :param passing_config: making store driver passes basic configurations. - :returns: the number of how many store drivers been loaded. 
- """ - store.register_opts(CONF) - - self.config(default_store='filesystem', - filesystem_store_datadir=self.test_dir, - group="glance_store") - - store.create_stores(CONF) - - -class IsolatedUnitTest(StoreClearingUnitTest): - - """ - Unit test case that establishes a mock environment within - a testing directory (in isolation) - """ - registry = None - - def setUp(self): - super(IsolatedUnitTest, self).setUp() - options.set_defaults(CONF, connection='sqlite://') - lockutils.set_defaults(os.path.join(self.test_dir)) - - self.config(debug=False) - - self.config(default_store='filesystem', - filesystem_store_datadir=self.test_dir, - group="glance_store") - - store.create_stores() - stubs.stub_out_registry_and_store_server(self.stubs, - self.test_dir, - registry=self.registry) - - def set_policy_rules(self, rules): - fap = open(CONF.oslo_policy.policy_file, 'w') - fap.write(jsonutils.dumps(rules)) - fap.close() diff --git a/glance/tests/unit/common/__init__.py b/glance/tests/unit/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/unit/common/scripts/__init__.py b/glance/tests/unit/common/scripts/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/unit/common/scripts/image_import/__init__.py b/glance/tests/unit/common/scripts/image_import/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/unit/common/scripts/image_import/test_main.py b/glance/tests/unit/common/scripts/image_import/test_main.py deleted file mode 100644 index d9e6b134..00000000 --- a/glance/tests/unit/common/scripts/image_import/test_main.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from six.moves import urllib - -import glance.common.exception as exception -from glance.common.scripts.image_import import main as image_import_script -from glance.common.scripts import utils -from glance.common import store_utils - -import glance.tests.utils as test_utils - - -class TestImageImport(test_utils.BaseTestCase): - - def setUp(self): - super(TestImageImport, self).setUp() - - def test_run(self): - with mock.patch.object(image_import_script, - '_execute') as mock_execute: - task_id = mock.ANY - context = mock.ANY - task_repo = mock.ANY - image_repo = mock.ANY - image_factory = mock.ANY - image_import_script.run(task_id, context, task_repo, image_repo, - image_factory) - - mock_execute.assert_called_once_with(task_id, task_repo, image_repo, - image_factory) - - def test_import_image(self): - image_id = mock.ANY - image = mock.Mock(image_id=image_id) - image_repo = mock.Mock() - image_repo.get.return_value = image - image_factory = mock.ANY - task_input = mock.Mock(image_properties=mock.ANY) - uri = mock.ANY - with mock.patch.object(image_import_script, - 'create_image') as mock_create_image: - with mock.patch.object(image_import_script, - 'set_image_data') as mock_set_img_data: - mock_create_image.return_value = image - self.assertEqual( - image_id, - image_import_script.import_image(image_repo, image_factory, - task_input, None, uri)) - # Check image is in saving state before image_repo.save called - self.assertEqual('saving', image.status) - self.assertTrue(image_repo.save.called) - 
mock_set_img_data.assert_called_once_with(image, uri, None) - self.assertTrue(image_repo.get.called) - self.assertTrue(image_repo.save.called) - - def test_create_image(self): - image = mock.ANY - image_repo = mock.Mock() - image_factory = mock.Mock() - image_factory.new_image.return_value = image - - # Note: include some base properties to ensure no error while - # attempting to verify them - image_properties = {'disk_format': 'foo', - 'id': 'bar'} - - self.assertEqual(image, - image_import_script.create_image(image_repo, - image_factory, - image_properties, - None)) - - @mock.patch.object(utils, 'get_image_data_iter') - def test_set_image_data_http(self, mock_image_iter): - uri = 'http://www.example.com' - image = mock.Mock() - mock_image_iter.return_value = test_utils.FakeHTTPResponse() - self.assertIsNone(image_import_script.set_image_data(image, - uri, - None)) - - def test_set_image_data_http_error(self): - uri = 'blahhttp://www.example.com' - image = mock.Mock() - self.assertRaises(urllib.error.URLError, - image_import_script.set_image_data, image, uri, None) - - @mock.patch.object(image_import_script, 'create_image') - @mock.patch.object(image_import_script, 'set_image_data') - @mock.patch.object(store_utils, 'delete_image_location_from_backend') - def test_import_image_failed_with_expired_token( - self, mock_delete_data, mock_set_img_data, mock_create_image): - image_id = mock.ANY - locations = ['location'] - image = mock.Mock(image_id=image_id, locations=locations) - image_repo = mock.Mock() - image_repo.get.side_effect = [image, exception.NotAuthenticated] - image_factory = mock.ANY - task_input = mock.Mock(image_properties=mock.ANY) - uri = mock.ANY - - mock_create_image.return_value = image - self.assertRaises(exception.NotAuthenticated, - image_import_script.import_image, - image_repo, image_factory, - task_input, None, uri) - self.assertEqual(1, mock_set_img_data.call_count) - mock_delete_data.assert_called_once_with( - mock_create_image().context, 
image_id, 'location') diff --git a/glance/tests/unit/common/scripts/test_scripts_utils.py b/glance/tests/unit/common/scripts/test_scripts_utils.py deleted file mode 100644 index aacbe011..00000000 --- a/glance/tests/unit/common/scripts/test_scripts_utils.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from six.moves import urllib - -from glance.common import exception -from glance.common.scripts import utils as script_utils -import glance.tests.utils as test_utils - - -class TestScriptsUtils(test_utils.BaseTestCase): - def setUp(self): - super(TestScriptsUtils, self).setUp() - - def test_get_task(self): - task = mock.ANY - task_repo = mock.Mock(return_value=task) - task_id = mock.ANY - self.assertEqual(task, script_utils.get_task(task_repo, task_id)) - - def test_unpack_task_input(self): - task_input = {"import_from": "foo", - "import_from_format": "bar", - "image_properties": "baz"} - task = mock.Mock(task_input=task_input) - self.assertEqual(task_input, - script_utils.unpack_task_input(task)) - - def test_unpack_task_input_error(self): - task_input1 = {"import_from_format": "bar", "image_properties": "baz"} - task_input2 = {"import_from": "foo", "image_properties": "baz"} - task_input3 = {"import_from": "foo", "import_from_format": "bar"} - task1 = mock.Mock(task_input=task_input1) - task2 = mock.Mock(task_input=task_input2) - task3 = 
mock.Mock(task_input=task_input3) - self.assertRaises(exception.Invalid, - script_utils.unpack_task_input, task1) - self.assertRaises(exception.Invalid, - script_utils.unpack_task_input, task2) - self.assertRaises(exception.Invalid, - script_utils.unpack_task_input, task3) - - def test_set_base_image_properties(self): - properties = {} - script_utils.set_base_image_properties(properties) - self.assertIn('disk_format', properties) - self.assertIn('container_format', properties) - self.assertEqual('qcow2', properties['disk_format']) - self.assertEqual('bare', properties['container_format']) - - def test_set_base_image_properties_none(self): - properties = None - script_utils.set_base_image_properties(properties) - self.assertIsNone(properties) - - def test_set_base_image_properties_not_empty(self): - properties = {'disk_format': 'vmdk', 'container_format': 'bare'} - script_utils.set_base_image_properties(properties) - self.assertIn('disk_format', properties) - self.assertIn('container_format', properties) - self.assertEqual('vmdk', properties.get('disk_format')) - self.assertEqual('bare', properties.get('container_format')) - - def test_validate_location_http(self): - location = 'http://example.com' - self.assertEqual(location, - script_utils.validate_location_uri(location)) - - def test_validate_location_https(self): - location = 'https://example.com' - self.assertEqual(location, - script_utils.validate_location_uri(location)) - - def test_validate_location_none_error(self): - self.assertRaises(exception.BadStoreUri, - script_utils.validate_location_uri, '') - - def test_validate_location_file_location_error(self): - self.assertRaises(exception.BadStoreUri, - script_utils.validate_location_uri, "file:///tmp") - self.assertRaises(exception.BadStoreUri, - script_utils.validate_location_uri, - "filesystem:///tmp") - - def test_validate_location_unsupported_error(self): - location = 'swift' - self.assertRaises(urllib.error.URLError, - script_utils.validate_location_uri, 
location) - - location = 'swift+http' - self.assertRaises(urllib.error.URLError, - script_utils.validate_location_uri, location) - - location = 'swift+https' - self.assertRaises(urllib.error.URLError, - script_utils.validate_location_uri, location) - - location = 'swift+config' - self.assertRaises(urllib.error.URLError, - script_utils.validate_location_uri, location) - - location = 'vsphere' - self.assertRaises(urllib.error.URLError, - script_utils.validate_location_uri, location) - - location = 'sheepdog://' - self.assertRaises(urllib.error.URLError, - script_utils.validate_location_uri, location) - - location = 'rbd://' - self.assertRaises(urllib.error.URLError, - script_utils.validate_location_uri, location) - - location = 'cinder://' - self.assertRaises(urllib.error.URLError, - script_utils.validate_location_uri, location) diff --git a/glance/tests/unit/common/test_client.py b/glance/tests/unit/common/test_client.py deleted file mode 100644 index 7cf1ea87..00000000 --- a/glance/tests/unit/common/test_client.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from mox3 import mox -from six.moves import http_client -import testtools - -from glance.common import auth -from glance.common import client -from glance.tests import utils - - -class TestClient(testtools.TestCase): - - def setUp(self): - super(TestClient, self).setUp() - self.mock = mox.Mox() - self.mock.StubOutWithMock(http_client.HTTPConnection, 'request') - self.mock.StubOutWithMock(http_client.HTTPConnection, 'getresponse') - - self.endpoint = 'example.com' - self.client = client.BaseClient(self.endpoint, port=9191, - auth_token=u'abc123') - - def tearDown(self): - super(TestClient, self).tearDown() - self.mock.UnsetStubs() - - def test_make_auth_plugin(self): - creds = {'strategy': 'keystone'} - insecure = False - configure_via_auth = True - - self.mock.StubOutWithMock(auth, 'get_plugin_from_strategy') - auth.get_plugin_from_strategy('keystone', creds, insecure, - configure_via_auth) - - self.mock.ReplayAll() - - self.client.make_auth_plugin(creds, insecure) - - self.mock.VerifyAll() - - def test_http_encoding_headers(self): - http_client.HTTPConnection.request( - mox.IgnoreArg(), - mox.IgnoreArg(), - mox.IgnoreArg(), - mox.IgnoreArg()) - - # Lets fake the response - # returned by http_client - fake = utils.FakeHTTPResponse(data=b"Ok") - http_client.HTTPConnection.getresponse().AndReturn(fake) - self.mock.ReplayAll() - - headers = {"test": u'ni\xf1o'} - resp = self.client.do_request('GET', '/v1/images/detail', - headers=headers) - self.assertEqual(fake, resp) - - def test_http_encoding_params(self): - http_client.HTTPConnection.request( - mox.IgnoreArg(), - mox.IgnoreArg(), - mox.IgnoreArg(), - mox.IgnoreArg()) - - # Lets fake the response - # returned by http_client - fake = utils.FakeHTTPResponse(data=b"Ok") - http_client.HTTPConnection.getresponse().AndReturn(fake) - self.mock.ReplayAll() - - params = {"test": u'ni\xf1o'} - resp = self.client.do_request('GET', '/v1/images/detail', - params=params) - self.assertEqual(fake, resp) diff --git 
a/glance/tests/unit/common/test_config.py b/glance/tests/unit/common/test_config.py deleted file mode 100644 index 0888b050..00000000 --- a/glance/tests/unit/common/test_config.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os.path -import shutil - -import fixtures -import oslo_middleware -from oslotest import moxstubout - -from glance.api.middleware import context -from glance.common import config -from glance.tests import utils as test_utils - - -class TestPasteApp(test_utils.BaseTestCase): - - def setUp(self): - super(TestPasteApp, self).setUp() - mox_fixture = self.useFixture(moxstubout.MoxStubout()) - self.stubs = mox_fixture.stubs - - def _do_test_load_paste_app(self, - expected_app_type, - make_paste_file=True, - paste_flavor=None, - paste_config_file=None, - paste_append=None): - - def _writeto(path, str): - with open(path, 'w') as f: - f.write(str or '') - f.flush() - - def _appendto(orig, copy, str): - shutil.copy(orig, copy) - with open(copy, 'a') as f: - f.write(str or '') - f.flush() - - self.config(flavor=paste_flavor, - config_file=paste_config_file, - group='paste_deploy') - - temp_dir = self.useFixture(fixtures.TempDir()).path - temp_file = os.path.join(temp_dir, 'testcfg.conf') - - _writeto(temp_file, '[DEFAULT]\n') - - config.parse_args(['--config-file', temp_file]) - - paste_to = temp_file.replace('.conf', '-paste.ini') - if not 
paste_config_file and make_paste_file: - paste_from = os.path.join(os.getcwd(), - 'etc/glance-registry-paste.ini') - _appendto(paste_from, paste_to, paste_append) - - app = config.load_paste_app('glance-registry') - - self.assertIsInstance(app, expected_app_type) - - def test_load_paste_app(self): - expected_middleware = oslo_middleware.Healthcheck - self._do_test_load_paste_app(expected_middleware) - - def test_load_paste_app_paste_config_not_found(self): - expected_middleware = context.UnauthenticatedContextMiddleware - self.assertRaises(RuntimeError, self._do_test_load_paste_app, - expected_middleware, make_paste_file=False) - - def test_load_paste_app_with_paste_flavor(self): - pipeline = ('[pipeline:glance-registry-incomplete]\n' - 'pipeline = context registryapp') - expected_middleware = context.ContextMiddleware - self._do_test_load_paste_app(expected_middleware, - paste_flavor='incomplete', - paste_append=pipeline) - - def test_load_paste_app_with_paste_config_file(self): - paste_config_file = os.path.join(os.getcwd(), - 'etc/glance-registry-paste.ini') - expected_middleware = oslo_middleware.Healthcheck - self._do_test_load_paste_app(expected_middleware, - paste_config_file=paste_config_file) - - def test_get_path_non_exist(self): - self.assertRaises(RuntimeError, config._get_deployment_config_file) - - -class TestDefaultConfig(test_utils.BaseTestCase): - - def setUp(self): - super(TestDefaultConfig, self).setUp() - self.CONF = config.cfg.CONF - self.CONF.import_group('profiler', 'glance.common.wsgi') - - def test_osprofiler_disabled(self): - self.assertFalse(self.CONF.profiler.enabled) - self.assertFalse(self.CONF.profiler.trace_sqlalchemy) diff --git a/glance/tests/unit/common/test_exception.py b/glance/tests/unit/common/test_exception.py deleted file mode 100644 index 5681432c..00000000 --- a/glance/tests/unit/common/test_exception.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import encodeutils -import six -from six.moves import http_client as http - -from glance.common import exception -from glance.tests import utils as test_utils - - -class GlanceExceptionTestCase(test_utils.BaseTestCase): - - def test_default_error_msg(self): - class FakeGlanceException(exception.GlanceException): - message = "default message" - - exc = FakeGlanceException() - self.assertEqual('default message', - encodeutils.exception_to_unicode(exc)) - - def test_specified_error_msg(self): - msg = exception.GlanceException('test') - self.assertIn('test', encodeutils.exception_to_unicode(msg)) - - def test_default_error_msg_with_kwargs(self): - class FakeGlanceException(exception.GlanceException): - message = "default message: %(code)s" - - exc = FakeGlanceException(code=int(http.INTERNAL_SERVER_ERROR)) - self.assertEqual("default message: 500", - encodeutils.exception_to_unicode(exc)) - - def test_specified_error_msg_with_kwargs(self): - msg = exception.GlanceException('test: %(code)s', - code=int(http.INTERNAL_SERVER_ERROR)) - self.assertIn('test: 500', encodeutils.exception_to_unicode(msg)) - - def test_non_unicode_error_msg(self): - exc = exception.GlanceException(str('test')) - self.assertIsInstance(encodeutils.exception_to_unicode(exc), - six.text_type) diff --git a/glance/tests/unit/common/test_location_strategy.py b/glance/tests/unit/common/test_location_strategy.py deleted file mode 100644 index 
bef1a9f9..00000000 --- a/glance/tests/unit/common/test_location_strategy.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright 2014 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -import stevedore - -from glance.common import location_strategy -from glance.common.location_strategy import location_order -from glance.common.location_strategy import store_type -from glance.tests.unit import base - - -class TestLocationStrategy(base.IsolatedUnitTest): - """Test routines in glance.common.location_strategy""" - - def _set_original_strategies(self, original_strategies): - for name in location_strategy._available_strategies.keys(): - if name not in original_strategies: - del location_strategy._available_strategies[name] - - def setUp(self): - super(TestLocationStrategy, self).setUp() - original_strategies = ['location_order', 'store_type'] - self.addCleanup(self._set_original_strategies, original_strategies) - - def test_load_strategy_modules(self): - modules = location_strategy._load_strategies() - # By default we have two built-in strategy modules. 
- self.assertEqual(2, len(modules)) - self.assertEqual(set(['location_order', 'store_type']), - set(modules.keys())) - self.assertEqual(location_strategy._available_strategies, modules) - - def test_load_strategy_module_with_deduplicating(self): - modules = ['module1', 'module2'] - - def _fake_stevedore_extension_manager(*args, **kwargs): - ret = lambda: None - ret.names = lambda: modules - return ret - - def _fake_stevedore_driver_manager(*args, **kwargs): - ret = lambda: None - ret.driver = lambda: None - ret.driver.__name__ = kwargs['name'] - # Module 1 and 2 has a same strategy name - ret.driver.get_strategy_name = lambda: 'module_name' - ret.driver.init = lambda: None - return ret - - self.stub = self.stubs.Set(stevedore.extension, "ExtensionManager", - _fake_stevedore_extension_manager) - self.stub = self.stubs.Set(stevedore.driver, "DriverManager", - _fake_stevedore_driver_manager) - - loaded_modules = location_strategy._load_strategies() - self.assertEqual(1, len(loaded_modules)) - self.assertIn('module_name', loaded_modules) - # Skipped module #2, duplicated one. 
- self.assertEqual('module1', loaded_modules['module_name'].__name__) - - def test_load_strategy_module_with_init_exception(self): - modules = ['module_init_exception', 'module_good'] - - def _fake_stevedore_extension_manager(*args, **kwargs): - ret = lambda: None - ret.names = lambda: modules - return ret - - def _fake_stevedore_driver_manager(*args, **kwargs): - if kwargs['name'] == 'module_init_exception': - raise Exception('strategy module failed to initialize.') - else: - ret = lambda: None - ret.driver = lambda: None - ret.driver.__name__ = kwargs['name'] - ret.driver.get_strategy_name = lambda: kwargs['name'] - ret.driver.init = lambda: None - return ret - - self.stub = self.stubs.Set(stevedore.extension, "ExtensionManager", - _fake_stevedore_extension_manager) - self.stub = self.stubs.Set(stevedore.driver, "DriverManager", - _fake_stevedore_driver_manager) - - loaded_modules = location_strategy._load_strategies() - self.assertEqual(1, len(loaded_modules)) - self.assertIn('module_good', loaded_modules) - # Skipped module #1, initialize failed one. 
- self.assertEqual('module_good', loaded_modules['module_good'].__name__) - - def test_verify_valid_location_strategy(self): - for strategy_name in ['location_order', 'store_type']: - self.config(location_strategy=strategy_name) - location_strategy.verify_location_strategy() - - def test_get_ordered_locations_with_none_or_empty_locations(self): - self.assertEqual([], location_strategy.get_ordered_locations(None)) - self.assertEqual([], location_strategy.get_ordered_locations([])) - - def test_get_ordered_locations(self): - self.config(location_strategy='location_order') - - original_locs = [{'url': 'loc1'}, {'url': 'loc2'}] - ordered_locs = location_strategy.get_ordered_locations(original_locs) - - # Original location list should remain unchanged - self.assertNotEqual(id(original_locs), id(ordered_locs)) - self.assertEqual(original_locs, ordered_locs) - - def test_choose_best_location_with_none_or_empty_locations(self): - self.assertIsNone(location_strategy.choose_best_location(None)) - self.assertIsNone(location_strategy.choose_best_location([])) - - def test_choose_best_location(self): - self.config(location_strategy='location_order') - - original_locs = [{'url': 'loc1'}, {'url': 'loc2'}] - best_loc = location_strategy.choose_best_location(original_locs) - - # Deep copy protect original location. - self.assertNotEqual(id(original_locs), id(best_loc)) - self.assertEqual(original_locs[0], best_loc) - - -class TestLocationOrderStrategyModule(base.IsolatedUnitTest): - """Test routines in glance.common.location_strategy.location_order""" - - def test_get_ordered_locations(self): - original_locs = [{'url': 'loc1'}, {'url': 'loc2'}] - ordered_locs = location_order.get_ordered_locations(original_locs) - # The result will ordered by original natural order. 
- self.assertEqual(original_locs, ordered_locs) - - -class TestStoreTypeStrategyModule(base.IsolatedUnitTest): - """Test routines in glance.common.location_strategy.store_type""" - - def test_get_ordered_locations(self): - self.config(store_type_preference=[' rbd', 'sheepdog ', ' file', - 'swift ', ' http ', 'vmware'], - group='store_type_location_strategy') - locs = [{'url': 'file://image0', 'metadata': {'idx': 3}}, - {'url': 'rbd://image1', 'metadata': {'idx': 0}}, - {'url': 'file://image3', 'metadata': {'idx': 4}}, - {'url': 'swift://image4', 'metadata': {'idx': 6}}, - {'url': 'cinder://image5', 'metadata': {'idx': 9}}, - {'url': 'file://image6', 'metadata': {'idx': 5}}, - {'url': 'rbd://image7', 'metadata': {'idx': 1}}, - {'url': 'vsphere://image9', 'metadata': {'idx': 8}}, - {'url': 'sheepdog://image8', 'metadata': {'idx': 2}}] - ordered_locs = store_type.get_ordered_locations(copy.deepcopy(locs)) - locs.sort(key=lambda loc: loc['metadata']['idx']) - # The result will ordered by preferred store type order. - self.assertEqual(locs, ordered_locs) - - def test_get_ordered_locations_with_invalid_store_name(self): - self.config(store_type_preference=[' rbd', 'sheepdog ', 'invalid', - 'swift ', ' http '], - group='store_type_location_strategy') - locs = [{'url': 'file://image0', 'metadata': {'idx': 4}}, - {'url': 'rbd://image1', 'metadata': {'idx': 0}}, - {'url': 'file://image3', 'metadata': {'idx': 5}}, - {'url': 'swift://image4', 'metadata': {'idx': 3}}, - {'url': 'cinder://image5', 'metadata': {'idx': 6}}, - {'url': 'file://image6', 'metadata': {'idx': 7}}, - {'url': 'rbd://image7', 'metadata': {'idx': 1}}, - {'url': 'sheepdog://image8', 'metadata': {'idx': 2}}] - ordered_locs = store_type.get_ordered_locations(copy.deepcopy(locs)) - locs.sort(key=lambda loc: loc['metadata']['idx']) - # The result will ordered by preferred store type order. 
- self.assertEqual(locs, ordered_locs) diff --git a/glance/tests/unit/common/test_property_utils.py b/glance/tests/unit/common/test_property_utils.py deleted file mode 100644 index f526b0bf..00000000 --- a/glance/tests/unit/common/test_property_utils.py +++ /dev/null @@ -1,491 +0,0 @@ -# Copyright 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from glance.api import policy -from glance.common import exception -from glance.common import property_utils -import glance.context -from glance.tests.unit import base - -CONFIG_SECTIONS = [ - '^x_owner_.*', - 'spl_create_prop', - 'spl_read_prop', - 'spl_read_only_prop', - 'spl_update_prop', - 'spl_update_only_prop', - 'spl_delete_prop', - 'spl_delete_empty_prop', - '^x_all_permitted.*', - '^x_none_permitted.*', - 'x_none_read', - 'x_none_update', - 'x_none_delete', - 'x_case_insensitive', - 'x_foo_matcher', - 'x_foo_*', - '.*' -] - - -def create_context(policy, roles=None): - if roles is None: - roles = [] - return glance.context.RequestContext(roles=roles, - policy_enforcer=policy) - - -class TestPropertyRulesWithRoles(base.IsolatedUnitTest): - - def setUp(self): - super(TestPropertyRulesWithRoles, self).setUp() - self.set_property_protections() - self.policy = policy.Enforcer() - - def test_is_property_protections_enabled_true(self): - 
self.config(property_protection_file="property-protections.conf") - self.assertTrue(property_utils.is_property_protection_enabled()) - - def test_is_property_protections_enabled_false(self): - self.config(property_protection_file=None) - self.assertFalse(property_utils.is_property_protection_enabled()) - - def test_property_protection_file_doesnt_exist(self): - self.config(property_protection_file='fake-file.conf') - self.assertRaises(exception.InvalidPropertyProtectionConfiguration, - property_utils.PropertyRules) - - def test_property_protection_with_mutually_exclusive_rule(self): - exclusive_rules = {'.*': {'create': ['@', '!'], - 'read': ['fake-role'], - 'update': ['fake-role'], - 'delete': ['fake-role']}} - self.set_property_protection_rules(exclusive_rules) - self.assertRaises(exception.InvalidPropertyProtectionConfiguration, - property_utils.PropertyRules) - - def test_property_protection_with_malformed_rule(self): - malformed_rules = {'^[0-9)': {'create': ['fake-role'], - 'read': ['fake-role'], - 'update': ['fake-role'], - 'delete': ['fake-role']}} - self.set_property_protection_rules(malformed_rules) - self.assertRaises(exception.InvalidPropertyProtectionConfiguration, - property_utils.PropertyRules) - - def test_property_protection_with_missing_operation(self): - rules_with_missing_operation = {'^[0-9]': {'create': ['fake-role'], - 'update': ['fake-role'], - 'delete': ['fake-role']}} - self.set_property_protection_rules(rules_with_missing_operation) - self.assertRaises(exception.InvalidPropertyProtectionConfiguration, - property_utils.PropertyRules) - - def test_property_protection_with_misspelt_operation(self): - rules_with_misspelt_operation = {'^[0-9]': {'create': ['fake-role'], - 'rade': ['fake-role'], - 'update': ['fake-role'], - 'delete': ['fake-role']}} - self.set_property_protection_rules(rules_with_misspelt_operation) - self.assertRaises(exception.InvalidPropertyProtectionConfiguration, - property_utils.PropertyRules) - - def 
test_property_protection_with_whitespace(self): - rules_whitespace = { - '^test_prop.*': { - 'create': ['member ,fake-role'], - 'read': ['fake-role, member'], - 'update': ['fake-role, member'], - 'delete': ['fake-role, member'] - } - } - self.set_property_protection_rules(rules_whitespace) - self.rules_checker = property_utils.PropertyRules() - self.assertTrue(self.rules_checker.check_property_rules('test_prop_1', - 'read', create_context(self.policy, ['member']))) - self.assertTrue(self.rules_checker.check_property_rules('test_prop_1', - 'read', create_context(self.policy, ['fake-role']))) - - def test_check_property_rules_invalid_action(self): - self.rules_checker = property_utils.PropertyRules(self.policy) - self.assertFalse(self.rules_checker.check_property_rules('test_prop', - 'hall', create_context(self.policy, ['admin']))) - - def test_check_property_rules_read_permitted_admin_role(self): - self.rules_checker = property_utils.PropertyRules(self.policy) - self.assertTrue(self.rules_checker.check_property_rules('test_prop', - 'read', create_context(self.policy, ['admin']))) - - def test_check_property_rules_read_permitted_specific_role(self): - self.rules_checker = property_utils.PropertyRules(self.policy) - self.assertTrue(self.rules_checker.check_property_rules( - 'x_owner_prop', 'read', - create_context(self.policy, ['member']))) - - def test_check_property_rules_read_unpermitted_role(self): - self.rules_checker = property_utils.PropertyRules(self.policy) - self.assertFalse(self.rules_checker.check_property_rules('test_prop', - 'read', create_context(self.policy, ['member']))) - - def test_check_property_rules_create_permitted_admin_role(self): - self.rules_checker = property_utils.PropertyRules(self.policy) - self.assertTrue(self.rules_checker.check_property_rules('test_prop', - 'create', create_context(self.policy, ['admin']))) - - def test_check_property_rules_create_permitted_specific_role(self): - self.rules_checker = 
property_utils.PropertyRules(self.policy) - self.assertTrue(self.rules_checker.check_property_rules( - 'x_owner_prop', 'create', - create_context(self.policy, ['member']))) - - def test_check_property_rules_create_unpermitted_role(self): - self.rules_checker = property_utils.PropertyRules(self.policy) - self.assertFalse(self.rules_checker.check_property_rules('test_prop', - 'create', create_context(self.policy, ['member']))) - - def test_check_property_rules_update_permitted_admin_role(self): - self.rules_checker = property_utils.PropertyRules(self.policy) - self.assertTrue(self.rules_checker.check_property_rules('test_prop', - 'update', create_context(self.policy, ['admin']))) - - def test_check_property_rules_update_permitted_specific_role(self): - self.rules_checker = property_utils.PropertyRules(self.policy) - self.assertTrue(self.rules_checker.check_property_rules( - 'x_owner_prop', 'update', - create_context(self.policy, ['member']))) - - def test_check_property_rules_update_unpermitted_role(self): - self.rules_checker = property_utils.PropertyRules(self.policy) - self.assertFalse(self.rules_checker.check_property_rules('test_prop', - 'update', create_context(self.policy, ['member']))) - - def test_check_property_rules_delete_permitted_admin_role(self): - self.rules_checker = property_utils.PropertyRules(self.policy) - self.assertTrue(self.rules_checker.check_property_rules('test_prop', - 'delete', create_context(self.policy, ['admin']))) - - def test_check_property_rules_delete_permitted_specific_role(self): - self.rules_checker = property_utils.PropertyRules(self.policy) - self.assertTrue(self.rules_checker.check_property_rules( - 'x_owner_prop', 'delete', - create_context(self.policy, ['member']))) - - def test_check_property_rules_delete_unpermitted_role(self): - self.rules_checker = property_utils.PropertyRules(self.policy) - self.assertFalse(self.rules_checker.check_property_rules('test_prop', - 'delete', create_context(self.policy, ['member']))) - - 
def test_property_config_loaded_in_order(self): - """ - Verify the order of loaded config sections matches that from the - configuration file - """ - self.rules_checker = property_utils.PropertyRules(self.policy) - self.assertEqual(CONFIG_SECTIONS, property_utils.CONFIG.sections()) - - def test_property_rules_loaded_in_order(self): - """ - Verify rules are iterable in the same order as read from the config - file - """ - self.rules_checker = property_utils.PropertyRules(self.policy) - for i in range(len(property_utils.CONFIG.sections())): - self.assertEqual(property_utils.CONFIG.sections()[i], - self.rules_checker.rules[i][0].pattern) - - def test_check_property_rules_create_all_permitted(self): - self.rules_checker = property_utils.PropertyRules() - self.assertTrue(self.rules_checker.check_property_rules( - 'x_all_permitted', 'create', create_context(self.policy, ['']))) - - def test_check_property_rules_read_all_permitted(self): - self.rules_checker = property_utils.PropertyRules() - self.assertTrue(self.rules_checker.check_property_rules( - 'x_all_permitted', 'read', create_context(self.policy, ['']))) - - def test_check_property_rules_update_all_permitted(self): - self.rules_checker = property_utils.PropertyRules() - self.assertTrue(self.rules_checker.check_property_rules( - 'x_all_permitted', 'update', create_context(self.policy, ['']))) - - def test_check_property_rules_delete_all_permitted(self): - self.rules_checker = property_utils.PropertyRules() - self.assertTrue(self.rules_checker.check_property_rules( - 'x_all_permitted', 'delete', create_context(self.policy, ['']))) - - def test_check_property_rules_create_none_permitted(self): - self.rules_checker = property_utils.PropertyRules() - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_permitted', 'create', create_context(self.policy, ['']))) - - def test_check_property_rules_read_none_permitted(self): - self.rules_checker = property_utils.PropertyRules() - 
self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_permitted', 'read', create_context(self.policy, ['']))) - - def test_check_property_rules_update_none_permitted(self): - self.rules_checker = property_utils.PropertyRules() - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_permitted', 'update', create_context(self.policy, ['']))) - - def test_check_property_rules_delete_none_permitted(self): - self.rules_checker = property_utils.PropertyRules() - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_permitted', 'delete', create_context(self.policy, ['']))) - - def test_check_property_rules_read_none(self): - self.rules_checker = property_utils.PropertyRules() - self.assertTrue(self.rules_checker.check_property_rules( - 'x_none_read', 'create', - create_context(self.policy, ['admin', 'member']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_read', 'read', - create_context(self.policy, ['']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_read', 'update', - create_context(self.policy, ['']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_read', 'delete', - create_context(self.policy, ['']))) - - def test_check_property_rules_update_none(self): - self.rules_checker = property_utils.PropertyRules() - self.assertTrue(self.rules_checker.check_property_rules( - 'x_none_update', 'create', - create_context(self.policy, ['admin', 'member']))) - self.assertTrue(self.rules_checker.check_property_rules( - 'x_none_update', 'read', - create_context(self.policy, ['admin', 'member']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_update', 'update', - create_context(self.policy, ['']))) - self.assertTrue(self.rules_checker.check_property_rules( - 'x_none_update', 'delete', - create_context(self.policy, ['admin', 'member']))) - - def test_check_property_rules_delete_none(self): - self.rules_checker = property_utils.PropertyRules() - 
self.assertTrue(self.rules_checker.check_property_rules( - 'x_none_delete', 'create', - create_context(self.policy, ['admin', 'member']))) - self.assertTrue(self.rules_checker.check_property_rules( - 'x_none_delete', 'read', - create_context(self.policy, ['admin', 'member']))) - self.assertTrue(self.rules_checker.check_property_rules( - 'x_none_delete', 'update', - create_context(self.policy, ['admin', 'member']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_delete', 'delete', - create_context(self.policy, ['']))) - - def test_check_return_first_match(self): - self.rules_checker = property_utils.PropertyRules() - self.assertFalse(self.rules_checker.check_property_rules( - 'x_foo_matcher', 'create', - create_context(self.policy, ['']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_foo_matcher', 'read', - create_context(self.policy, ['']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_foo_matcher', 'update', - create_context(self.policy, ['']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_foo_matcher', 'delete', - create_context(self.policy, ['']))) - - def test_check_case_insensitive_property_rules(self): - self.rules_checker = property_utils.PropertyRules() - self.assertTrue(self.rules_checker.check_property_rules( - 'x_case_insensitive', 'create', - create_context(self.policy, ['member']))) - self.assertTrue(self.rules_checker.check_property_rules( - 'x_case_insensitive', 'read', - create_context(self.policy, ['member']))) - self.assertTrue(self.rules_checker.check_property_rules( - 'x_case_insensitive', 'update', - create_context(self.policy, ['member']))) - self.assertTrue(self.rules_checker.check_property_rules( - 'x_case_insensitive', 'delete', - create_context(self.policy, ['member']))) - - -class TestPropertyRulesWithPolicies(base.IsolatedUnitTest): - - def setUp(self): - super(TestPropertyRulesWithPolicies, self).setUp() - self.set_property_protections(use_policies=True) 
- self.policy = policy.Enforcer() - self.rules_checker = property_utils.PropertyRules(self.policy) - - def test_check_property_rules_create_permitted_specific_policy(self): - self.assertTrue(self.rules_checker.check_property_rules( - 'spl_creator_policy', 'create', - create_context(self.policy, ['spl_role']))) - - def test_check_property_rules_create_unpermitted_policy(self): - self.assertFalse(self.rules_checker.check_property_rules( - 'spl_creator_policy', 'create', - create_context(self.policy, ['fake-role']))) - - def test_check_property_rules_read_permitted_specific_policy(self): - self.assertTrue(self.rules_checker.check_property_rules( - 'spl_creator_policy', 'read', - create_context(self.policy, ['spl_role']))) - - def test_check_property_rules_read_unpermitted_policy(self): - self.assertFalse(self.rules_checker.check_property_rules( - 'spl_creator_policy', 'read', - create_context(self.policy, ['fake-role']))) - - def test_check_property_rules_update_permitted_specific_policy(self): - self.assertTrue(self.rules_checker.check_property_rules( - 'spl_creator_policy', 'update', - create_context(self.policy, ['admin']))) - - def test_check_property_rules_update_unpermitted_policy(self): - self.assertFalse(self.rules_checker.check_property_rules( - 'spl_creator_policy', 'update', - create_context(self.policy, ['fake-role']))) - - def test_check_property_rules_delete_permitted_specific_policy(self): - self.assertTrue(self.rules_checker.check_property_rules( - 'spl_creator_policy', 'delete', - create_context(self.policy, ['admin']))) - - def test_check_property_rules_delete_unpermitted_policy(self): - self.assertFalse(self.rules_checker.check_property_rules( - 'spl_creator_policy', 'delete', - create_context(self.policy, ['fake-role']))) - - def test_property_protection_with_malformed_rule(self): - malformed_rules = {'^[0-9)': {'create': ['fake-policy'], - 'read': ['fake-policy'], - 'update': ['fake-policy'], - 'delete': ['fake-policy']}} - 
self.set_property_protection_rules(malformed_rules) - self.assertRaises(exception.InvalidPropertyProtectionConfiguration, - property_utils.PropertyRules) - - def test_property_protection_with_multiple_policies(self): - malformed_rules = {'^x_.*': {'create': ['fake-policy, another_pol'], - 'read': ['fake-policy'], - 'update': ['fake-policy'], - 'delete': ['fake-policy']}} - self.set_property_protection_rules(malformed_rules) - self.assertRaises(exception.InvalidPropertyProtectionConfiguration, - property_utils.PropertyRules) - - def test_check_property_rules_create_all_permitted(self): - self.rules_checker = property_utils.PropertyRules() - self.assertTrue(self.rules_checker.check_property_rules( - 'x_all_permitted', 'create', create_context(self.policy, ['']))) - - def test_check_property_rules_read_all_permitted(self): - self.rules_checker = property_utils.PropertyRules() - self.assertTrue(self.rules_checker.check_property_rules( - 'x_all_permitted', 'read', create_context(self.policy, ['']))) - - def test_check_property_rules_update_all_permitted(self): - self.rules_checker = property_utils.PropertyRules() - self.assertTrue(self.rules_checker.check_property_rules( - 'x_all_permitted', 'update', create_context(self.policy, ['']))) - - def test_check_property_rules_delete_all_permitted(self): - self.rules_checker = property_utils.PropertyRules() - self.assertTrue(self.rules_checker.check_property_rules( - 'x_all_permitted', 'delete', create_context(self.policy, ['']))) - - def test_check_property_rules_create_none_permitted(self): - self.rules_checker = property_utils.PropertyRules() - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_permitted', 'create', create_context(self.policy, ['']))) - - def test_check_property_rules_read_none_permitted(self): - self.rules_checker = property_utils.PropertyRules() - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_permitted', 'read', create_context(self.policy, ['']))) - - def 
test_check_property_rules_update_none_permitted(self): - self.rules_checker = property_utils.PropertyRules() - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_permitted', 'update', create_context(self.policy, ['']))) - - def test_check_property_rules_delete_none_permitted(self): - self.rules_checker = property_utils.PropertyRules() - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_permitted', 'delete', create_context(self.policy, ['']))) - - def test_check_property_rules_read_none(self): - self.rules_checker = property_utils.PropertyRules() - self.assertTrue(self.rules_checker.check_property_rules( - 'x_none_read', 'create', - create_context(self.policy, ['admin', 'member']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_read', 'read', - create_context(self.policy, ['']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_read', 'update', - create_context(self.policy, ['']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_read', 'delete', - create_context(self.policy, ['']))) - - def test_check_property_rules_update_none(self): - self.rules_checker = property_utils.PropertyRules() - self.assertTrue(self.rules_checker.check_property_rules( - 'x_none_update', 'create', - create_context(self.policy, ['admin', 'member']))) - self.assertTrue(self.rules_checker.check_property_rules( - 'x_none_update', 'read', - create_context(self.policy, ['admin', 'member']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_update', 'update', - create_context(self.policy, ['']))) - self.assertTrue(self.rules_checker.check_property_rules( - 'x_none_update', 'delete', - create_context(self.policy, ['admin', 'member']))) - - def test_check_property_rules_delete_none(self): - self.rules_checker = property_utils.PropertyRules() - self.assertTrue(self.rules_checker.check_property_rules( - 'x_none_delete', 'create', - create_context(self.policy, ['admin', 
'member']))) - self.assertTrue(self.rules_checker.check_property_rules( - 'x_none_delete', 'read', - create_context(self.policy, ['admin', 'member']))) - self.assertTrue(self.rules_checker.check_property_rules( - 'x_none_delete', 'update', - create_context(self.policy, ['admin', 'member']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_none_delete', 'delete', - create_context(self.policy, ['']))) - - def test_check_return_first_match(self): - self.rules_checker = property_utils.PropertyRules() - self.assertFalse(self.rules_checker.check_property_rules( - 'x_foo_matcher', 'create', - create_context(self.policy, ['']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_foo_matcher', 'read', - create_context(self.policy, ['']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_foo_matcher', 'update', - create_context(self.policy, ['']))) - self.assertFalse(self.rules_checker.check_property_rules( - 'x_foo_matcher', 'delete', - create_context(self.policy, ['']))) diff --git a/glance/tests/unit/common/test_rpc.py b/glance/tests/unit/common/test_rpc.py deleted file mode 100644 index 81f57aa3..00000000 --- a/glance/tests/unit/common/test_rpc.py +++ /dev/null @@ -1,358 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import datetime - -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -import routes -import six -from six.moves import http_client as http -import webob - -from glance.common import exception -from glance.common import rpc -from glance.common import wsgi -from glance.tests.unit import base -from glance.tests import utils as test_utils - - -class FakeResource(object): - """ - Fake resource defining some methods that - will be called later by the api. - """ - - def get_images(self, context, keyword=None): - return keyword - - def count_images(self, context, images): - return len(images) - - def get_all_images(self, context): - return False - - def raise_value_error(self, context): - raise ValueError("Yep, Just like that!") - - def raise_weird_error(self, context): - class WeirdError(Exception): - pass - raise WeirdError("Weirdness") - - -def create_api(): - deserializer = rpc.RPCJSONDeserializer() - serializer = rpc.RPCJSONSerializer() - controller = rpc.Controller() - controller.register(FakeResource()) - res = wsgi.Resource(controller, deserializer, serializer) - - mapper = routes.Mapper() - mapper.connect("/rpc", controller=res, - conditions=dict(method=["POST"]), - action="__call__") - return test_utils.FakeAuthMiddleware(wsgi.Router(mapper), is_admin=True) - - -class TestRPCController(base.IsolatedUnitTest): - - def setUp(self): - super(TestRPCController, self).setUp() - self.res = FakeResource() - self.controller = rpc.Controller() - self.controller.register(self.res) - - def test_register(self): - res = FakeResource() - controller = rpc.Controller() - controller.register(res) - self.assertIn("get_images", controller._registered) - self.assertIn("get_all_images", controller._registered) - - def test_reigster_filtered(self): - res = FakeResource() - controller = rpc.Controller() - controller.register(res, filtered=["get_all_images"]) - self.assertIn("get_all_images", controller._registered) - - def test_reigster_excluded(self): - res 
= FakeResource() - controller = rpc.Controller() - controller.register(res, excluded=["get_all_images"]) - self.assertIn("get_images", controller._registered) - - def test_reigster_refiner(self): - res = FakeResource() - controller = rpc.Controller() - - # Not callable - self.assertRaises(TypeError, - controller.register, - res, refiner="get_all_images") - - # Filter returns False - controller.register(res, refiner=lambda x: False) - self.assertNotIn("get_images", controller._registered) - self.assertNotIn("get_images", controller._registered) - - # Filter returns True - controller.register(res, refiner=lambda x: True) - self.assertIn("get_images", controller._registered) - self.assertIn("get_images", controller._registered) - - def test_request(self): - api = create_api() - req = webob.Request.blank('/rpc') - req.method = 'POST' - req.body = jsonutils.dump_as_bytes([ - { - "command": "get_images", - "kwargs": {"keyword": 1} - } - ]) - res = req.get_response(api) - returned = jsonutils.loads(res.body) - self.assertIsInstance(returned, list) - self.assertEqual(1, returned[0]) - - def test_request_exc(self): - api = create_api() - req = webob.Request.blank('/rpc') - req.method = 'POST' - req.body = jsonutils.dump_as_bytes([ - { - "command": "get_all_images", - "kwargs": {"keyword": 1} - } - ]) - - # Sending non-accepted keyword - # to get_all_images method - res = req.get_response(api) - returned = jsonutils.loads(res.body) - self.assertIn("_error", returned[0]) - - def test_rpc_errors(self): - api = create_api() - req = webob.Request.blank('/rpc') - req.method = 'POST' - req.content_type = 'application/json' - - # Body is not a list, it should fail - req.body = jsonutils.dump_as_bytes({}) - res = req.get_response(api) - self.assertEqual(http.BAD_REQUEST, res.status_int) - - # cmd is not dict, it should fail. 
- req.body = jsonutils.dump_as_bytes([None]) - res = req.get_response(api) - self.assertEqual(http.BAD_REQUEST, res.status_int) - - # No command key, it should fail. - req.body = jsonutils.dump_as_bytes([{}]) - res = req.get_response(api) - self.assertEqual(http.BAD_REQUEST, res.status_int) - - # kwargs not dict, it should fail. - req.body = jsonutils.dump_as_bytes([{"command": "test", "kwargs": 2}]) - res = req.get_response(api) - self.assertEqual(http.BAD_REQUEST, res.status_int) - - # Command does not exist, it should fail. - req.body = jsonutils.dump_as_bytes([{"command": "test"}]) - res = req.get_response(api) - self.assertEqual(http.NOT_FOUND, res.status_int) - - def test_rpc_exception_propagation(self): - api = create_api() - req = webob.Request.blank('/rpc') - req.method = 'POST' - req.content_type = 'application/json' - - req.body = jsonutils.dump_as_bytes([{"command": "raise_value_error"}]) - res = req.get_response(api) - self.assertEqual(http.OK, res.status_int) - - returned = jsonutils.loads(res.body)[0] - err_cls = 'builtins.ValueError' if six.PY3 else 'exceptions.ValueError' - self.assertEqual(err_cls, returned['_error']['cls']) - - req.body = jsonutils.dump_as_bytes([{"command": "raise_weird_error"}]) - res = req.get_response(api) - self.assertEqual(http.OK, res.status_int) - - returned = jsonutils.loads(res.body)[0] - self.assertEqual('glance.common.exception.RPCError', - returned['_error']['cls']) - - -class TestRPCClient(base.IsolatedUnitTest): - - def setUp(self): - super(TestRPCClient, self).setUp() - self.api = create_api() - self.client = rpc.RPCClient(host="http://127.0.0.1:9191") - self.client._do_request = self.fake_request - - def fake_request(self, method, url, body, headers): - req = webob.Request.blank(url.path) - body = encodeutils.to_utf8(body) - req.body = body - req.method = method - - webob_res = req.get_response(self.api) - return test_utils.FakeHTTPResponse(status=webob_res.status_int, - headers=webob_res.headers, - 
data=webob_res.body) - - def test_method_proxy(self): - proxy = self.client.some_method - self.assertIn("method_proxy", str(proxy)) - - def test_bulk_request(self): - commands = [{"command": "get_images", 'kwargs': {'keyword': True}}, - {"command": "get_all_images"}] - - res = self.client.bulk_request(commands) - self.assertEqual(2, len(res)) - self.assertTrue(res[0]) - self.assertFalse(res[1]) - - def test_exception_raise(self): - try: - self.client.raise_value_error() - self.fail("Exception not raised") - except ValueError as exc: - self.assertEqual("Yep, Just like that!", str(exc)) - - def test_rpc_exception(self): - try: - self.client.raise_weird_error() - self.fail("Exception not raised") - except exception.RPCError: - pass - - def test_non_str_or_dict_response(self): - rst = self.client.count_images(images=[1, 2, 3, 4]) - self.assertEqual(4, rst) - self.assertIsInstance(rst, int) - - -class TestRPCJSONSerializer(test_utils.BaseTestCase): - - def test_to_json(self): - fixture = {"key": "value"} - expected = b'{"key": "value"}' - actual = rpc.RPCJSONSerializer().to_json(fixture) - self.assertEqual(expected, actual) - - def test_to_json_with_date_format_value(self): - fixture = {"date": datetime.datetime(1900, 3, 8, 2)} - expected = {"date": {"_value": "1900-03-08T02:00:00", - "_type": "datetime"}} - actual = rpc.RPCJSONSerializer().to_json(fixture) - actual = jsonutils.loads(actual) - for k in expected['date']: - self.assertEqual(expected['date'][k], actual['date'][k]) - - def test_to_json_with_more_deep_format(self): - fixture = {"is_public": True, "name": [{"name1": "test"}]} - expected = {"is_public": True, "name": [{"name1": "test"}]} - actual = rpc.RPCJSONSerializer().to_json(fixture) - actual = wsgi.JSONResponseSerializer().to_json(fixture) - actual = jsonutils.loads(actual) - for k in expected: - self.assertEqual(expected[k], actual[k]) - - def test_default(self): - fixture = {"key": "value"} - response = webob.Response() - 
rpc.RPCJSONSerializer().default(response, fixture) - self.assertEqual(http.OK, response.status_int) - content_types = [h for h in response.headerlist - if h[0] == 'Content-Type'] - self.assertEqual(1, len(content_types)) - self.assertEqual('application/json', response.content_type) - self.assertEqual(b'{"key": "value"}', response.body) - - -class TestRPCJSONDeserializer(test_utils.BaseTestCase): - - def test_has_body_no_content_length(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = b'asdf' - request.headers.pop('Content-Length') - self.assertFalse(rpc.RPCJSONDeserializer().has_body(request)) - - def test_has_body_zero_content_length(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = b'asdf' - request.headers['Content-Length'] = 0 - self.assertFalse(rpc.RPCJSONDeserializer().has_body(request)) - - def test_has_body_has_content_length(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = b'asdf' - self.assertIn('Content-Length', request.headers) - self.assertTrue(rpc.RPCJSONDeserializer().has_body(request)) - - def test_no_body_no_content_length(self): - request = wsgi.Request.blank('/') - self.assertFalse(rpc.RPCJSONDeserializer().has_body(request)) - - def test_from_json(self): - fixture = '{"key": "value"}' - expected = {"key": "value"} - actual = rpc.RPCJSONDeserializer().from_json(fixture) - self.assertEqual(expected, actual) - - def test_from_json_malformed(self): - fixture = 'kjasdklfjsklajf' - self.assertRaises(webob.exc.HTTPBadRequest, - rpc.RPCJSONDeserializer().from_json, fixture) - - def test_default_no_body(self): - request = wsgi.Request.blank('/') - actual = rpc.RPCJSONDeserializer().default(request) - expected = {} - self.assertEqual(expected, actual) - - def test_default_with_body(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = b'{"key": "value"}' - actual = rpc.RPCJSONDeserializer().default(request) - 
expected = {"body": {"key": "value"}} - self.assertEqual(expected, actual) - - def test_has_body_has_transfer_encoding(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = b'fake_body' - request.headers['transfer-encoding'] = '' - self.assertIn('transfer-encoding', request.headers) - self.assertTrue(rpc.RPCJSONDeserializer().has_body(request)) - - def test_to_json_with_date_format_value(self): - fixture = ('{"date": {"_value": "1900-03-08T02:00:00.000000",' - '"_type": "datetime"}}') - expected = {"date": datetime.datetime(1900, 3, 8, 2)} - actual = rpc.RPCJSONDeserializer().from_json(fixture) - self.assertEqual(expected, actual) diff --git a/glance/tests/unit/common/test_scripts.py b/glance/tests/unit/common/test_scripts.py deleted file mode 100644 index 5c47ec99..00000000 --- a/glance/tests/unit/common/test_scripts.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import mock - -import glance.common.scripts as scripts -from glance.common.scripts.image_import import main as image_import -import glance.tests.utils as test_utils - - -class TestScripts(test_utils.BaseTestCase): - - def setUp(self): - super(TestScripts, self).setUp() - - def test_run_task(self): - task_id = mock.ANY - task_type = 'import' - context = mock.ANY - task_repo = mock.ANY - image_repo = mock.ANY - image_factory = mock.ANY - - with mock.patch.object(image_import, 'run') as mock_run: - scripts.run_task(task_id, task_type, context, task_repo, - image_repo, image_factory) - - mock_run.assert_called_once_with(task_id, context, task_repo, - image_repo, image_factory) diff --git a/glance/tests/unit/common/test_swift_store_utils.py b/glance/tests/unit/common/test_swift_store_utils.py deleted file mode 100644 index a1e965df..00000000 --- a/glance/tests/unit/common/test_swift_store_utils.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2014 Rackspace -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import fixtures - -from glance.common import exception -from glance.common import swift_store_utils -from glance.tests.unit import base - - -class TestSwiftParams(base.IsolatedUnitTest): - - def setUp(self): - super(TestSwiftParams, self).setUp() - conf_file = "glance-swift.conf" - test_dir = self.useFixture(fixtures.TempDir()).path - self.swift_config_file = self._copy_data_file(conf_file, test_dir) - self.config(swift_store_config_file=self.swift_config_file) - - def test_multiple_swift_account_enabled(self): - self.config(swift_store_config_file="glance-swift.conf") - self.assertTrue( - swift_store_utils.is_multiple_swift_store_accounts_enabled()) - - def test_multiple_swift_account_disabled(self): - self.config(swift_store_config_file=None) - self.assertFalse( - swift_store_utils.is_multiple_swift_store_accounts_enabled()) - - def test_swift_config_file_doesnt_exist(self): - self.config(swift_store_config_file='fake-file.conf') - self.assertRaises(exception.InvalidSwiftStoreConfiguration, - swift_store_utils.SwiftParams) - - def test_swift_config_uses_default_values_multiple_account_disabled(self): - default_user = 'user_default' - default_key = 'key_default' - default_auth_address = 'auth@default.com' - default_account_reference = 'ref_default' - confs = {'swift_store_config_file': None, - 'swift_store_user': default_user, - 'swift_store_key': default_key, - 'swift_store_auth_address': default_auth_address, - 'default_swift_reference': default_account_reference} - self.config(**confs) - swift_params = swift_store_utils.SwiftParams().params - self.assertEqual(1, len(swift_params.keys())) - self.assertEqual(default_user, - swift_params[default_account_reference]['user'] - ) - self.assertEqual(default_key, - swift_params[default_account_reference]['key'] - ) - self.assertEqual(default_auth_address, - swift_params[default_account_reference] - ['auth_address'] - ) - - def test_swift_store_config_validates_for_creds_auth_address(self): - swift_params = 
swift_store_utils.SwiftParams().params - self.assertEqual('tenant:user1', - swift_params['ref1']['user'] - ) - self.assertEqual('key1', - swift_params['ref1']['key'] - ) - self.assertEqual('example.com', - swift_params['ref1']['auth_address']) - self.assertEqual('user2', - swift_params['ref2']['user']) - self.assertEqual('key2', - swift_params['ref2']['key']) - self.assertEqual('http://example.com', - swift_params['ref2']['auth_address'] - ) diff --git a/glance/tests/unit/common/test_timeutils.py b/glance/tests/unit/common/test_timeutils.py deleted file mode 100644 index 3658fd6e..00000000 --- a/glance/tests/unit/common/test_timeutils.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import calendar -import datetime - -import iso8601 -import mock - -from glance.common import timeutils -from glance.tests import utils as test_utils - - -class TimeUtilsTest(test_utils.BaseTestCase): - - def setUp(self): - super(TimeUtilsTest, self).setUp() - self.skynet_self_aware_time_str = '1997-08-29T06:14:00Z' - self.skynet_self_aware_time_ms_str = '1997-08-29T06:14:00.000123Z' - self.skynet_self_aware_time = datetime.datetime(1997, 8, 29, 6, 14, 0) - self.skynet_self_aware_ms_time = datetime.datetime( - 1997, 8, 29, 6, 14, 0, 123) - self.one_minute_before = datetime.datetime(1997, 8, 29, 6, 13, 0) - self.one_minute_after = datetime.datetime(1997, 8, 29, 6, 15, 0) - self.skynet_self_aware_time_perfect_str = '1997-08-29T06:14:00.000000' - self.skynet_self_aware_time_perfect = datetime.datetime(1997, 8, 29, - 6, 14, 0) - - def test_isotime(self): - with mock.patch('datetime.datetime') as datetime_mock: - datetime_mock.utcnow.return_value = self.skynet_self_aware_time - dt = timeutils.isotime() - self.assertEqual(dt, self.skynet_self_aware_time_str) - - def test_isotimei_micro_second_precision(self): - with mock.patch('datetime.datetime') as datetime_mock: - datetime_mock.utcnow.return_value = self.skynet_self_aware_ms_time - dt = timeutils.isotime(subsecond=True) - self.assertEqual(dt, self.skynet_self_aware_time_ms_str) - - def test_parse_isotime(self): - expect = timeutils.parse_isotime(self.skynet_self_aware_time_str) - skynet_self_aware_time_utc = self.skynet_self_aware_time.replace( - tzinfo=iso8601.iso8601.UTC) - self.assertEqual(skynet_self_aware_time_utc, expect) - - def test_parse_isotime_micro_second_precision(self): - expect = timeutils.parse_isotime(self.skynet_self_aware_time_ms_str) - skynet_self_aware_time_ms_utc = self.skynet_self_aware_ms_time.replace( - tzinfo=iso8601.iso8601.UTC) - self.assertEqual(skynet_self_aware_time_ms_utc, expect) - - def test_utcnow(self): - with mock.patch('datetime.datetime') as datetime_mock: - 
datetime_mock.utcnow.return_value = self.skynet_self_aware_time - self.assertEqual(timeutils.utcnow(), self.skynet_self_aware_time) - - self.assertFalse(timeutils.utcnow() == self.skynet_self_aware_time) - self.assertTrue(timeutils.utcnow()) - - def test_delta_seconds(self): - before = timeutils.utcnow() - after = before + datetime.timedelta(days=7, seconds=59, - microseconds=123456) - self.assertAlmostEquals(604859.123456, - timeutils.delta_seconds(before, after)) - - def test_iso8601_from_timestamp(self): - utcnow = timeutils.utcnow() - iso = timeutils.isotime(utcnow) - ts = calendar.timegm(utcnow.timetuple()) - self.assertEqual(iso, timeutils.iso8601_from_timestamp(ts)) - - -class TestIso8601Time(test_utils.BaseTestCase): - - def _instaneous(self, timestamp, yr, mon, day, hr, minute, sec, micro): - self.assertEqual(timestamp.year, yr) - self.assertEqual(timestamp.month, mon) - self.assertEqual(timestamp.day, day) - self.assertEqual(timestamp.hour, hr) - self.assertEqual(timestamp.minute, minute) - self.assertEqual(timestamp.second, sec) - self.assertEqual(timestamp.microsecond, micro) - - def _do_test(self, time_str, yr, mon, day, hr, minute, sec, micro, shift): - DAY_SECONDS = 24 * 60 * 60 - timestamp = timeutils.parse_isotime(time_str) - self._instaneous(timestamp, yr, mon, day, hr, minute, sec, micro) - offset = timestamp.tzinfo.utcoffset(None) - self.assertEqual(offset.seconds + offset.days * DAY_SECONDS, shift) - - def test_zulu(self): - time_str = '2012-02-14T20:53:07Z' - self._do_test(time_str, 2012, 2, 14, 20, 53, 7, 0, 0) - - def test_zulu_micros(self): - time_str = '2012-02-14T20:53:07.123Z' - self._do_test(time_str, 2012, 2, 14, 20, 53, 7, 123000, 0) - - def test_offset_east(self): - time_str = '2012-02-14T20:53:07+04:30' - offset = 4.5 * 60 * 60 - self._do_test(time_str, 2012, 2, 14, 20, 53, 7, 0, offset) - - def test_offset_east_micros(self): - time_str = '2012-02-14T20:53:07.42+04:30' - offset = 4.5 * 60 * 60 - self._do_test(time_str, 2012, 2, 14, 
20, 53, 7, 420000, offset) - - def test_offset_west(self): - time_str = '2012-02-14T20:53:07-05:30' - offset = -5.5 * 60 * 60 - self._do_test(time_str, 2012, 2, 14, 20, 53, 7, 0, offset) - - def test_offset_west_micros(self): - time_str = '2012-02-14T20:53:07.654321-05:30' - offset = -5.5 * 60 * 60 - self._do_test(time_str, 2012, 2, 14, 20, 53, 7, 654321, offset) - - def test_compare(self): - zulu = timeutils.parse_isotime('2012-02-14T20:53:07') - east = timeutils.parse_isotime('2012-02-14T20:53:07-01:00') - west = timeutils.parse_isotime('2012-02-14T20:53:07+01:00') - self.assertGreater(east, west) - self.assertGreater(east, zulu) - self.assertGreater(zulu, west) - - def test_compare_micros(self): - zulu = timeutils.parse_isotime('2012-02-14T20:53:07.6544') - east = timeutils.parse_isotime('2012-02-14T19:53:07.654321-01:00') - west = timeutils.parse_isotime('2012-02-14T21:53:07.655+01:00') - self.assertLess(east, west) - self.assertLess(east, zulu) - self.assertLess(zulu, west) - - def test_zulu_roundtrip(self): - time_str = '2012-02-14T20:53:07Z' - zulu = timeutils.parse_isotime(time_str) - self.assertEqual(zulu.tzinfo, iso8601.iso8601.UTC) - self.assertEqual(timeutils.isotime(zulu), time_str) - - def test_east_roundtrip(self): - time_str = '2012-02-14T20:53:07-07:00' - east = timeutils.parse_isotime(time_str) - self.assertEqual(east.tzinfo.tzname(None), '-07:00') - self.assertEqual(timeutils.isotime(east), time_str) - - def test_west_roundtrip(self): - time_str = '2012-02-14T20:53:07+11:30' - west = timeutils.parse_isotime(time_str) - self.assertEqual(west.tzinfo.tzname(None), '+11:30') - self.assertEqual(timeutils.isotime(west), time_str) - - def test_now_roundtrip(self): - time_str = timeutils.isotime() - now = timeutils.parse_isotime(time_str) - self.assertEqual(now.tzinfo, iso8601.iso8601.UTC) - self.assertEqual(timeutils.isotime(now), time_str) - - def test_zulu_normalize(self): - time_str = '2012-02-14T20:53:07Z' - zulu = timeutils.parse_isotime(time_str) 
- normed = timeutils.normalize_time(zulu) - self._instaneous(normed, 2012, 2, 14, 20, 53, 7, 0) - - def test_east_normalize(self): - time_str = '2012-02-14T20:53:07-07:00' - east = timeutils.parse_isotime(time_str) - normed = timeutils.normalize_time(east) - self._instaneous(normed, 2012, 2, 15, 3, 53, 7, 0) - - def test_west_normalize(self): - time_str = '2012-02-14T20:53:07+21:00' - west = timeutils.parse_isotime(time_str) - normed = timeutils.normalize_time(west) - self._instaneous(normed, 2012, 2, 13, 23, 53, 7, 0) - - def test_normalize_aware_to_naive(self): - dt = datetime.datetime(2011, 2, 14, 20, 53, 7) - time_str = '2011-02-14T20:53:07+21:00' - aware = timeutils.parse_isotime(time_str) - naive = timeutils.normalize_time(aware) - self.assertLess(naive, dt) - - def test_normalize_zulu_aware_to_naive(self): - dt = datetime.datetime(2011, 2, 14, 20, 53, 7) - time_str = '2011-02-14T19:53:07Z' - aware = timeutils.parse_isotime(time_str) - naive = timeutils.normalize_time(aware) - self.assertLess(naive, dt) - - def test_normalize_naive(self): - dt = datetime.datetime(2011, 2, 14, 20, 53, 7) - dtn = datetime.datetime(2011, 2, 14, 19, 53, 7) - naive = timeutils.normalize_time(dtn) - self.assertLess(naive, dt) diff --git a/glance/tests/unit/common/test_utils.py b/glance/tests/unit/common/test_utils.py deleted file mode 100644 index 34d93563..00000000 --- a/glance/tests/unit/common/test_utils.py +++ /dev/null @@ -1,498 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# Copyright 2015 Mirantis, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import tempfile - -import six -import webob - -from glance.common import exception -from glance.common import utils -from glance.tests import utils as test_utils - - -class TestUtils(test_utils.BaseTestCase): - """Test routines in glance.utils""" - - def test_cooperative_reader(self): - """Ensure cooperative reader class accesses all bytes of file""" - BYTES = 1024 - bytes_read = 0 - with tempfile.TemporaryFile('w+') as tmp_fd: - tmp_fd.write('*' * BYTES) - tmp_fd.seek(0) - for chunk in utils.CooperativeReader(tmp_fd): - bytes_read += len(chunk) - - self.assertEqual(BYTES, bytes_read) - - bytes_read = 0 - with tempfile.TemporaryFile('w+') as tmp_fd: - tmp_fd.write('*' * BYTES) - tmp_fd.seek(0) - reader = utils.CooperativeReader(tmp_fd) - byte = reader.read(1) - while len(byte) != 0: - bytes_read += 1 - byte = reader.read(1) - - self.assertEqual(BYTES, bytes_read) - - def test_cooperative_reader_of_iterator(self): - """Ensure cooperative reader supports iterator backends too""" - data = b'abcdefgh' - data_list = [data[i:i + 1] * 3 for i in range(len(data))] - reader = utils.CooperativeReader(data_list) - chunks = [] - while True: - chunks.append(reader.read(3)) - if chunks[-1] == b'': - break - meat = b''.join(chunks) - self.assertEqual(b'aaabbbcccdddeeefffggghhh', meat) - - def test_cooperative_reader_of_iterator_stop_iteration_err(self): - """Ensure cooperative reader supports iterator backends too""" - reader = utils.CooperativeReader([l * 3 for l in '']) - chunks = [] - while True: - chunks.append(reader.read(3)) - if chunks[-1] == b'': - break - meat = b''.join(chunks) - self.assertEqual(b'', meat) - - def _create_generator(self, chunk_size, max_iterations): - chars = b'abc' - iteration = 0 - while True: - index = iteration % len(chars) - chunk = chars[index:index + 1] * chunk_size - yield chunk - iteration += 1 - if iteration >= 
max_iterations: - raise StopIteration() - - def _test_reader_chunked(self, chunk_size, read_size, max_iterations=5): - generator = self._create_generator(chunk_size, max_iterations) - reader = utils.CooperativeReader(generator) - result = bytearray() - while True: - data = reader.read(read_size) - if len(data) == 0: - break - self.assertLessEqual(len(data), read_size) - result += data - expected = (b'a' * chunk_size + - b'b' * chunk_size + - b'c' * chunk_size + - b'a' * chunk_size + - b'b' * chunk_size) - self.assertEqual(expected, bytes(result)) - - def test_cooperative_reader_preserves_size_chunk_less_then_read(self): - self._test_reader_chunked(43, 101) - - def test_cooperative_reader_preserves_size_chunk_equals_read(self): - self._test_reader_chunked(1024, 1024) - - def test_cooperative_reader_preserves_size_chunk_more_then_read(self): - chunk_size = 16 * 1024 * 1024 # 16 Mb, as in remote http source - read_size = 8 * 1024 # 8k, as in httplib - self._test_reader_chunked(chunk_size, read_size) - - def test_limiting_reader(self): - """Ensure limiting reader class accesses all bytes of file""" - BYTES = 1024 - bytes_read = 0 - data = six.StringIO("*" * BYTES) - for chunk in utils.LimitingReader(data, BYTES): - bytes_read += len(chunk) - - self.assertEqual(BYTES, bytes_read) - - bytes_read = 0 - data = six.StringIO("*" * BYTES) - reader = utils.LimitingReader(data, BYTES) - byte = reader.read(1) - while len(byte) != 0: - bytes_read += 1 - byte = reader.read(1) - - self.assertEqual(BYTES, bytes_read) - - def test_limiting_reader_fails(self): - """Ensure limiting reader class throws exceptions if limit exceeded""" - BYTES = 1024 - - def _consume_all_iter(): - bytes_read = 0 - data = six.StringIO("*" * BYTES) - for chunk in utils.LimitingReader(data, BYTES - 1): - bytes_read += len(chunk) - - self.assertRaises(exception.ImageSizeLimitExceeded, _consume_all_iter) - - def _consume_all_read(): - bytes_read = 0 - data = six.StringIO("*" * BYTES) - reader = 
utils.LimitingReader(data, BYTES - 1) - byte = reader.read(1) - while len(byte) != 0: - bytes_read += 1 - byte = reader.read(1) - - self.assertRaises(exception.ImageSizeLimitExceeded, _consume_all_read) - - def test_get_meta_from_headers(self): - resp = webob.Response() - resp.headers = {"x-image-meta-name": 'test', - 'x-image-meta-virtual-size': 80} - result = utils.get_image_meta_from_headers(resp) - self.assertEqual({'name': 'test', 'properties': {}, - 'virtual_size': 80}, result) - - def test_get_meta_from_headers_none_virtual_size(self): - resp = webob.Response() - resp.headers = {"x-image-meta-name": 'test', - 'x-image-meta-virtual-size': 'None'} - result = utils.get_image_meta_from_headers(resp) - self.assertEqual({'name': 'test', 'properties': {}, - 'virtual_size': None}, result) - - def test_get_meta_from_headers_bad_headers(self): - resp = webob.Response() - resp.headers = {"x-image-meta-bad": 'test'} - self.assertRaises(webob.exc.HTTPBadRequest, - utils.get_image_meta_from_headers, resp) - resp.headers = {"x-image-meta-": 'test'} - self.assertRaises(webob.exc.HTTPBadRequest, - utils.get_image_meta_from_headers, resp) - resp.headers = {"x-image-meta-*": 'test'} - self.assertRaises(webob.exc.HTTPBadRequest, - utils.get_image_meta_from_headers, resp) - - def test_image_meta(self): - image_meta = {'x-image-meta-size': 'test'} - image_meta_properties = {'properties': {'test': "test"}} - actual = utils.image_meta_to_http_headers(image_meta) - actual_test2 = utils.image_meta_to_http_headers( - image_meta_properties) - self.assertEqual({'x-image-meta-x-image-meta-size': u'test'}, actual) - self.assertEqual({'x-image-meta-property-test': u'test'}, - actual_test2) - - def test_create_mashup_dict_with_different_core_custom_properties(self): - image_meta = { - 'id': 'test-123', - 'name': 'fake_image', - 'status': 'active', - 'created_at': '', - 'min_disk': '10G', - 'min_ram': '1024M', - 'protected': False, - 'locations': '', - 'checksum': 'c1234', - 'owner': '', - 
'disk_format': 'raw', - 'container_format': 'bare', - 'size': '123456789', - 'virtual_size': '123456789', - 'is_public': 'public', - 'deleted': True, - 'updated_at': '', - 'properties': {'test_key': 'test_1234'}, - } - - mashup_dict = utils.create_mashup_dict(image_meta) - self.assertNotIn('properties', mashup_dict) - self.assertEqual(image_meta['properties']['test_key'], - mashup_dict['test_key']) - - def test_create_mashup_dict_with_same_core_custom_properties(self): - image_meta = { - 'id': 'test-123', - 'name': 'fake_image', - 'status': 'active', - 'created_at': '', - 'min_disk': '10G', - 'min_ram': '1024M', - 'protected': False, - 'locations': '', - 'checksum': 'c1234', - 'owner': '', - 'disk_format': 'raw', - 'container_format': 'bare', - 'size': '123456789', - 'virtual_size': '123456789', - 'is_public': 'public', - 'deleted': True, - 'updated_at': '', - 'properties': {'min_ram': '2048M'}, - } - - mashup_dict = utils.create_mashup_dict(image_meta) - self.assertNotIn('properties', mashup_dict) - self.assertNotEqual(image_meta['properties']['min_ram'], - mashup_dict['min_ram']) - self.assertEqual(image_meta['min_ram'], mashup_dict['min_ram']) - - def test_mutating(self): - class FakeContext(object): - def __init__(self): - self.read_only = False - - class Fake(object): - def __init__(self): - self.context = FakeContext() - - def fake_function(req, context): - return 'test passed' - - req = webob.Request.blank('/some_request') - result = utils.mutating(fake_function) - self.assertEqual("test passed", result(req, Fake())) - - def test_validate_key_cert_key(self): - self.config(digest_algorithm='sha256') - var_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), - '../../', 'var')) - keyfile = os.path.join(var_dir, 'privatekey.key') - certfile = os.path.join(var_dir, 'certificate.crt') - utils.validate_key_cert(keyfile, certfile) - - def test_validate_key_cert_no_private_key(self): - with tempfile.NamedTemporaryFile('w+') as tmpf: - 
self.assertRaises(RuntimeError, - utils.validate_key_cert, - "/not/a/file", tmpf.name) - - def test_validate_key_cert_cert_cant_read(self): - with tempfile.NamedTemporaryFile('w+') as keyf: - with tempfile.NamedTemporaryFile('w+') as certf: - os.chmod(certf.name, 0) - self.assertRaises(RuntimeError, - utils.validate_key_cert, - keyf.name, certf.name) - - def test_validate_key_cert_key_cant_read(self): - with tempfile.NamedTemporaryFile('w+') as keyf: - with tempfile.NamedTemporaryFile('w+') as certf: - os.chmod(keyf.name, 0) - self.assertRaises(RuntimeError, - utils.validate_key_cert, - keyf.name, certf.name) - - def test_invalid_digest_algorithm(self): - self.config(digest_algorithm='fake_algorithm') - var_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), - '../../', 'var')) - keyfile = os.path.join(var_dir, 'privatekey.key') - certfile = os.path.join(var_dir, 'certificate.crt') - self.assertRaises(ValueError, - utils.validate_key_cert, - keyfile, certfile) - - def test_valid_hostname(self): - valid_inputs = ['localhost', - 'glance04-a' - 'G', - '528491'] - - for input_str in valid_inputs: - self.assertTrue(utils.is_valid_hostname(input_str)) - - def test_valid_hostname_fail(self): - invalid_inputs = ['localhost.localdomain', - '192.168.0.1', - u'\u2603', - 'glance02.stack42.local'] - - for input_str in invalid_inputs: - self.assertFalse(utils.is_valid_hostname(input_str)) - - def test_valid_fqdn(self): - valid_inputs = ['localhost.localdomain', - 'glance02.stack42.local' - 'glance04-a.stack47.local', - 'img83.glance.xn--penstack-r74e.org'] - - for input_str in valid_inputs: - self.assertTrue(utils.is_valid_fqdn(input_str)) - - def test_valid_fqdn_fail(self): - invalid_inputs = ['localhost', - '192.168.0.1', - '999.88.77.6', - u'\u2603.local', - 'glance02.stack42'] - - for input_str in invalid_inputs: - self.assertFalse(utils.is_valid_fqdn(input_str)) - - def test_valid_host_port_string(self): - valid_pairs = ['10.11.12.13:80', - '172.17.17.1:65535', - 
'[fe80::a:b:c:d]:9990', - 'localhost:9990', - 'localhost.localdomain:9990', - 'glance02.stack42.local:1234', - 'glance04-a.stack47.local:1234', - 'img83.glance.xn--penstack-r74e.org:13080'] - - for pair_str in valid_pairs: - host, port = utils.parse_valid_host_port(pair_str) - - escaped = pair_str.startswith('[') - expected_host = '%s%s%s' % ('[' if escaped else '', host, - ']' if escaped else '') - - self.assertTrue(pair_str.startswith(expected_host)) - self.assertGreater(port, 0) - - expected_pair = '%s:%d' % (expected_host, port) - self.assertEqual(expected_pair, pair_str) - - def test_valid_host_port_string_fail(self): - invalid_pairs = ['', - '10.11.12.13', - '172.17.17.1:99999', - '290.12.52.80:5673', - 'absurd inputs happen', - u'\u2601', - u'\u2603:8080', - 'fe80::1', - '[fe80::2]', - ':5673', - '[fe80::a:b:c:d]9990', - 'fe80:a:b:c:d:e:f:1:2:3:4', - 'fe80:a:b:c:d:e:f:g', - 'fe80::1:8080', - '[fe80:a:b:c:d:e:f:g]:9090', - '[a:b:s:u:r:d]:fe80'] - - for pair in invalid_pairs: - self.assertRaises(ValueError, - utils.parse_valid_host_port, - pair) - - -class SplitFilterOpTestCase(test_utils.BaseTestCase): - - def test_less_than_operator(self): - expr = 'lt:bar' - returned = utils.split_filter_op(expr) - self.assertEqual(('lt', 'bar'), returned) - - def test_less_than_equal_operator(self): - expr = 'lte:bar' - returned = utils.split_filter_op(expr) - self.assertEqual(('lte', 'bar'), returned) - - def test_greater_than_operator(self): - expr = 'gt:bar' - returned = utils.split_filter_op(expr) - self.assertEqual(('gt', 'bar'), returned) - - def test_greater_than_equal_operator(self): - expr = 'gte:bar' - returned = utils.split_filter_op(expr) - self.assertEqual(('gte', 'bar'), returned) - - def test_not_equal_operator(self): - expr = 'neq:bar' - returned = utils.split_filter_op(expr) - self.assertEqual(('neq', 'bar'), returned) - - def test_equal_operator(self): - expr = 'eq:bar' - returned = utils.split_filter_op(expr) - self.assertEqual(('eq', 'bar'), returned) - 
- def test_in_operator(self): - expr = 'in:bar' - returned = utils.split_filter_op(expr) - self.assertEqual(('in', 'bar'), returned) - - def test_split_filter_value_for_quotes(self): - expr = '\"fake\\\"name\",fakename,\"fake,name\"' - returned = utils.split_filter_value_for_quotes(expr) - list_values = ['fake\\"name', 'fakename', 'fake,name'] - self.assertEqual(list_values, returned) - - def test_validate_quotes(self): - expr = '\"aaa\\\"aa\",bb,\"cc\"' - returned = utils.validate_quotes(expr) - self.assertIsNone(returned) - - invalid_expr = ['\"aa', 'ss\"', 'aa\"bb\"cc', '\"aa\"\"bb\"'] - for expr in invalid_expr: - self.assertRaises(exception.InvalidParameterValue, - utils.validate_quotes, - expr) - - def test_default_operator(self): - expr = 'bar' - returned = utils.split_filter_op(expr) - self.assertEqual(('eq', expr), returned) - - def test_default_operator_with_datetime(self): - expr = '2015-08-27T09:49:58Z' - returned = utils.split_filter_op(expr) - self.assertEqual(('eq', expr), returned) - - def test_operator_with_datetime(self): - expr = 'lt:2015-08-27T09:49:58Z' - returned = utils.split_filter_op(expr) - self.assertEqual(('lt', '2015-08-27T09:49:58Z'), returned) - - -class EvaluateFilterOpTestCase(test_utils.BaseTestCase): - - def test_less_than_operator(self): - self.assertTrue(utils.evaluate_filter_op(9, 'lt', 10)) - self.assertFalse(utils.evaluate_filter_op(10, 'lt', 10)) - self.assertFalse(utils.evaluate_filter_op(11, 'lt', 10)) - - def test_less_than_equal_operator(self): - self.assertTrue(utils.evaluate_filter_op(9, 'lte', 10)) - self.assertTrue(utils.evaluate_filter_op(10, 'lte', 10)) - self.assertFalse(utils.evaluate_filter_op(11, 'lte', 10)) - - def test_greater_than_operator(self): - self.assertFalse(utils.evaluate_filter_op(9, 'gt', 10)) - self.assertFalse(utils.evaluate_filter_op(10, 'gt', 10)) - self.assertTrue(utils.evaluate_filter_op(11, 'gt', 10)) - - def test_greater_than_equal_operator(self): - 
self.assertFalse(utils.evaluate_filter_op(9, 'gte', 10)) - self.assertTrue(utils.evaluate_filter_op(10, 'gte', 10)) - self.assertTrue(utils.evaluate_filter_op(11, 'gte', 10)) - - def test_not_equal_operator(self): - self.assertTrue(utils.evaluate_filter_op(9, 'neq', 10)) - self.assertFalse(utils.evaluate_filter_op(10, 'neq', 10)) - self.assertTrue(utils.evaluate_filter_op(11, 'neq', 10)) - - def test_equal_operator(self): - self.assertFalse(utils.evaluate_filter_op(9, 'eq', 10)) - self.assertTrue(utils.evaluate_filter_op(10, 'eq', 10)) - self.assertFalse(utils.evaluate_filter_op(11, 'eq', 10)) - - def test_invalid_operator(self): - self.assertRaises(exception.InvalidFilterOperatorValue, - utils.evaluate_filter_op, '10', 'bar', '8') diff --git a/glance/tests/unit/common/test_wsgi.py b/glance/tests/unit/common/test_wsgi.py deleted file mode 100644 index 38f5633d..00000000 --- a/glance/tests/unit/common/test_wsgi.py +++ /dev/null @@ -1,722 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# Copyright 2014 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime -import gettext -import os -import socket - -from babel import localedata -import eventlet.patcher -import fixtures -import mock -from oslo_concurrency import processutils -from oslo_serialization import jsonutils -import routes -import six -from six.moves import http_client as http -import webob - -from glance.api.v1 import router as router_v1 -from glance.api.v2 import router as router_v2 -from glance.common import exception -from glance.common import utils -from glance.common import wsgi -from glance import i18n -from glance.tests import utils as test_utils - - -class RequestTest(test_utils.BaseTestCase): - - def _set_expected_languages(self, all_locales=None, avail_locales=None): - if all_locales is None: - all_locales = [] - - # Override localedata.locale_identifiers to return some locales. - def returns_some_locales(*args, **kwargs): - return all_locales - - self.stubs.Set(localedata, 'locale_identifiers', returns_some_locales) - - # Override gettext.find to return other than None for some languages. - def fake_gettext_find(lang_id, *args, **kwargs): - found_ret = '/glance/%s/LC_MESSAGES/glance.mo' % lang_id - if avail_locales is None: - # All locales are available. 
- return found_ret - languages = kwargs['languages'] - if languages[0] in avail_locales: - return found_ret - return None - - self.stubs.Set(gettext, 'find', fake_gettext_find) - - def test_content_range(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Content-Range"] = 'bytes 10-99/*' - range_ = request.get_range_from_request(120) - self.assertEqual(10, range_.start) - self.assertEqual(100, range_.stop) # non-inclusive - self.assertIsNone(range_.length) - - def test_content_range_invalid(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Content-Range"] = 'bytes=0-99' - self.assertRaises(webob.exc.HTTPRequestRangeNotSatisfiable, - request.get_range_from_request, 120) - - def test_range(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Range"] = 'bytes=10-99' - range_ = request.get_range_from_request(120) - self.assertEqual(10, range_.start) - self.assertEqual(100, range_.end) # non-inclusive - - def test_range_invalid(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Range"] = 'bytes=150-' - self.assertRaises(webob.exc.HTTPRequestRangeNotSatisfiable, - request.get_range_from_request, 120) - - def test_content_type_missing(self): - request = wsgi.Request.blank('/tests/123') - self.assertRaises(exception.InvalidContentType, - request.get_content_type, ('application/xml',)) - - def test_content_type_unsupported(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Content-Type"] = "text/html" - self.assertRaises(exception.InvalidContentType, - request.get_content_type, ('application/xml',)) - - def test_content_type_with_charset(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Content-Type"] = "application/json; charset=UTF-8" - result = request.get_content_type(('application/json',)) - self.assertEqual("application/json", result) - - def test_content_type_from_accept_xml(self): - request = wsgi.Request.blank('/tests/123') - 
request.headers["Accept"] = "application/xml" - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - def test_content_type_from_accept_json(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/json" - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - def test_content_type_from_accept_xml_json(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/xml, application/json" - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - def test_content_type_from_accept_json_xml_quality(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = ("application/json; q=0.3, " - "application/xml; q=0.9") - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - def test_content_type_accept_default(self): - request = wsgi.Request.blank('/tests/123.unsupported') - request.headers["Accept"] = "application/unsupported1" - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - def test_language_accept_default(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept-Language"] = "zz-ZZ,zz;q=0.8" - result = request.best_match_language() - self.assertIsNone(result) - - def test_language_accept_none(self): - request = wsgi.Request.blank('/tests/123') - result = request.best_match_language() - self.assertIsNone(result) - - def test_best_match_language_expected(self): - # If Accept-Language is a supported language, best_match_language() - # returns it. - self._set_expected_languages(all_locales=['it']) - - req = wsgi.Request.blank('/', headers={'Accept-Language': 'it'}) - self.assertEqual('it', req.best_match_language()) - - def test_request_match_language_unexpected(self): - # If Accept-Language is a language we do not support, - # best_match_language() returns None. 
- self._set_expected_languages(all_locales=['it']) - - req = wsgi.Request.blank('/', headers={'Accept-Language': 'unknown'}) - self.assertIsNone(req.best_match_language()) - - @mock.patch.object(webob.acceptparse.AcceptLanguage, 'best_match') - def test_best_match_language_unknown(self, mock_best_match): - # Test that we are actually invoking language negotiation by webop - request = wsgi.Request.blank('/') - accepted = 'unknown-lang' - request.headers = {'Accept-Language': accepted} - - mock_best_match.return_value = None - - self.assertIsNone(request.best_match_language()) - - # If Accept-Language is missing or empty, match should be None - request.headers = {'Accept-Language': ''} - self.assertIsNone(request.best_match_language()) - request.headers.pop('Accept-Language') - self.assertIsNone(request.best_match_language()) - - def test_http_error_response_codes(self): - sample_id, member_id, tag_val, task_id = 'abc', '123', '1', '2' - - """Makes sure v1 unallowed methods return 405""" - unallowed_methods = [ - ('/images', ['PUT', 'DELETE', 'HEAD', 'PATCH']), - ('/images/detail', ['POST', 'PUT', 'DELETE', 'PATCH']), - ('/images/%s' % sample_id, ['POST', 'PATCH']), - ('/images/%s/members' % sample_id, - ['POST', 'DELETE', 'HEAD', 'PATCH']), - ('/images/%s/members/%s' % (sample_id, member_id), - ['POST', 'HEAD', 'PATCH']), - ] - api = test_utils.FakeAuthMiddleware(router_v1.API(routes.Mapper())) - for uri, methods in unallowed_methods: - for method in methods: - req = webob.Request.blank(uri) - req.method = method - res = req.get_response(api) - self.assertEqual(http.METHOD_NOT_ALLOWED, res.status_int) - - """Makes sure v2 unallowed methods return 405""" - unallowed_methods = [ - ('/schemas/image', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), - ('/schemas/images', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), - ('/schemas/member', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), - ('/schemas/members', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), - ('/schemas/task', 
['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), - ('/schemas/tasks', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), - ('/images', ['PUT', 'DELETE', 'PATCH', 'HEAD']), - ('/images/%s' % sample_id, ['POST', 'PUT', 'HEAD']), - ('/images/%s/file' % sample_id, - ['POST', 'DELETE', 'PATCH', 'HEAD']), - ('/images/%s/tags/%s' % (sample_id, tag_val), - ['GET', 'POST', 'PATCH', 'HEAD']), - ('/images/%s/members' % sample_id, - ['PUT', 'DELETE', 'PATCH', 'HEAD']), - ('/images/%s/members/%s' % (sample_id, member_id), - ['POST', 'PATCH', 'HEAD']), - ('/tasks', ['PUT', 'DELETE', 'PATCH', 'HEAD']), - ('/tasks/%s' % task_id, ['POST', 'PUT', 'PATCH', 'HEAD']), - ] - api = test_utils.FakeAuthMiddleware(router_v2.API(routes.Mapper())) - for uri, methods in unallowed_methods: - for method in methods: - req = webob.Request.blank(uri) - req.method = method - res = req.get_response(api) - self.assertEqual(http.METHOD_NOT_ALLOWED, res.status_int) - - # Makes sure not implemented methods return 405 - req = webob.Request.blank('/schemas/image') - req.method = 'NonexistentMethod' - res = req.get_response(api) - self.assertEqual(http.METHOD_NOT_ALLOWED, res.status_int) - - -class ResourceTest(test_utils.BaseTestCase): - - def test_get_action_args(self): - env = { - 'wsgiorg.routing_args': [ - None, - { - 'controller': None, - 'format': None, - 'action': 'update', - 'id': 12, - }, - ], - } - - expected = {'action': 'update', 'id': 12} - actual = wsgi.Resource(None, None, None).get_action_args(env) - - self.assertEqual(expected, actual) - - def test_get_action_args_invalid_index(self): - env = {'wsgiorg.routing_args': []} - expected = {} - actual = wsgi.Resource(None, None, None).get_action_args(env) - self.assertEqual(expected, actual) - - def test_get_action_args_del_controller_error(self): - actions = {'format': None, - 'action': 'update', - 'id': 12} - env = {'wsgiorg.routing_args': [None, actions]} - expected = {'action': 'update', 'id': 12} - actual = wsgi.Resource(None, None, 
None).get_action_args(env) - self.assertEqual(expected, actual) - - def test_get_action_args_del_format_error(self): - actions = {'action': 'update', 'id': 12} - env = {'wsgiorg.routing_args': [None, actions]} - expected = {'action': 'update', 'id': 12} - actual = wsgi.Resource(None, None, None).get_action_args(env) - self.assertEqual(expected, actual) - - def test_dispatch(self): - class Controller(object): - def index(self, shirt, pants=None): - return (shirt, pants) - - resource = wsgi.Resource(None, None, None) - actual = resource.dispatch(Controller(), 'index', 'on', pants='off') - expected = ('on', 'off') - self.assertEqual(expected, actual) - - def test_dispatch_default(self): - class Controller(object): - def default(self, shirt, pants=None): - return (shirt, pants) - - resource = wsgi.Resource(None, None, None) - actual = resource.dispatch(Controller(), 'index', 'on', pants='off') - expected = ('on', 'off') - self.assertEqual(expected, actual) - - def test_dispatch_no_default(self): - class Controller(object): - def show(self, shirt, pants=None): - return (shirt, pants) - - resource = wsgi.Resource(None, None, None) - self.assertRaises(AttributeError, resource.dispatch, Controller(), - 'index', 'on', pants='off') - - def test_call(self): - class FakeController(object): - def index(self, shirt, pants=None): - return (shirt, pants) - - resource = wsgi.Resource(FakeController(), None, None) - - def dispatch(self, obj, action, *args, **kwargs): - if isinstance(obj, wsgi.JSONRequestDeserializer): - return [] - if isinstance(obj, wsgi.JSONResponseSerializer): - raise webob.exc.HTTPForbidden() - - self.stubs.Set(wsgi.Resource, 'dispatch', dispatch) - - request = wsgi.Request.blank('/') - - response = resource.__call__(request) - - self.assertIsInstance(response, webob.exc.HTTPForbidden) - self.assertEqual(http.FORBIDDEN, response.status_code) - - def test_call_raises_exception(self): - class FakeController(object): - def index(self, shirt, pants=None): - return 
(shirt, pants) - - resource = wsgi.Resource(FakeController(), None, None) - - def dispatch(self, obj, action, *args, **kwargs): - raise Exception("test exception") - - self.stubs.Set(wsgi.Resource, 'dispatch', dispatch) - - request = wsgi.Request.blank('/') - - response = resource.__call__(request) - - self.assertIsInstance(response, webob.exc.HTTPInternalServerError) - self.assertEqual(http.INTERNAL_SERVER_ERROR, response.status_code) - - @mock.patch.object(wsgi, 'translate_exception') - def test_resource_call_error_handle_localized(self, - mock_translate_exception): - class Controller(object): - def delete(self, req, identity): - raise webob.exc.HTTPBadRequest(explanation='Not Found') - - actions = {'action': 'delete', 'identity': 12} - env = {'wsgiorg.routing_args': [None, actions]} - request = wsgi.Request.blank('/tests/123', environ=env) - message_es = 'No Encontrado' - - resource = wsgi.Resource(Controller(), - wsgi.JSONRequestDeserializer(), - None) - translated_exc = webob.exc.HTTPBadRequest(message_es) - mock_translate_exception.return_value = translated_exc - - e = self.assertRaises(webob.exc.HTTPBadRequest, - resource, request) - self.assertEqual(message_es, str(e)) - - @mock.patch.object(webob.acceptparse.AcceptLanguage, 'best_match') - @mock.patch.object(i18n, 'translate') - def test_translate_exception(self, mock_translate, mock_best_match): - - mock_translate.return_value = 'No Encontrado' - mock_best_match.return_value = 'de' - - req = wsgi.Request.blank('/tests/123') - req.headers["Accept-Language"] = "de" - - e = webob.exc.HTTPNotFound(explanation='Not Found') - e = wsgi.translate_exception(req, e) - self.assertEqual('No Encontrado', e.explanation) - - def test_response_headers_encoded(self): - # prepare environment - for_openstack_comrades = \ - u'\u0417\u0430 \u043e\u043f\u0435\u043d\u0441\u0442\u0435\u043a, ' \ - u'\u0442\u043e\u0432\u0430\u0440\u0438\u0449\u0438' - - class FakeController(object): - def index(self, shirt, pants=None): - return 
(shirt, pants) - - class FakeSerializer(object): - def index(self, response, result): - response.headers['unicode_test'] = for_openstack_comrades - - # make request - resource = wsgi.Resource(FakeController(), None, FakeSerializer()) - actions = {'action': 'index'} - env = {'wsgiorg.routing_args': [None, actions]} - request = wsgi.Request.blank('/tests/123', environ=env) - response = resource.__call__(request) - - # ensure it has been encoded correctly - value = (response.headers['unicode_test'].decode('utf-8') - if six.PY2 else response.headers['unicode_test']) - self.assertEqual(for_openstack_comrades, value) - - -class JSONResponseSerializerTest(test_utils.BaseTestCase): - - def test_to_json(self): - fixture = {"key": "value"} - expected = b'{"key": "value"}' - actual = wsgi.JSONResponseSerializer().to_json(fixture) - self.assertEqual(expected, actual) - - def test_to_json_with_date_format_value(self): - fixture = {"date": datetime.datetime(1901, 3, 8, 2)} - expected = b'{"date": "1901-03-08T02:00:00.000000"}' - actual = wsgi.JSONResponseSerializer().to_json(fixture) - self.assertEqual(expected, actual) - - def test_to_json_with_more_deep_format(self): - fixture = {"is_public": True, "name": [{"name1": "test"}]} - expected = {"is_public": True, "name": [{"name1": "test"}]} - actual = wsgi.JSONResponseSerializer().to_json(fixture) - actual = jsonutils.loads(actual) - for k in expected: - self.assertEqual(expected[k], actual[k]) - - def test_to_json_with_set(self): - fixture = set(["foo"]) - expected = b'["foo"]' - actual = wsgi.JSONResponseSerializer().to_json(fixture) - self.assertEqual(expected, actual) - - def test_default(self): - fixture = {"key": "value"} - response = webob.Response() - wsgi.JSONResponseSerializer().default(response, fixture) - self.assertEqual(http.OK, response.status_int) - content_types = [h for h in response.headerlist - if h[0] == 'Content-Type'] - self.assertEqual(1, len(content_types)) - self.assertEqual('application/json', 
response.content_type) - self.assertEqual(b'{"key": "value"}', response.body) - - -class JSONRequestDeserializerTest(test_utils.BaseTestCase): - - def test_has_body_no_content_length(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = b'asdf' - request.headers.pop('Content-Length') - self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request)) - - def test_has_body_zero_content_length(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = b'asdf' - request.headers['Content-Length'] = 0 - self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request)) - - def test_has_body_has_content_length(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = b'asdf' - self.assertIn('Content-Length', request.headers) - self.assertTrue(wsgi.JSONRequestDeserializer().has_body(request)) - - def test_no_body_no_content_length(self): - request = wsgi.Request.blank('/') - self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request)) - - def test_from_json(self): - fixture = '{"key": "value"}' - expected = {"key": "value"} - actual = wsgi.JSONRequestDeserializer().from_json(fixture) - self.assertEqual(expected, actual) - - def test_from_json_malformed(self): - fixture = 'kjasdklfjsklajf' - self.assertRaises(webob.exc.HTTPBadRequest, - wsgi.JSONRequestDeserializer().from_json, fixture) - - def test_default_no_body(self): - request = wsgi.Request.blank('/') - actual = wsgi.JSONRequestDeserializer().default(request) - expected = {} - self.assertEqual(expected, actual) - - def test_default_with_body(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = b'{"key": "value"}' - actual = wsgi.JSONRequestDeserializer().default(request) - expected = {"body": {"key": "value"}} - self.assertEqual(expected, actual) - - def test_has_body_has_transfer_encoding(self): - self.assertTrue(self._check_transfer_encoding( - transfer_encoding='chunked')) - - def 
test_has_body_multiple_transfer_encoding(self): - self.assertTrue(self._check_transfer_encoding( - transfer_encoding='chunked, gzip')) - - def test_has_body_invalid_transfer_encoding(self): - self.assertFalse(self._check_transfer_encoding( - transfer_encoding='invalid', content_length=0)) - - def test_has_body_invalid_transfer_encoding_no_content_len_and_body(self): - self.assertFalse(self._check_transfer_encoding( - transfer_encoding='invalid', include_body=False)) - - def test_has_body_invalid_transfer_encoding_no_content_len_but_body(self): - self.assertTrue(self._check_transfer_encoding( - transfer_encoding='invalid', include_body=True)) - - def test_has_body_invalid_transfer_encoding_with_content_length(self): - self.assertTrue(self._check_transfer_encoding( - transfer_encoding='invalid', content_length=5)) - - def test_has_body_valid_transfer_encoding_with_content_length(self): - self.assertTrue(self._check_transfer_encoding( - transfer_encoding='chunked', content_length=1)) - - def test_has_body_valid_transfer_encoding_without_content_length(self): - self.assertTrue(self._check_transfer_encoding( - transfer_encoding='chunked')) - - def _check_transfer_encoding(self, transfer_encoding=None, - content_length=None, include_body=True): - request = wsgi.Request.blank('/') - request.method = 'POST' - if include_body: - request.body = b'fake_body' - request.headers['transfer-encoding'] = transfer_encoding - if content_length is not None: - request.headers['content-length'] = content_length - - return wsgi.JSONRequestDeserializer().has_body(request) - - def test_get_bind_addr_default_value(self): - expected = ('0.0.0.0', '123456') - actual = wsgi.get_bind_addr(default_port="123456") - self.assertEqual(expected, actual) - - -class ServerTest(test_utils.BaseTestCase): - def test_create_pool(self): - """Ensure the wsgi thread pool is an eventlet.greenpool.GreenPool.""" - actual = wsgi.Server(threads=1).create_pool() - self.assertIsInstance(actual, 
eventlet.greenpool.GreenPool) - - @mock.patch.object(wsgi.Server, 'configure_socket') - def test_http_keepalive(self, mock_configure_socket): - self.config(http_keepalive=False) - self.config(workers=0) - - server = wsgi.Server(threads=1) - server.sock = 'fake_socket' - # mocking eventlet.wsgi server method to check it is called with - # configured 'http_keepalive' value. - with mock.patch.object(eventlet.wsgi, - 'server') as mock_server: - fake_application = "fake-application" - server.start(fake_application, 0) - server.wait() - mock_server.assert_called_once_with('fake_socket', - fake_application, - log=server._logger, - debug=False, - custom_pool=server.pool, - keepalive=False, - socket_timeout=900) - - def test_number_of_workers(self): - """Ensure the default number of workers matches num cpus.""" - def pid(): - i = 1 - while True: - i = i + 1 - yield i - - with mock.patch.object(os, 'fork') as mock_fork: - mock_fork.side_effect = pid - server = wsgi.Server() - server.configure = mock.Mock() - fake_application = "fake-application" - server.start(fake_application, None) - self.assertEqual(processutils.get_worker_count(), - len(server.children)) - - -class TestHelpers(test_utils.BaseTestCase): - - def test_headers_are_unicode(self): - """ - Verifies that the headers returned by conversion code are unicode. - - Headers are passed via http in non-testing mode, which automatically - converts them to unicode. Verifying that the method does the - conversion proves that we aren't passing data that works in tests - but will fail in production. 
- """ - fixture = {'name': 'fake public image', - 'is_public': True, - 'size': 19, - 'location': "file:///tmp/glance-tests/2", - 'properties': {'distro': 'Ubuntu 10.04 LTS'}} - headers = utils.image_meta_to_http_headers(fixture) - for k, v in six.iteritems(headers): - self.assertIsInstance(v, six.text_type) - - def test_data_passed_properly_through_headers(self): - """ - Verifies that data is the same after being passed through headers - """ - fixture = {'is_public': True, - 'deleted': False, - 'name': None, - 'size': 19, - 'location': "file:///tmp/glance-tests/2", - 'properties': {'distro': 'Ubuntu 10.04 LTS'}} - headers = utils.image_meta_to_http_headers(fixture) - - class FakeResponse(object): - pass - - response = FakeResponse() - response.headers = headers - result = utils.get_image_meta_from_headers(response) - for k, v in six.iteritems(fixture): - if v is not None: - self.assertEqual(v, result[k]) - else: - self.assertNotIn(k, result) - - -class GetSocketTestCase(test_utils.BaseTestCase): - - def setUp(self): - super(GetSocketTestCase, self).setUp() - self.useFixture(fixtures.MonkeyPatch( - "glance.common.wsgi.get_bind_addr", - lambda x: ('192.168.0.13', 1234))) - addr_info_list = [(2, 1, 6, '', ('192.168.0.13', 80)), - (2, 2, 17, '', ('192.168.0.13', 80)), - (2, 3, 0, '', ('192.168.0.13', 80))] - self.useFixture(fixtures.MonkeyPatch( - "glance.common.wsgi.socket.getaddrinfo", - lambda *x: addr_info_list)) - self.useFixture(fixtures.MonkeyPatch( - "glance.common.wsgi.time.time", - mock.Mock(side_effect=[0, 1, 5, 10, 20, 35]))) - self.useFixture(fixtures.MonkeyPatch( - "glance.common.wsgi.utils.validate_key_cert", - lambda *x: None)) - wsgi.CONF.cert_file = '/etc/ssl/cert' - wsgi.CONF.key_file = '/etc/ssl/key' - wsgi.CONF.ca_file = '/etc/ssl/ca_cert' - wsgi.CONF.tcp_keepidle = 600 - - def test_correct_configure_socket(self): - mock_socket = mock.Mock() - self.useFixture(fixtures.MonkeyPatch( - 'glance.common.wsgi.ssl.wrap_socket', - mock_socket)) - 
self.useFixture(fixtures.MonkeyPatch( - 'glance.common.wsgi.eventlet.listen', - lambda *x, **y: mock_socket)) - server = wsgi.Server() - server.default_port = 1234 - server.configure_socket() - self.assertIn(mock.call.setsockopt( - socket.SOL_SOCKET, - socket.SO_REUSEADDR, - 1), mock_socket.mock_calls) - self.assertIn(mock.call.setsockopt( - socket.SOL_SOCKET, - socket.SO_KEEPALIVE, - 1), mock_socket.mock_calls) - if hasattr(socket, 'TCP_KEEPIDLE'): - self.assertIn(mock.call().setsockopt( - socket.IPPROTO_TCP, - socket.TCP_KEEPIDLE, - wsgi.CONF.tcp_keepidle), mock_socket.mock_calls) - - def test_get_socket_without_all_ssl_reqs(self): - wsgi.CONF.key_file = None - self.assertRaises(RuntimeError, wsgi.get_socket, 1234) - - def test_get_socket_with_bind_problems(self): - self.useFixture(fixtures.MonkeyPatch( - 'glance.common.wsgi.eventlet.listen', - mock.Mock(side_effect=( - [wsgi.socket.error(socket.errno.EADDRINUSE)] * 3 + [None])))) - self.useFixture(fixtures.MonkeyPatch( - 'glance.common.wsgi.ssl.wrap_socket', - lambda *x, **y: None)) - - self.assertRaises(RuntimeError, wsgi.get_socket, 1234) - - def test_get_socket_with_unexpected_socket_errno(self): - self.useFixture(fixtures.MonkeyPatch( - 'glance.common.wsgi.eventlet.listen', - mock.Mock(side_effect=wsgi.socket.error(socket.errno.ENOMEM)))) - self.useFixture(fixtures.MonkeyPatch( - 'glance.common.wsgi.ssl.wrap_socket', - lambda *x, **y: None)) - self.assertRaises(wsgi.socket.error, wsgi.get_socket, 1234) diff --git a/glance/tests/unit/fake_rados.py b/glance/tests/unit/fake_rados.py deleted file mode 100644 index f94d4485..00000000 --- a/glance/tests/unit/fake_rados.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright 2013 Canonical Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class mock_rados(object): - - class ioctx(object): - def __init__(self, *args, **kwargs): - pass - - def __enter__(self, *args, **kwargs): - return self - - def __exit__(self, *args, **kwargs): - return False - - def close(self, *args, **kwargs): - pass - - class Rados(object): - - def __init__(self, *args, **kwargs): - pass - - def __enter__(self, *args, **kwargs): - return self - - def __exit__(self, *args, **kwargs): - return False - - def connect(self, *args, **kwargs): - pass - - def open_ioctx(self, *args, **kwargs): - return mock_rados.ioctx() - - def shutdown(self, *args, **kwargs): - pass - - -class mock_rbd(object): - - class ImageExists(Exception): - pass - - class ImageBusy(Exception): - pass - - class ImageNotFound(Exception): - pass - - class Image(object): - - def __init__(self, *args, **kwargs): - pass - - def __enter__(self, *args, **kwargs): - return self - - def __exit__(self, *args, **kwargs): - pass - - def create_snap(self, *args, **kwargs): - pass - - def remove_snap(self, *args, **kwargs): - pass - - def protect_snap(self, *args, **kwargs): - pass - - def unprotect_snap(self, *args, **kwargs): - pass - - def read(self, *args, **kwargs): - raise NotImplementedError() - - def write(self, *args, **kwargs): - raise NotImplementedError() - - def resize(self, *args, **kwargs): - raise NotImplementedError() - - def discard(self, offset, length): - raise NotImplementedError() - - def close(self): - pass - - def list_snaps(self): - raise NotImplementedError() - - def parent_info(self): - raise NotImplementedError() - - def size(self): - 
raise NotImplementedError() - - class RBD(object): - - def __init__(self, *args, **kwargs): - pass - - def __enter__(self, *args, **kwargs): - return self - - def __exit__(self, *args, **kwargs): - return False - - def create(self, *args, **kwargs): - pass - - def remove(self, *args, **kwargs): - pass - - def list(self, *args, **kwargs): - raise NotImplementedError() - - def clone(self, *args, **kwargs): - raise NotImplementedError() diff --git a/glance/tests/unit/image_cache/__init__.py b/glance/tests/unit/image_cache/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/unit/image_cache/drivers/__init__.py b/glance/tests/unit/image_cache/drivers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/unit/image_cache/drivers/test_sqlite.py b/glance/tests/unit/image_cache/drivers/test_sqlite.py deleted file mode 100644 index eb5ed7d5..00000000 --- a/glance/tests/unit/image_cache/drivers/test_sqlite.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) 2017 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for the sqlite image_cache driver. 
-""" - -import os - -import ddt -import mock - -from glance.image_cache.drivers import sqlite -from glance.tests import utils - - -@ddt.ddt -class TestSqlite(utils.BaseTestCase): - - @ddt.data(True, False) - def test_delete_cached_file(self, throw_not_exists): - - with mock.patch.object(os, 'unlink') as mock_unlink: - if throw_not_exists: - mock_unlink.side_effect = OSError((2, 'File not found')) - - # Should not raise an exception in all cases - sqlite.delete_cached_file('/tmp/dummy_file') diff --git a/glance/tests/unit/test_auth.py b/glance/tests/unit/test_auth.py deleted file mode 100644 index 56a62d47..00000000 --- a/glance/tests/unit/test_auth.py +++ /dev/null @@ -1,1095 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_serialization import jsonutils -from oslotest import moxstubout -from six.moves import http_client as http -import webob - -from glance.api import authorization -from glance.common import auth -from glance.common import exception -from glance.common import timeutils -import glance.domain -from glance.tests.unit import utils as unittest_utils -from glance.tests import utils - - -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' -TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' - -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' -UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc' - - -class FakeResponse(object): - """ - Simple class that masks the inconsistency between - webob.Response.status_int and httplib.Response.status - """ - def __init__(self, resp): - self.resp = resp - - def __getitem__(self, key): - return self.resp.headers.get(key) - - @property - def status(self): - return self.resp.status_int - - -class V2Token(object): - def __init__(self): - self.tok = self.base_token - - def add_service_no_type(self): - catalog = self.tok['access']['serviceCatalog'] - service_type = {"name": "glance_no_type"} - catalog.append(service_type) - service = catalog[-1] - service['endpoints'] = [self.base_endpoint] - - def add_service(self, s_type, region_list=None): - if region_list is None: - region_list = [] - - catalog = self.tok['access']['serviceCatalog'] - service_type = {"type": s_type, "name": "glance"} - catalog.append(service_type) - service = catalog[-1] - endpoint_list = [] - - if region_list == []: - endpoint_list.append(self.base_endpoint) - else: - for region in region_list: - endpoint = self.base_endpoint - endpoint['region'] = region - endpoint_list.append(endpoint) - - service['endpoints'] = endpoint_list - - @property - def token(self): - return self.tok - - @property - def base_endpoint(self): - return { - "adminURL": "http://localhost:9292", - "internalURL": "http://localhost:9292", - "publicURL": "http://localhost:9292" - } - - @property - def 
base_token(self): - return { - "access": { - "token": { - "expires": "2010-11-23T16:40:53.321584", - "id": "5c7f8799-2e54-43e4-851b-31f81871b6c", - "tenant": {"id": "1", "name": "tenant-ok"} - }, - "serviceCatalog": [ - ], - "user": { - "id": "2", - "roles": [{ - "tenantId": "1", - "id": "1", - "name": "Admin" - }], - "name": "joeadmin" - } - } - } - - -class TestKeystoneAuthPlugin(utils.BaseTestCase): - """Test that the Keystone auth plugin works properly""" - - def setUp(self): - super(TestKeystoneAuthPlugin, self).setUp() - mox_fixture = self.useFixture(moxstubout.MoxStubout()) - self.stubs = mox_fixture.stubs - - def test_get_plugin_from_strategy_keystone(self): - strategy = auth.get_plugin_from_strategy('keystone') - self.assertIsInstance(strategy, auth.KeystoneStrategy) - self.assertTrue(strategy.configure_via_auth) - - def test_get_plugin_from_strategy_keystone_configure_via_auth_false(self): - strategy = auth.get_plugin_from_strategy('keystone', - configure_via_auth=False) - self.assertIsInstance(strategy, auth.KeystoneStrategy) - self.assertFalse(strategy.configure_via_auth) - - def test_required_creds(self): - """ - Test that plugin created without required - credential pieces raises an exception - """ - bad_creds = [ - {}, # missing everything - { - 'username': 'user1', - 'strategy': 'keystone', - 'password': 'pass' - }, # missing auth_url - { - 'password': 'pass', - 'strategy': 'keystone', - 'auth_url': 'http://localhost/v1' - }, # missing username - { - 'username': 'user1', - 'strategy': 'keystone', - 'auth_url': 'http://localhost/v1' - }, # missing password - { - 'username': 'user1', - 'password': 'pass', - 'auth_url': 'http://localhost/v1' - }, # missing strategy - { - 'username': 'user1', - 'password': 'pass', - 'strategy': 'keystone', - 'auth_url': 'http://localhost/v2.0/' - }, # v2.0: missing tenant - { - 'username': None, - 'password': 'pass', - 'auth_url': 'http://localhost/v2.0/' - }, # None parameter - { - 'username': 'user1', - 'password': 
'pass', - 'auth_url': 'http://localhost/v2.0/', - 'tenant': None - } # None tenant - ] - for creds in bad_creds: - try: - plugin = auth.KeystoneStrategy(creds) - plugin.authenticate() - self.fail("Failed to raise correct exception when supplying " - "bad credentials: %r" % creds) - except exception.MissingCredentialError: - continue # Expected - - def test_invalid_auth_url_v1(self): - """ - Test that a 400 during authenticate raises exception.AuthBadRequest - """ - def fake_do_request(*args, **kwargs): - resp = webob.Response() - resp.status = http.BAD_REQUEST - return FakeResponse(resp), "" - - self.stubs.Set(auth.KeystoneStrategy, '_do_request', fake_do_request) - - bad_creds = { - 'username': 'user1', - 'auth_url': 'http://localhost/badauthurl/', - 'password': 'pass', - 'strategy': 'keystone', - 'region': 'RegionOne' - } - - plugin = auth.KeystoneStrategy(bad_creds) - self.assertRaises(exception.AuthBadRequest, plugin.authenticate) - - def test_invalid_auth_url_v2(self): - """ - Test that a 400 during authenticate raises exception.AuthBadRequest - """ - def fake_do_request(*args, **kwargs): - resp = webob.Response() - resp.status = http.BAD_REQUEST - return FakeResponse(resp), "" - - self.stubs.Set(auth.KeystoneStrategy, '_do_request', fake_do_request) - - bad_creds = { - 'username': 'user1', - 'auth_url': 'http://localhost/badauthurl/v2.0/', - 'password': 'pass', - 'tenant': 'tenant1', - 'strategy': 'keystone', - 'region': 'RegionOne' - } - - plugin = auth.KeystoneStrategy(bad_creds) - self.assertRaises(exception.AuthBadRequest, plugin.authenticate) - - def test_v1_auth(self): - """Test v1 auth code paths""" - def fake_do_request(cls, url, method, headers=None, body=None): - if url.find("2.0") != -1: - self.fail("Invalid v1.0 token path (%s)" % url) - headers = headers or {} - - resp = webob.Response() - - if (headers.get('X-Auth-User') != 'user1' or - headers.get('X-Auth-Key') != 'pass'): - resp.status = http.UNAUTHORIZED - else: - resp.status = http.OK - 
resp.headers.update({"x-image-management-url": "example.com"}) - - return FakeResponse(resp), "" - - self.stubs.Set(auth.KeystoneStrategy, '_do_request', fake_do_request) - - unauthorized_creds = [ - { - 'username': 'wronguser', - 'auth_url': 'http://localhost/badauthurl/', - 'strategy': 'keystone', - 'region': 'RegionOne', - 'password': 'pass' - }, # wrong username - { - 'username': 'user1', - 'auth_url': 'http://localhost/badauthurl/', - 'strategy': 'keystone', - 'region': 'RegionOne', - 'password': 'badpass' - }, # bad password... - ] - - for creds in unauthorized_creds: - try: - plugin = auth.KeystoneStrategy(creds) - plugin.authenticate() - self.fail("Failed to raise NotAuthenticated when supplying " - "bad credentials: %r" % creds) - except exception.NotAuthenticated: - continue # Expected - - no_strategy_creds = { - 'username': 'user1', - 'auth_url': 'http://localhost/redirect/', - 'password': 'pass', - 'region': 'RegionOne' - } - - try: - plugin = auth.KeystoneStrategy(no_strategy_creds) - plugin.authenticate() - self.fail("Failed to raise MissingCredentialError when " - "supplying no strategy: %r" % no_strategy_creds) - except exception.MissingCredentialError: - pass # Expected - - good_creds = [ - { - 'username': 'user1', - 'auth_url': 'http://localhost/redirect/', - 'password': 'pass', - 'strategy': 'keystone', - 'region': 'RegionOne' - } - ] - - for creds in good_creds: - plugin = auth.KeystoneStrategy(creds) - self.assertIsNone(plugin.authenticate()) - self.assertEqual("example.com", plugin.management_url) - - # Assert it does not update management_url via auth response - for creds in good_creds: - plugin = auth.KeystoneStrategy(creds, configure_via_auth=False) - self.assertIsNone(plugin.authenticate()) - self.assertIsNone(plugin.management_url) - - def test_v2_auth(self): - """Test v2 auth code paths""" - mock_token = None - - def fake_do_request(cls, url, method, headers=None, body=None): - if (not url.rstrip('/').endswith('v2.0/tokens') or - 
url.count("2.0") != 1): - self.fail("Invalid v2.0 token path (%s)" % url) - - creds = jsonutils.loads(body)['auth'] - username = creds['passwordCredentials']['username'] - password = creds['passwordCredentials']['password'] - tenant = creds['tenantName'] - resp = webob.Response() - - if (username != 'user1' or password != 'pass' or - tenant != 'tenant-ok'): - resp.status = http.UNAUTHORIZED - else: - resp.status = http.OK - body = mock_token.token - - return FakeResponse(resp), jsonutils.dumps(body) - - mock_token = V2Token() - mock_token.add_service('image', ['RegionOne']) - self.stubs.Set(auth.KeystoneStrategy, '_do_request', fake_do_request) - - unauthorized_creds = [ - { - 'username': 'wronguser', - 'auth_url': 'http://localhost/v2.0', - 'password': 'pass', - 'tenant': 'tenant-ok', - 'strategy': 'keystone', - 'region': 'RegionOne' - }, # wrong username - { - 'username': 'user1', - 'auth_url': 'http://localhost/v2.0', - 'password': 'badpass', - 'tenant': 'tenant-ok', - 'strategy': 'keystone', - 'region': 'RegionOne' - }, # bad password... - { - 'username': 'user1', - 'auth_url': 'http://localhost/v2.0', - 'password': 'pass', - 'tenant': 'carterhayes', - 'strategy': 'keystone', - 'region': 'RegionOne' - }, # bad tenant... 
- ] - - for creds in unauthorized_creds: - try: - plugin = auth.KeystoneStrategy(creds) - plugin.authenticate() - self.fail("Failed to raise NotAuthenticated when supplying " - "bad credentials: %r" % creds) - except exception.NotAuthenticated: - continue # Expected - - no_region_creds = { - 'username': 'user1', - 'tenant': 'tenant-ok', - 'auth_url': 'http://localhost/redirect/v2.0/', - 'password': 'pass', - 'strategy': 'keystone' - } - - plugin = auth.KeystoneStrategy(no_region_creds) - self.assertIsNone(plugin.authenticate()) - self.assertEqual('http://localhost:9292', plugin.management_url) - - # Add another image service, with a different region - mock_token.add_service('image', ['RegionTwo']) - - try: - plugin = auth.KeystoneStrategy(no_region_creds) - plugin.authenticate() - self.fail("Failed to raise RegionAmbiguity when no region present " - "and multiple regions exist: %r" % no_region_creds) - except exception.RegionAmbiguity: - pass # Expected - - wrong_region_creds = { - 'username': 'user1', - 'tenant': 'tenant-ok', - 'auth_url': 'http://localhost/redirect/v2.0/', - 'password': 'pass', - 'strategy': 'keystone', - 'region': 'NonExistentRegion' - } - - try: - plugin = auth.KeystoneStrategy(wrong_region_creds) - plugin.authenticate() - self.fail("Failed to raise NoServiceEndpoint when supplying " - "wrong region: %r" % wrong_region_creds) - except exception.NoServiceEndpoint: - pass # Expected - - no_strategy_creds = { - 'username': 'user1', - 'tenant': 'tenant-ok', - 'auth_url': 'http://localhost/redirect/v2.0/', - 'password': 'pass', - 'region': 'RegionOne' - } - - try: - plugin = auth.KeystoneStrategy(no_strategy_creds) - plugin.authenticate() - self.fail("Failed to raise MissingCredentialError when " - "supplying no strategy: %r" % no_strategy_creds) - except exception.MissingCredentialError: - pass # Expected - - bad_strategy_creds = { - 'username': 'user1', - 'tenant': 'tenant-ok', - 'auth_url': 'http://localhost/redirect/v2.0/', - 'password': 'pass', 
- 'region': 'RegionOne', - 'strategy': 'keypebble' - } - - try: - plugin = auth.KeystoneStrategy(bad_strategy_creds) - plugin.authenticate() - self.fail("Failed to raise BadAuthStrategy when supplying " - "bad auth strategy: %r" % bad_strategy_creds) - except exception.BadAuthStrategy: - pass # Expected - - mock_token = V2Token() - mock_token.add_service('image', ['RegionOne', 'RegionTwo']) - - good_creds = [ - { - 'username': 'user1', - 'auth_url': 'http://localhost/v2.0/', - 'password': 'pass', - 'tenant': 'tenant-ok', - 'strategy': 'keystone', - 'region': 'RegionOne' - }, # auth_url with trailing '/' - { - 'username': 'user1', - 'auth_url': 'http://localhost/v2.0', - 'password': 'pass', - 'tenant': 'tenant-ok', - 'strategy': 'keystone', - 'region': 'RegionOne' - }, # auth_url without trailing '/' - { - 'username': 'user1', - 'auth_url': 'http://localhost/v2.0', - 'password': 'pass', - 'tenant': 'tenant-ok', - 'strategy': 'keystone', - 'region': 'RegionTwo' - } # Second region - ] - - for creds in good_creds: - plugin = auth.KeystoneStrategy(creds) - self.assertIsNone(plugin.authenticate()) - self.assertEqual('http://localhost:9292', plugin.management_url) - - ambiguous_region_creds = { - 'username': 'user1', - 'auth_url': 'http://localhost/v2.0/', - 'password': 'pass', - 'tenant': 'tenant-ok', - 'strategy': 'keystone', - 'region': 'RegionOne' - } - - mock_token = V2Token() - # Add two identical services - mock_token.add_service('image', ['RegionOne']) - mock_token.add_service('image', ['RegionOne']) - - try: - plugin = auth.KeystoneStrategy(ambiguous_region_creds) - plugin.authenticate() - self.fail("Failed to raise RegionAmbiguity when " - "non-unique regions exist: %r" % ambiguous_region_creds) - except exception.RegionAmbiguity: - pass - - mock_token = V2Token() - mock_token.add_service('bad-image', ['RegionOne']) - - good_creds = { - 'username': 'user1', - 'auth_url': 'http://localhost/v2.0/', - 'password': 'pass', - 'tenant': 'tenant-ok', - 'strategy': 
'keystone', - 'region': 'RegionOne' - } - - try: - plugin = auth.KeystoneStrategy(good_creds) - plugin.authenticate() - self.fail("Failed to raise NoServiceEndpoint when bad service " - "type encountered") - except exception.NoServiceEndpoint: - pass - - mock_token = V2Token() - mock_token.add_service_no_type() - - try: - plugin = auth.KeystoneStrategy(good_creds) - plugin.authenticate() - self.fail("Failed to raise NoServiceEndpoint when bad service " - "type encountered") - except exception.NoServiceEndpoint: - pass - - try: - plugin = auth.KeystoneStrategy(good_creds, - configure_via_auth=False) - plugin.authenticate() - except exception.NoServiceEndpoint: - self.fail("NoServiceEndpoint was raised when authenticate " - "should not check for endpoint.") - - -class TestEndpoints(utils.BaseTestCase): - - def setUp(self): - super(TestEndpoints, self).setUp() - - self.service_catalog = [ - { - 'endpoint_links': [], - 'endpoints': [ - { - 'adminURL': 'http://localhost:8080/', - 'region': 'RegionOne', - 'internalURL': 'http://internalURL/', - 'publicURL': 'http://publicURL/', - }, - ], - 'type': 'object-store', - 'name': 'Object Storage Service', - } - ] - - def test_get_endpoint_with_custom_server_type(self): - endpoint = auth.get_endpoint(self.service_catalog, - service_type='object-store') - self.assertEqual('http://publicURL/', endpoint) - - def test_get_endpoint_with_custom_endpoint_type(self): - endpoint = auth.get_endpoint(self.service_catalog, - service_type='object-store', - endpoint_type='internalURL') - self.assertEqual('http://internalURL/', endpoint) - - def test_get_endpoint_raises_with_invalid_service_type(self): - self.assertRaises(exception.NoServiceEndpoint, - auth.get_endpoint, - self.service_catalog, - service_type='foo') - - def test_get_endpoint_raises_with_invalid_endpoint_type(self): - self.assertRaises(exception.NoServiceEndpoint, - auth.get_endpoint, - self.service_catalog, - service_type='object-store', - endpoint_type='foo') - - def 
test_get_endpoint_raises_with_invalid_endpoint_region(self): - self.assertRaises(exception.NoServiceEndpoint, - auth.get_endpoint, - self.service_catalog, - service_type='object-store', - endpoint_region='foo', - endpoint_type='internalURL') - - -class TestImageMutability(utils.BaseTestCase): - - def setUp(self): - super(TestImageMutability, self).setUp() - self.image_factory = glance.domain.ImageFactory() - - def _is_mutable(self, tenant, owner, is_admin=False): - context = glance.context.RequestContext(tenant=tenant, - is_admin=is_admin) - image = self.image_factory.new_image(owner=owner) - return authorization.is_image_mutable(context, image) - - def test_admin_everything_mutable(self): - self.assertTrue(self._is_mutable(None, None, is_admin=True)) - self.assertTrue(self._is_mutable(None, TENANT1, is_admin=True)) - self.assertTrue(self._is_mutable(TENANT1, None, is_admin=True)) - self.assertTrue(self._is_mutable(TENANT1, TENANT1, is_admin=True)) - self.assertTrue(self._is_mutable(TENANT1, TENANT2, is_admin=True)) - - def test_no_tenant_nothing_mutable(self): - self.assertFalse(self._is_mutable(None, None)) - self.assertFalse(self._is_mutable(None, TENANT1)) - - def test_regular_user(self): - self.assertFalse(self._is_mutable(TENANT1, None)) - self.assertFalse(self._is_mutable(TENANT1, TENANT2)) - self.assertTrue(self._is_mutable(TENANT1, TENANT1)) - - -class TestImmutableImage(utils.BaseTestCase): - def setUp(self): - super(TestImmutableImage, self).setUp() - image_factory = glance.domain.ImageFactory() - self.context = glance.context.RequestContext(tenant=TENANT1) - image = image_factory.new_image( - image_id=UUID1, - name='Marvin', - owner=TENANT1, - disk_format='raw', - container_format='bare', - extra_properties={'foo': 'bar'}, - tags=['ping', 'pong'], - ) - self.image = authorization.ImmutableImageProxy(image, self.context) - - def _test_change(self, attr, value): - self.assertRaises(exception.Forbidden, - setattr, self.image, attr, value) - 
self.assertRaises(exception.Forbidden, - delattr, self.image, attr) - - def test_change_id(self): - self._test_change('image_id', UUID2) - - def test_change_name(self): - self._test_change('name', 'Freddie') - - def test_change_owner(self): - self._test_change('owner', TENANT2) - - def test_change_min_disk(self): - self._test_change('min_disk', 100) - - def test_change_min_ram(self): - self._test_change('min_ram', 1024) - - def test_change_disk_format(self): - self._test_change('disk_format', 'vhd') - - def test_change_container_format(self): - self._test_change('container_format', 'ova') - - def test_change_visibility(self): - self._test_change('visibility', 'public') - - def test_change_status(self): - self._test_change('status', 'active') - - def test_change_created_at(self): - self._test_change('created_at', timeutils.utcnow()) - - def test_change_updated_at(self): - self._test_change('updated_at', timeutils.utcnow()) - - def test_change_locations(self): - self._test_change('locations', ['http://a/b/c']) - self.assertRaises(exception.Forbidden, - self.image.locations.append, 'http://a/b/c') - self.assertRaises(exception.Forbidden, - self.image.locations.extend, ['http://a/b/c']) - self.assertRaises(exception.Forbidden, - self.image.locations.insert, 'foo') - self.assertRaises(exception.Forbidden, - self.image.locations.pop) - self.assertRaises(exception.Forbidden, - self.image.locations.remove, 'foo') - self.assertRaises(exception.Forbidden, - self.image.locations.reverse) - self.assertRaises(exception.Forbidden, - self.image.locations.sort) - self.assertRaises(exception.Forbidden, - self.image.locations.__delitem__, 0) - self.assertRaises(exception.Forbidden, - self.image.locations.__delslice__, 0, 2) - self.assertRaises(exception.Forbidden, - self.image.locations.__setitem__, 0, 'foo') - self.assertRaises(exception.Forbidden, - self.image.locations.__setslice__, - 0, 2, ['foo', 'bar']) - self.assertRaises(exception.Forbidden, - self.image.locations.__iadd__, 
'foo') - self.assertRaises(exception.Forbidden, - self.image.locations.__imul__, 2) - - def test_change_size(self): - self._test_change('size', 32) - - def test_change_tags(self): - self.assertRaises(exception.Forbidden, - delattr, self.image, 'tags') - self.assertRaises(exception.Forbidden, - setattr, self.image, 'tags', ['king', 'kong']) - self.assertRaises(exception.Forbidden, self.image.tags.pop) - self.assertRaises(exception.Forbidden, self.image.tags.clear) - self.assertRaises(exception.Forbidden, self.image.tags.add, 'king') - self.assertRaises(exception.Forbidden, self.image.tags.remove, 'ping') - self.assertRaises(exception.Forbidden, - self.image.tags.update, set(['king', 'kong'])) - self.assertRaises(exception.Forbidden, - self.image.tags.intersection_update, set([])) - self.assertRaises(exception.Forbidden, - self.image.tags.difference_update, set([])) - self.assertRaises(exception.Forbidden, - self.image.tags.symmetric_difference_update, - set([])) - - def test_change_properties(self): - self.assertRaises(exception.Forbidden, - delattr, self.image, 'extra_properties') - self.assertRaises(exception.Forbidden, - setattr, self.image, 'extra_properties', {}) - self.assertRaises(exception.Forbidden, - self.image.extra_properties.__delitem__, 'foo') - self.assertRaises(exception.Forbidden, - self.image.extra_properties.__setitem__, 'foo', 'b') - self.assertRaises(exception.Forbidden, - self.image.extra_properties.__setitem__, 'z', 'j') - self.assertRaises(exception.Forbidden, - self.image.extra_properties.pop) - self.assertRaises(exception.Forbidden, - self.image.extra_properties.popitem) - self.assertRaises(exception.Forbidden, - self.image.extra_properties.setdefault, 'p', 'j') - self.assertRaises(exception.Forbidden, - self.image.extra_properties.update, {}) - - def test_delete(self): - self.assertRaises(exception.Forbidden, self.image.delete) - - def test_set_data(self): - self.assertRaises(exception.Forbidden, - self.image.set_data, 'blah', 4) - - def 
test_deactivate_image(self): - self.assertRaises(exception.Forbidden, self.image.deactivate) - - def test_reactivate_image(self): - self.assertRaises(exception.Forbidden, self.image.reactivate) - - def test_get_data(self): - class FakeImage(object): - def get_data(self): - return 'tiddlywinks' - - image = glance.api.authorization.ImmutableImageProxy( - FakeImage(), self.context) - self.assertEqual('tiddlywinks', image.get_data()) - - -class TestImageFactoryProxy(utils.BaseTestCase): - def setUp(self): - super(TestImageFactoryProxy, self).setUp() - factory = glance.domain.ImageFactory() - self.context = glance.context.RequestContext(tenant=TENANT1) - self.image_factory = authorization.ImageFactoryProxy(factory, - self.context) - - def test_default_owner_is_set(self): - image = self.image_factory.new_image() - self.assertEqual(TENANT1, image.owner) - - def test_wrong_owner_cannot_be_set(self): - self.assertRaises(exception.Forbidden, - self.image_factory.new_image, owner=TENANT2) - - def test_cannot_set_owner_to_none(self): - self.assertRaises(exception.Forbidden, - self.image_factory.new_image, owner=None) - - def test_admin_can_set_any_owner(self): - self.context.is_admin = True - image = self.image_factory.new_image(owner=TENANT2) - self.assertEqual(TENANT2, image.owner) - - def test_admin_can_set_owner_to_none(self): - self.context.is_admin = True - image = self.image_factory.new_image(owner=None) - self.assertIsNone(image.owner) - - def test_admin_still_gets_default_tenant(self): - self.context.is_admin = True - image = self.image_factory.new_image() - self.assertEqual(TENANT1, image.owner) - - -class TestImageRepoProxy(utils.BaseTestCase): - - class ImageRepoStub(object): - def __init__(self, fixtures): - self.fixtures = fixtures - - def get(self, image_id): - for f in self.fixtures: - if f.image_id == image_id: - return f - else: - raise ValueError(image_id) - - def list(self, *args, **kwargs): - return self.fixtures - - def setUp(self): - 
super(TestImageRepoProxy, self).setUp() - image_factory = glance.domain.ImageFactory() - self.fixtures = [ - image_factory.new_image(owner=TENANT1), - image_factory.new_image(owner=TENANT2, visibility='public'), - image_factory.new_image(owner=TENANT2), - ] - self.context = glance.context.RequestContext(tenant=TENANT1) - image_repo = self.ImageRepoStub(self.fixtures) - self.image_repo = authorization.ImageRepoProxy(image_repo, - self.context) - - def test_get_mutable_image(self): - image = self.image_repo.get(self.fixtures[0].image_id) - self.assertEqual(image.image_id, self.fixtures[0].image_id) - - def test_get_immutable_image(self): - image = self.image_repo.get(self.fixtures[1].image_id) - self.assertRaises(exception.Forbidden, - setattr, image, 'name', 'Vince') - - def test_list(self): - images = self.image_repo.list() - self.assertEqual(images[0].image_id, self.fixtures[0].image_id) - self.assertRaises(exception.Forbidden, - setattr, images[1], 'name', 'Wally') - self.assertRaises(exception.Forbidden, - setattr, images[2], 'name', 'Calvin') - - -class TestImmutableTask(utils.BaseTestCase): - def setUp(self): - super(TestImmutableTask, self).setUp() - task_factory = glance.domain.TaskFactory() - self.context = glance.context.RequestContext(tenant=TENANT2) - task_type = 'import' - owner = TENANT2 - task = task_factory.new_task(task_type, owner) - self.task = authorization.ImmutableTaskProxy(task) - - def _test_change(self, attr, value): - self.assertRaises( - exception.Forbidden, - setattr, - self.task, - attr, - value - ) - self.assertRaises( - exception.Forbidden, - delattr, - self.task, - attr - ) - - def test_change_id(self): - self._test_change('task_id', UUID2) - - def test_change_type(self): - self._test_change('type', 'fake') - - def test_change_status(self): - self._test_change('status', 'success') - - def test_change_owner(self): - self._test_change('owner', 'fake') - - def test_change_expires_at(self): - self._test_change('expires_at', 'fake') - - 
def test_change_created_at(self): - self._test_change('created_at', 'fake') - - def test_change_updated_at(self): - self._test_change('updated_at', 'fake') - - def test_begin_processing(self): - self.assertRaises( - exception.Forbidden, - self.task.begin_processing - ) - - def test_succeed(self): - self.assertRaises( - exception.Forbidden, - self.task.succeed, - 'result' - ) - - def test_fail(self): - self.assertRaises( - exception.Forbidden, - self.task.fail, - 'message' - ) - - -class TestImmutableTaskStub(utils.BaseTestCase): - def setUp(self): - super(TestImmutableTaskStub, self).setUp() - task_factory = glance.domain.TaskFactory() - self.context = glance.context.RequestContext(tenant=TENANT2) - task_type = 'import' - owner = TENANT2 - task = task_factory.new_task(task_type, owner) - self.task = authorization.ImmutableTaskStubProxy(task) - - def _test_change(self, attr, value): - self.assertRaises( - exception.Forbidden, - setattr, - self.task, - attr, - value - ) - self.assertRaises( - exception.Forbidden, - delattr, - self.task, - attr - ) - - def test_change_id(self): - self._test_change('task_id', UUID2) - - def test_change_type(self): - self._test_change('type', 'fake') - - def test_change_status(self): - self._test_change('status', 'success') - - def test_change_owner(self): - self._test_change('owner', 'fake') - - def test_change_expires_at(self): - self._test_change('expires_at', 'fake') - - def test_change_created_at(self): - self._test_change('created_at', 'fake') - - def test_change_updated_at(self): - self._test_change('updated_at', 'fake') - - -class TestTaskFactoryProxy(utils.BaseTestCase): - def setUp(self): - super(TestTaskFactoryProxy, self).setUp() - factory = glance.domain.TaskFactory() - self.context = glance.context.RequestContext(tenant=TENANT1) - self.context_owner_is_none = glance.context.RequestContext() - self.task_factory = authorization.TaskFactoryProxy( - factory, - self.context - ) - self.task_type = 'import' - self.task_input = 
'{"loc": "fake"}' - self.owner = 'foo' - - self.request1 = unittest_utils.get_fake_request(tenant=TENANT1) - self.request2 = unittest_utils.get_fake_request(tenant=TENANT2) - - def test_task_create_default_owner(self): - owner = self.request1.context.owner - task = self.task_factory.new_task(task_type=self.task_type, - owner=owner) - self.assertEqual(TENANT1, task.owner) - - def test_task_create_wrong_owner(self): - self.assertRaises(exception.Forbidden, - self.task_factory.new_task, - task_type=self.task_type, - task_input=self.task_input, - owner=self.owner) - - def test_task_create_owner_as_None(self): - self.assertRaises(exception.Forbidden, - self.task_factory.new_task, - task_type=self.task_type, - task_input=self.task_input, - owner=None) - - def test_task_create_admin_context_owner_as_None(self): - self.context.is_admin = True - self.assertRaises(exception.Forbidden, - self.task_factory.new_task, - task_type=self.task_type, - task_input=self.task_input, - owner=None) - - -class TestTaskRepoProxy(utils.BaseTestCase): - - class TaskRepoStub(object): - def __init__(self, fixtures): - self.fixtures = fixtures - - def get(self, task_id): - for f in self.fixtures: - if f.task_id == task_id: - return f - else: - raise ValueError(task_id) - - class TaskStubRepoStub(object): - def __init__(self, fixtures): - self.fixtures = fixtures - - def list(self, *args, **kwargs): - return self.fixtures - - def setUp(self): - super(TestTaskRepoProxy, self).setUp() - task_factory = glance.domain.TaskFactory() - task_type = 'import' - owner = None - self.fixtures = [ - task_factory.new_task(task_type, owner), - task_factory.new_task(task_type, owner), - task_factory.new_task(task_type, owner), - ] - self.context = glance.context.RequestContext(tenant=TENANT1) - task_repo = self.TaskRepoStub(self.fixtures) - task_stub_repo = self.TaskStubRepoStub(self.fixtures) - self.task_repo = authorization.TaskRepoProxy( - task_repo, - self.context - ) - self.task_stub_repo = 
authorization.TaskStubRepoProxy( - task_stub_repo, - self.context - ) - - def test_get_mutable_task(self): - task = self.task_repo.get(self.fixtures[0].task_id) - self.assertEqual(task.task_id, self.fixtures[0].task_id) - - def test_get_immutable_task(self): - task_id = self.fixtures[1].task_id - task = self.task_repo.get(task_id) - self.assertRaises(exception.Forbidden, - setattr, task, 'input', 'foo') - - def test_list(self): - tasks = self.task_stub_repo.list() - self.assertEqual(tasks[0].task_id, self.fixtures[0].task_id) - self.assertRaises(exception.Forbidden, - setattr, - tasks[1], - 'owner', - 'foo') - self.assertRaises(exception.Forbidden, - setattr, - tasks[2], - 'owner', - 'foo') diff --git a/glance/tests/unit/test_cache_middleware.py b/glance/tests/unit/test_cache_middleware.py deleted file mode 100644 index 04b8ac7c..00000000 --- a/glance/tests/unit/test_cache_middleware.py +++ /dev/null @@ -1,865 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_policy import policy -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import http_client as http -from six.moves import range -import testtools -import webob - -import glance.api.middleware.cache -import glance.api.policy -from glance.common import exception -from glance import context -import glance.registry.client.v1.api as registry -from glance.tests.unit import base -from glance.tests.unit import utils as unit_test_utils - - -class ImageStub(object): - def __init__(self, image_id, extra_properties=None, visibility='private'): - if extra_properties is None: - extra_properties = {} - self.image_id = image_id - self.visibility = visibility - self.status = 'active' - self.extra_properties = extra_properties - self.checksum = 'c1234' - self.size = 123456789 - - -class TestCacheMiddlewareURLMatching(testtools.TestCase): - def test_v1_no_match_detail(self): - req = webob.Request.blank('/v1/images/detail') - out = glance.api.middleware.cache.CacheFilter._match_request(req) - self.assertIsNone(out) - - def test_v1_no_match_detail_with_query_params(self): - req = webob.Request.blank('/v1/images/detail?limit=10') - out = glance.api.middleware.cache.CacheFilter._match_request(req) - self.assertIsNone(out) - - def test_v1_match_id_with_query_param(self): - req = webob.Request.blank('/v1/images/asdf?ping=pong') - out = glance.api.middleware.cache.CacheFilter._match_request(req) - self.assertEqual(('v1', 'GET', 'asdf'), out) - - def test_v2_match_id(self): - req = webob.Request.blank('/v2/images/asdf/file') - out = glance.api.middleware.cache.CacheFilter._match_request(req) - self.assertEqual(('v2', 'GET', 'asdf'), out) - - def test_v2_no_match_bad_path(self): - req = webob.Request.blank('/v2/images/asdf') - out = glance.api.middleware.cache.CacheFilter._match_request(req) - self.assertIsNone(out) - - def test_no_match_unknown_version(self): - req = webob.Request.blank('/v3/images/asdf') - out = 
glance.api.middleware.cache.CacheFilter._match_request(req) - self.assertIsNone(out) - - -class TestCacheMiddlewareRequestStashCacheInfo(testtools.TestCase): - def setUp(self): - super(TestCacheMiddlewareRequestStashCacheInfo, self).setUp() - self.request = webob.Request.blank('') - self.middleware = glance.api.middleware.cache.CacheFilter - - def test_stash_cache_request_info(self): - self.middleware._stash_request_info(self.request, 'asdf', 'GET', 'v2') - self.assertEqual('asdf', self.request.environ['api.cache.image_id']) - self.assertEqual('GET', self.request.environ['api.cache.method']) - self.assertEqual('v2', self.request.environ['api.cache.version']) - - def test_fetch_cache_request_info(self): - self.request.environ['api.cache.image_id'] = 'asdf' - self.request.environ['api.cache.method'] = 'GET' - self.request.environ['api.cache.version'] = 'v2' - (image_id, method, version) = self.middleware._fetch_request_info( - self.request) - self.assertEqual('asdf', image_id) - self.assertEqual('GET', method) - self.assertEqual('v2', version) - - def test_fetch_cache_request_info_unset(self): - out = self.middleware._fetch_request_info(self.request) - self.assertIsNone(out) - - -class ChecksumTestCacheFilter(glance.api.middleware.cache.CacheFilter): - def __init__(self): - class DummyCache(object): - def get_caching_iter(self, image_id, image_checksum, app_iter): - self.image_checksum = image_checksum - - self.cache = DummyCache() - self.policy = unit_test_utils.FakePolicyEnforcer() - - -class TestCacheMiddlewareChecksumVerification(base.IsolatedUnitTest): - def setUp(self): - super(TestCacheMiddlewareChecksumVerification, self).setUp() - self.context = context.RequestContext(is_admin=True) - self.request = webob.Request.blank('') - self.request.context = self.context - - def test_checksum_v1_header(self): - cache_filter = ChecksumTestCacheFilter() - headers = {"x-image-meta-checksum": "1234567890"} - resp = webob.Response(request=self.request, headers=headers) - 
cache_filter._process_GET_response(resp, None) - - self.assertEqual("1234567890", cache_filter.cache.image_checksum) - - def test_checksum_v2_header(self): - cache_filter = ChecksumTestCacheFilter() - headers = { - "x-image-meta-checksum": "1234567890", - "Content-MD5": "abcdefghi" - } - resp = webob.Response(request=self.request, headers=headers) - cache_filter._process_GET_response(resp, None) - - self.assertEqual("abcdefghi", cache_filter.cache.image_checksum) - - def test_checksum_missing_header(self): - cache_filter = ChecksumTestCacheFilter() - resp = webob.Response(request=self.request) - cache_filter._process_GET_response(resp, None) - - self.assertIsNone(cache_filter.cache.image_checksum) - - -class FakeImageSerializer(object): - def show(self, response, raw_response): - return True - - -class ProcessRequestTestCacheFilter(glance.api.middleware.cache.CacheFilter): - def __init__(self): - self.serializer = FakeImageSerializer() - - class DummyCache(object): - def __init__(self): - self.deleted_images = [] - - def is_cached(self, image_id): - return True - - def get_caching_iter(self, image_id, image_checksum, app_iter): - pass - - def delete_cached_image(self, image_id): - self.deleted_images.append(image_id) - - def get_image_size(self, image_id): - pass - - self.cache = DummyCache() - self.policy = unit_test_utils.FakePolicyEnforcer() - - -class TestCacheMiddlewareProcessRequest(base.IsolatedUnitTest): - def _enforcer_from_rules(self, unparsed_rules): - rules = policy.Rules.from_dict(unparsed_rules) - enforcer = glance.api.policy.Enforcer() - enforcer.set_rules(rules, overwrite=True) - return enforcer - - def test_v1_deleted_image_fetch(self): - """ - Test for determining that when an admin tries to download a deleted - image it returns 404 Not Found error. 
- """ - def dummy_img_iterator(): - for i in range(3): - yield i - - image_id = 'test1' - image_meta = { - 'id': image_id, - 'name': 'fake_image', - 'status': 'deleted', - 'created_at': '', - 'min_disk': '10G', - 'min_ram': '1024M', - 'protected': False, - 'locations': '', - 'checksum': 'c1234', - 'owner': '', - 'disk_format': 'raw', - 'container_format': 'bare', - 'size': '123456789', - 'virtual_size': '123456789', - 'is_public': 'public', - 'deleted': True, - 'updated_at': '', - 'properties': {}, - } - request = webob.Request.blank('/v1/images/%s' % image_id) - request.context = context.RequestContext() - cache_filter = ProcessRequestTestCacheFilter() - self.assertRaises(exception.NotFound, cache_filter._process_v1_request, - request, image_id, dummy_img_iterator, image_meta) - - def test_process_v1_request_for_deleted_but_cached_image(self): - """ - Test for determining image is deleted from cache when it is not found - in Glance Registry. - """ - def fake_process_v1_request(request, image_id, image_iterator, - image_meta): - raise exception.ImageNotFound() - - def fake_get_v1_image_metadata(request, image_id): - return {'status': 'active', 'properties': {}} - - image_id = 'test1' - request = webob.Request.blank('/v1/images/%s' % image_id) - request.context = context.RequestContext() - - cache_filter = ProcessRequestTestCacheFilter() - self.stubs.Set(cache_filter, '_get_v1_image_metadata', - fake_get_v1_image_metadata) - self.stubs.Set(cache_filter, '_process_v1_request', - fake_process_v1_request) - cache_filter.process_request(request) - self.assertIn(image_id, cache_filter.cache.deleted_images) - - def test_v1_process_request_image_fetch(self): - - def dummy_img_iterator(): - for i in range(3): - yield i - - image_id = 'test1' - image_meta = { - 'id': image_id, - 'name': 'fake_image', - 'status': 'active', - 'created_at': '', - 'min_disk': '10G', - 'min_ram': '1024M', - 'protected': False, - 'locations': '', - 'checksum': 'c1234', - 'owner': '', - 
'disk_format': 'raw', - 'container_format': 'bare', - 'size': '123456789', - 'virtual_size': '123456789', - 'is_public': 'public', - 'deleted': False, - 'updated_at': '', - 'properties': {}, - } - request = webob.Request.blank('/v1/images/%s' % image_id) - request.context = context.RequestContext() - cache_filter = ProcessRequestTestCacheFilter() - actual = cache_filter._process_v1_request( - request, image_id, dummy_img_iterator, image_meta) - self.assertTrue(actual) - - def test_v1_remove_location_image_fetch(self): - - class CheckNoLocationDataSerializer(object): - def show(self, response, raw_response): - return 'location_data' in raw_response['image_meta'] - - def dummy_img_iterator(): - for i in range(3): - yield i - - image_id = 'test1' - image_meta = { - 'id': image_id, - 'name': 'fake_image', - 'status': 'active', - 'created_at': '', - 'min_disk': '10G', - 'min_ram': '1024M', - 'protected': False, - 'locations': '', - 'checksum': 'c1234', - 'owner': '', - 'disk_format': 'raw', - 'container_format': 'bare', - 'size': '123456789', - 'virtual_size': '123456789', - 'is_public': 'public', - 'deleted': False, - 'updated_at': '', - 'properties': {}, - } - request = webob.Request.blank('/v1/images/%s' % image_id) - request.context = context.RequestContext() - cache_filter = ProcessRequestTestCacheFilter() - cache_filter.serializer = CheckNoLocationDataSerializer() - actual = cache_filter._process_v1_request( - request, image_id, dummy_img_iterator, image_meta) - self.assertFalse(actual) - - def test_verify_metadata_deleted_image(self): - """ - Test verify_metadata raises exception.NotFound for a deleted image - """ - image_meta = {'status': 'deleted', 'is_public': True, 'deleted': True} - cache_filter = ProcessRequestTestCacheFilter() - self.assertRaises(exception.NotFound, - cache_filter._verify_metadata, image_meta) - - def test_verify_metadata_zero_size(self): - """ - Test verify_metadata updates metadata with cached image size for images - with 0 size - """ - 
image_size = 1 - - def fake_get_image_size(image_id): - return image_size - - image_id = 'test1' - image_meta = {'size': 0, 'deleted': False, 'id': image_id, - 'status': 'active'} - cache_filter = ProcessRequestTestCacheFilter() - self.stubs.Set(cache_filter.cache, 'get_image_size', - fake_get_image_size) - cache_filter._verify_metadata(image_meta) - self.assertEqual(image_size, image_meta['size']) - - def test_v2_process_request_response_headers(self): - def dummy_img_iterator(): - for i in range(3): - yield i - - image_id = 'test1' - request = webob.Request.blank('/v2/images/test1/file') - request.context = context.RequestContext() - request.environ['api.cache.image'] = ImageStub(image_id) - - image_meta = { - 'id': image_id, - 'name': 'fake_image', - 'status': 'active', - 'created_at': '', - 'min_disk': '10G', - 'min_ram': '1024M', - 'protected': False, - 'locations': '', - 'checksum': 'c1234', - 'owner': '', - 'disk_format': 'raw', - 'container_format': 'bare', - 'size': '123456789', - 'virtual_size': '123456789', - 'is_public': 'public', - 'deleted': False, - 'updated_at': '', - 'properties': {}, - } - - cache_filter = ProcessRequestTestCacheFilter() - response = cache_filter._process_v2_request( - request, image_id, dummy_img_iterator, image_meta) - self.assertEqual('application/octet-stream', - response.headers['Content-Type']) - self.assertEqual('c1234', response.headers['Content-MD5']) - self.assertEqual('123456789', response.headers['Content-Length']) - - def test_v2_process_request_without_checksum(self): - def dummy_img_iterator(): - for i in range(3): - yield i - - image_id = 'test1' - request = webob.Request.blank('/v2/images/test1/file') - request.context = context.RequestContext() - image = ImageStub(image_id) - image.checksum = None - request.environ['api.cache.image'] = image - - image_meta = { - 'id': image_id, - 'name': 'fake_image', - 'status': 'active', - 'size': '123456789', - } - - cache_filter = ProcessRequestTestCacheFilter() - response = 
cache_filter._process_v2_request( - request, image_id, dummy_img_iterator, image_meta) - self.assertNotIn('Content-MD5', response.headers.keys()) - - def test_process_request_without_download_image_policy(self): - """ - Test for cache middleware skip processing when request - context has not 'download_image' role. - """ - - def fake_get_v1_image_metadata(*args, **kwargs): - return {'status': 'active', 'properties': {}} - - image_id = 'test1' - request = webob.Request.blank('/v1/images/%s' % image_id) - request.context = context.RequestContext() - - cache_filter = ProcessRequestTestCacheFilter() - cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata - - enforcer = self._enforcer_from_rules({'download_image': '!'}) - cache_filter.policy = enforcer - self.assertRaises(webob.exc.HTTPForbidden, - cache_filter.process_request, request) - - def test_v1_process_request_download_restricted(self): - """ - Test process_request for v1 api where _member_ role not able to - download the image with custom property. 
- """ - image_id = 'test1' - - def fake_get_v1_image_metadata(*args, **kwargs): - return { - 'id': image_id, - 'name': 'fake_image', - 'status': 'active', - 'created_at': '', - 'min_disk': '10G', - 'min_ram': '1024M', - 'protected': False, - 'locations': '', - 'checksum': 'c1234', - 'owner': '', - 'disk_format': 'raw', - 'container_format': 'bare', - 'size': '123456789', - 'virtual_size': '123456789', - 'is_public': 'public', - 'deleted': False, - 'updated_at': '', - 'x_test_key': 'test_1234' - } - - enforcer = self._enforcer_from_rules({ - "restricted": - "not ('test_1234':%(x_test_key)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - }) - - request = webob.Request.blank('/v1/images/%s' % image_id) - request.context = context.RequestContext(roles=['_member_']) - cache_filter = ProcessRequestTestCacheFilter() - cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata - cache_filter.policy = enforcer - self.assertRaises(webob.exc.HTTPForbidden, - cache_filter.process_request, request) - - def test_v1_process_request_download_permitted(self): - """ - Test process_request for v1 api where member role able to - download the image with custom property. 
- """ - image_id = 'test1' - - def fake_get_v1_image_metadata(*args, **kwargs): - return { - 'id': image_id, - 'name': 'fake_image', - 'status': 'active', - 'created_at': '', - 'min_disk': '10G', - 'min_ram': '1024M', - 'protected': False, - 'locations': '', - 'checksum': 'c1234', - 'owner': '', - 'disk_format': 'raw', - 'container_format': 'bare', - 'size': '123456789', - 'virtual_size': '123456789', - 'is_public': 'public', - 'deleted': False, - 'updated_at': '', - 'x_test_key': 'test_1234' - } - - request = webob.Request.blank('/v1/images/%s' % image_id) - request.context = context.RequestContext(roles=['member']) - cache_filter = ProcessRequestTestCacheFilter() - cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata - - rules = { - "restricted": - "not ('test_1234':%(x_test_key)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - } - self.set_policy_rules(rules) - cache_filter.policy = glance.api.policy.Enforcer() - actual = cache_filter.process_request(request) - self.assertTrue(actual) - - def test_v1_process_request_image_meta_not_found(self): - """ - Test process_request for v1 api where registry raises NotFound - exception as image metadata not found. 
- """ - image_id = 'test1' - - def fake_get_v1_image_metadata(*args, **kwargs): - raise exception.NotFound() - - request = webob.Request.blank('/v1/images/%s' % image_id) - request.context = context.RequestContext(roles=['_member_']) - cache_filter = ProcessRequestTestCacheFilter() - self.stubs.Set(registry, 'get_image_metadata', - fake_get_v1_image_metadata) - - rules = { - "restricted": - "not ('test_1234':%(x_test_key)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - } - self.set_policy_rules(rules) - cache_filter.policy = glance.api.policy.Enforcer() - self.assertRaises(webob.exc.HTTPNotFound, - cache_filter.process_request, request) - - def test_v2_process_request_download_restricted(self): - """ - Test process_request for v2 api where _member_ role not able to - download the image with custom property. - """ - image_id = 'test1' - extra_properties = { - 'x_test_key': 'test_1234' - } - - def fake_get_v2_image_metadata(*args, **kwargs): - image = ImageStub(image_id, extra_properties=extra_properties) - request.environ['api.cache.image'] = image - return glance.api.policy.ImageTarget(image) - - enforcer = self._enforcer_from_rules({ - "restricted": - "not ('test_1234':%(x_test_key)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - }) - - request = webob.Request.blank('/v2/images/test1/file') - request.context = context.RequestContext(roles=['_member_']) - cache_filter = ProcessRequestTestCacheFilter() - cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata - - cache_filter.policy = enforcer - self.assertRaises(webob.exc.HTTPForbidden, - cache_filter.process_request, request) - - def test_v2_process_request_download_permitted(self): - """ - Test process_request for v2 api where member role able to - download the image with custom property. 
- """ - image_id = 'test1' - extra_properties = { - 'x_test_key': 'test_1234' - } - - def fake_get_v2_image_metadata(*args, **kwargs): - image = ImageStub(image_id, extra_properties=extra_properties) - request.environ['api.cache.image'] = image - return glance.api.policy.ImageTarget(image) - - request = webob.Request.blank('/v2/images/test1/file') - request.context = context.RequestContext(roles=['member']) - cache_filter = ProcessRequestTestCacheFilter() - cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata - - rules = { - "restricted": - "not ('test_1234':%(x_test_key)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - } - self.set_policy_rules(rules) - cache_filter.policy = glance.api.policy.Enforcer() - actual = cache_filter.process_request(request) - self.assertTrue(actual) - - -class TestCacheMiddlewareProcessResponse(base.IsolatedUnitTest): - def test_process_v1_DELETE_response(self): - image_id = 'test1' - request = webob.Request.blank('/v1/images/%s' % image_id) - request.context = context.RequestContext() - cache_filter = ProcessRequestTestCacheFilter() - headers = {"x-image-meta-deleted": True} - resp = webob.Response(request=request, headers=headers) - actual = cache_filter._process_DELETE_response(resp, image_id) - self.assertEqual(resp, actual) - - def test_get_status_code(self): - headers = {"x-image-meta-deleted": True} - resp = webob.Response(headers=headers) - cache_filter = ProcessRequestTestCacheFilter() - actual = cache_filter.get_status_code(resp) - self.assertEqual(http.OK, actual) - - def test_process_response(self): - def fake_fetch_request_info(*args, **kwargs): - return ('test1', 'GET', 'v1') - - def fake_get_v1_image_metadata(*args, **kwargs): - return {'properties': {}} - - cache_filter = ProcessRequestTestCacheFilter() - cache_filter._fetch_request_info = fake_fetch_request_info - cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata - image_id = 'test1' - request = 
webob.Request.blank('/v1/images/%s' % image_id) - request.context = context.RequestContext() - headers = {"x-image-meta-deleted": True} - resp = webob.Response(request=request, headers=headers) - actual = cache_filter.process_response(resp) - self.assertEqual(resp, actual) - - def test_process_response_without_download_image_policy(self): - """ - Test for cache middleware raise webob.exc.HTTPForbidden directly - when request context has not 'download_image' role. - """ - def fake_fetch_request_info(*args, **kwargs): - return ('test1', 'GET', 'v1') - - def fake_get_v1_image_metadata(*args, **kwargs): - return {'properties': {}} - - cache_filter = ProcessRequestTestCacheFilter() - cache_filter._fetch_request_info = fake_fetch_request_info - cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata - rules = {'download_image': '!'} - self.set_policy_rules(rules) - cache_filter.policy = glance.api.policy.Enforcer() - - image_id = 'test1' - request = webob.Request.blank('/v1/images/%s' % image_id) - request.context = context.RequestContext() - resp = webob.Response(request=request) - self.assertRaises(webob.exc.HTTPForbidden, - cache_filter.process_response, resp) - self.assertEqual([b''], resp.app_iter) - - def test_v1_process_response_download_restricted(self): - """ - Test process_response for v1 api where _member_ role not able to - download the image with custom property. 
- """ - image_id = 'test1' - - def fake_fetch_request_info(*args, **kwargs): - return ('test1', 'GET', 'v1') - - def fake_get_v1_image_metadata(*args, **kwargs): - return { - 'id': image_id, - 'name': 'fake_image', - 'status': 'active', - 'created_at': '', - 'min_disk': '10G', - 'min_ram': '1024M', - 'protected': False, - 'locations': '', - 'checksum': 'c1234', - 'owner': '', - 'disk_format': 'raw', - 'container_format': 'bare', - 'size': '123456789', - 'virtual_size': '123456789', - 'is_public': 'public', - 'deleted': False, - 'updated_at': '', - 'x_test_key': 'test_1234' - } - - cache_filter = ProcessRequestTestCacheFilter() - cache_filter._fetch_request_info = fake_fetch_request_info - cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata - rules = { - "restricted": - "not ('test_1234':%(x_test_key)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - } - self.set_policy_rules(rules) - cache_filter.policy = glance.api.policy.Enforcer() - - request = webob.Request.blank('/v1/images/%s' % image_id) - request.context = context.RequestContext(roles=['_member_']) - resp = webob.Response(request=request) - self.assertRaises(webob.exc.HTTPForbidden, - cache_filter.process_response, resp) - - def test_v1_process_response_download_permitted(self): - """ - Test process_response for v1 api where member role able to - download the image with custom property. 
- """ - image_id = 'test1' - - def fake_fetch_request_info(*args, **kwargs): - return ('test1', 'GET', 'v1') - - def fake_get_v1_image_metadata(*args, **kwargs): - return { - 'id': image_id, - 'name': 'fake_image', - 'status': 'active', - 'created_at': '', - 'min_disk': '10G', - 'min_ram': '1024M', - 'protected': False, - 'locations': '', - 'checksum': 'c1234', - 'owner': '', - 'disk_format': 'raw', - 'container_format': 'bare', - 'size': '123456789', - 'virtual_size': '123456789', - 'is_public': 'public', - 'deleted': False, - 'updated_at': '', - 'x_test_key': 'test_1234' - } - - cache_filter = ProcessRequestTestCacheFilter() - cache_filter._fetch_request_info = fake_fetch_request_info - cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata - rules = { - "restricted": - "not ('test_1234':%(x_test_key)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - } - self.set_policy_rules(rules) - cache_filter.policy = glance.api.policy.Enforcer() - - request = webob.Request.blank('/v1/images/%s' % image_id) - request.context = context.RequestContext(roles=['member']) - resp = webob.Response(request=request) - actual = cache_filter.process_response(resp) - self.assertEqual(resp, actual) - - def test_v1_process_response_image_meta_not_found(self): - """ - Test process_response for v1 api where registry raises NotFound - exception as image metadata not found. 
- """ - image_id = 'test1' - - def fake_fetch_request_info(*args, **kwargs): - return ('test1', 'GET', 'v1') - - def fake_get_v1_image_metadata(*args, **kwargs): - raise exception.NotFound() - - cache_filter = ProcessRequestTestCacheFilter() - cache_filter._fetch_request_info = fake_fetch_request_info - - self.stubs.Set(registry, 'get_image_metadata', - fake_get_v1_image_metadata) - - rules = { - "restricted": - "not ('test_1234':%(x_test_key)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - } - self.set_policy_rules(rules) - cache_filter.policy = glance.api.policy.Enforcer() - - request = webob.Request.blank('/v1/images/%s' % image_id) - request.context = context.RequestContext(roles=['_member_']) - resp = webob.Response(request=request) - self.assertRaises(webob.exc.HTTPNotFound, - cache_filter.process_response, resp) - - def test_v2_process_response_download_restricted(self): - """ - Test process_response for v2 api where _member_ role not able to - download the image with custom property. 
- """ - image_id = 'test1' - extra_properties = { - 'x_test_key': 'test_1234' - } - - def fake_fetch_request_info(*args, **kwargs): - return ('test1', 'GET', 'v2') - - def fake_get_v2_image_metadata(*args, **kwargs): - image = ImageStub(image_id, extra_properties=extra_properties) - request.environ['api.cache.image'] = image - return glance.api.policy.ImageTarget(image) - - cache_filter = ProcessRequestTestCacheFilter() - cache_filter._fetch_request_info = fake_fetch_request_info - cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata - - rules = { - "restricted": - "not ('test_1234':%(x_test_key)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - } - self.set_policy_rules(rules) - cache_filter.policy = glance.api.policy.Enforcer() - - request = webob.Request.blank('/v2/images/test1/file') - request.context = context.RequestContext(roles=['_member_']) - resp = webob.Response(request=request) - self.assertRaises(webob.exc.HTTPForbidden, - cache_filter.process_response, resp) - - def test_v2_process_response_download_permitted(self): - """ - Test process_response for v2 api where member role able to - download the image with custom property. 
- """ - image_id = 'test1' - extra_properties = { - 'x_test_key': 'test_1234' - } - - def fake_fetch_request_info(*args, **kwargs): - return ('test1', 'GET', 'v2') - - def fake_get_v2_image_metadata(*args, **kwargs): - image = ImageStub(image_id, extra_properties=extra_properties) - request.environ['api.cache.image'] = image - return glance.api.policy.ImageTarget(image) - - cache_filter = ProcessRequestTestCacheFilter() - cache_filter._fetch_request_info = fake_fetch_request_info - cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata - - rules = { - "restricted": - "not ('test_1234':%(x_test_key)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - } - self.set_policy_rules(rules) - cache_filter.policy = glance.api.policy.Enforcer() - - request = webob.Request.blank('/v2/images/test1/file') - request.context = context.RequestContext(roles=['member']) - resp = webob.Response(request=request) - actual = cache_filter.process_response(resp) - self.assertEqual(resp, actual) diff --git a/glance/tests/unit/test_cached_images.py b/glance/tests/unit/test_cached_images.py deleted file mode 100644 index 9bc8fcd4..00000000 --- a/glance/tests/unit/test_cached_images.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (C) 2013 Yahoo! Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools -import webob - -from glance.api import cached_images -from glance.api import policy -from glance.common import exception -from glance import image_cache - - -class FakePolicyEnforcer(policy.Enforcer): - def __init__(self): - self.default_rule = '' - self.policy_path = '' - self.policy_file_mtime = None - self.policy_file_contents = None - - def enforce(self, context, action, target): - return 'pass' - - def check(rule, target, creds, exc=None, *args, **kwargs): - return 'pass' - - def _check(self, context, rule, target, *args, **kwargs): - return 'pass' - - -class FakeCache(image_cache.ImageCache): - def __init__(self): - self.init_driver() - self.deleted_images = [] - - def init_driver(self): - pass - - def get_cached_images(self): - return {'id': 'test'} - - def delete_cached_image(self, image_id): - self.deleted_images.append(image_id) - - def delete_all_cached_images(self): - self.delete_cached_image(self.get_cached_images().get('id')) - return 1 - - def get_queued_images(self): - return {'test': 'passed'} - - def queue_image(self, image_id): - return 'pass' - - def delete_queued_image(self, image_id): - self.deleted_images.append(image_id) - - def delete_all_queued_images(self): - self.delete_queued_image('deleted_img') - return 1 - - -class FakeController(cached_images.Controller): - def __init__(self): - self.cache = FakeCache() - self.policy = FakePolicyEnforcer() - - -class TestController(testtools.TestCase): - def test_initialization_without_conf(self): - self.assertRaises(exception.BadDriverConfiguration, - cached_images.Controller) - - -class TestCachedImages(testtools.TestCase): - def setUp(self): - super(TestCachedImages, self).setUp() - test_controller = FakeController() - self.controller = test_controller - - def test_get_cached_images(self): - req = webob.Request.blank('') - req.context = 'test' - result = self.controller.get_cached_images(req) - self.assertEqual({'cached_images': {'id': 'test'}}, result) - - def 
test_delete_cached_image(self): - req = webob.Request.blank('') - req.context = 'test' - self.controller.delete_cached_image(req, image_id='test') - self.assertEqual(['test'], self.controller.cache.deleted_images) - - def test_delete_cached_images(self): - req = webob.Request.blank('') - req.context = 'test' - self.assertEqual({'num_deleted': 1}, - self.controller.delete_cached_images(req)) - self.assertEqual(['test'], self.controller.cache.deleted_images) - - def test_policy_enforce_forbidden(self): - def fake_enforce(context, action, target): - raise exception.Forbidden() - - self.controller.policy.enforce = fake_enforce - req = webob.Request.blank('') - req.context = 'test' - self.assertRaises(webob.exc.HTTPForbidden, - self.controller.get_cached_images, req) - - def test_get_queued_images(self): - req = webob.Request.blank('') - req.context = 'test' - result = self.controller.get_queued_images(req) - self.assertEqual({'queued_images': {'test': 'passed'}}, result) - - def test_queue_image(self): - req = webob.Request.blank('') - req.context = 'test' - self.controller.queue_image(req, image_id='test1') - - def test_delete_queued_image(self): - req = webob.Request.blank('') - req.context = 'test' - self.controller.delete_queued_image(req, 'deleted_img') - self.assertEqual(['deleted_img'], - self.controller.cache.deleted_images) - - def test_delete_queued_images(self): - req = webob.Request.blank('') - req.context = 'test' - self.assertEqual({'num_deleted': 1}, - self.controller.delete_queued_images(req)) - self.assertEqual(['deleted_img'], - self.controller.cache.deleted_images) diff --git a/glance/tests/unit/test_context.py b/glance/tests/unit/test_context.py deleted file mode 100644 index a4cf1706..00000000 --- a/glance/tests/unit/test_context.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from glance import context -from glance.tests.unit import utils as unit_utils -from glance.tests import utils - - -def _fake_image(owner, is_public): - return { - 'id': None, - 'owner': owner, - 'visibility': 'public' if is_public else 'shared', - } - - -def _fake_membership(can_share=False): - return {'can_share': can_share} - - -class TestContext(utils.BaseTestCase): - def setUp(self): - super(TestContext, self).setUp() - self.db_api = unit_utils.FakeDB() - - def do_visible(self, exp_res, img_owner, img_public, **kwargs): - """ - Perform a context visibility test. Creates a (fake) image - with the specified owner and is_public attributes, then - creates a context with the given keyword arguments and expects - exp_res as the result of an is_image_visible() call on the - context. - """ - - img = _fake_image(img_owner, img_public) - ctx = context.RequestContext(**kwargs) - - self.assertEqual(exp_res, self.db_api.is_image_visible(ctx, img)) - - def test_empty_public(self): - """ - Tests that an empty context (with is_admin set to True) can - access an image with is_public set to True. - """ - self.do_visible(True, None, True, is_admin=True) - - def test_empty_public_owned(self): - """ - Tests that an empty context (with is_admin set to True) can - access an owned image with is_public set to True. 
- """ - self.do_visible(True, 'pattieblack', True, is_admin=True) - - def test_empty_private(self): - """ - Tests that an empty context (with is_admin set to True) can - access an image with is_public set to False. - """ - self.do_visible(True, None, False, is_admin=True) - - def test_empty_private_owned(self): - """ - Tests that an empty context (with is_admin set to True) can - access an owned image with is_public set to False. - """ - self.do_visible(True, 'pattieblack', False, is_admin=True) - - def test_anon_public(self): - """ - Tests that an anonymous context (with is_admin set to False) - can access an image with is_public set to True. - """ - self.do_visible(True, None, True) - - def test_anon_public_owned(self): - """ - Tests that an anonymous context (with is_admin set to False) - can access an owned image with is_public set to True. - """ - self.do_visible(True, 'pattieblack', True) - - def test_anon_private(self): - """ - Tests that an anonymous context (with is_admin set to False) - can access an unowned image with is_public set to False. - """ - self.do_visible(True, None, False) - - def test_anon_private_owned(self): - """ - Tests that an anonymous context (with is_admin set to False) - cannot access an owned image with is_public set to False. - """ - self.do_visible(False, 'pattieblack', False) - - def test_auth_public(self): - """ - Tests that an authenticated context (with is_admin set to - False) can access an image with is_public set to True. - """ - self.do_visible(True, None, True, tenant='froggy') - - def test_auth_public_unowned(self): - """ - Tests that an authenticated context (with is_admin set to - False) can access an image (which it does not own) with - is_public set to True. - """ - self.do_visible(True, 'pattieblack', True, tenant='froggy') - - def test_auth_public_owned(self): - """ - Tests that an authenticated context (with is_admin set to - False) can access an image (which it does own) with is_public - set to True. 
- """ - self.do_visible(True, 'pattieblack', True, tenant='pattieblack') - - def test_auth_private(self): - """ - Tests that an authenticated context (with is_admin set to - False) can access an image with is_public set to False. - """ - self.do_visible(True, None, False, tenant='froggy') - - def test_auth_private_unowned(self): - """ - Tests that an authenticated context (with is_admin set to - False) cannot access an image (which it does not own) with - is_public set to False. - """ - self.do_visible(False, 'pattieblack', False, tenant='froggy') - - def test_auth_private_owned(self): - """ - Tests that an authenticated context (with is_admin set to - False) can access an image (which it does own) with is_public - set to False. - """ - self.do_visible(True, 'pattieblack', False, tenant='pattieblack') - - def test_request_id(self): - contexts = [context.RequestContext().request_id for _ in range(5)] - # Check for uniqueness -- set() will normalize its argument - self.assertEqual(5, len(set(contexts))) - - def test_service_catalog(self): - ctx = context.RequestContext(service_catalog=['foo']) - self.assertEqual(['foo'], ctx.service_catalog) - - def test_user_identity(self): - ctx = context.RequestContext(user="user", - tenant="tenant", - domain="domain", - user_domain="user-domain", - project_domain="project-domain") - self.assertEqual('user tenant domain user-domain project-domain', - ctx.to_dict()["user_identity"]) diff --git a/glance/tests/unit/test_context_middleware.py b/glance/tests/unit/test_context_middleware.py deleted file mode 100644 index a35355c8..00000000 --- a/glance/tests/unit/test_context_middleware.py +++ /dev/null @@ -1,164 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import webob - -from glance.api.middleware import context -import glance.context -from glance.tests.unit import base - - -class TestContextMiddleware(base.IsolatedUnitTest): - def _build_request(self, roles=None, identity_status='Confirmed', - service_catalog=None): - req = webob.Request.blank('/') - req.headers['x-auth-token'] = 'token1' - req.headers['x-identity-status'] = identity_status - req.headers['x-user-id'] = 'user1' - req.headers['x-tenant-id'] = 'tenant1' - _roles = roles or ['role1', 'role2'] - req.headers['x-roles'] = ','.join(_roles) - if service_catalog: - req.headers['x-service-catalog'] = service_catalog - - return req - - def _build_middleware(self): - return context.ContextMiddleware(None) - - def test_header_parsing(self): - req = self._build_request() - self._build_middleware().process_request(req) - self.assertEqual('token1', req.context.auth_token) - self.assertEqual('user1', req.context.user) - self.assertEqual('tenant1', req.context.tenant) - self.assertEqual(['role1', 'role2'], req.context.roles) - - def test_is_admin_flag(self): - # is_admin check should look for 'admin' role by default - req = self._build_request(roles=['admin', 'role2']) - self._build_middleware().process_request(req) - self.assertTrue(req.context.is_admin) - - # without the 'admin' role, is_admin should be False - req = self._build_request() - self._build_middleware().process_request(req) - self.assertFalse(req.context.is_admin) - - # if we change the admin_role attribute, we should be able to use it - req = self._build_request() - 
self.config(admin_role='role1') - self._build_middleware().process_request(req) - self.assertTrue(req.context.is_admin) - - def test_roles_case_insensitive(self): - # accept role from request - req = self._build_request(roles=['Admin', 'role2']) - self._build_middleware().process_request(req) - self.assertTrue(req.context.is_admin) - - # accept role from config - req = self._build_request(roles=['role1']) - self.config(admin_role='rOLe1') - self._build_middleware().process_request(req) - self.assertTrue(req.context.is_admin) - - def test_roles_stripping(self): - # stripping extra spaces in request - req = self._build_request(roles=['\trole1']) - self.config(admin_role='role1') - self._build_middleware().process_request(req) - self.assertTrue(req.context.is_admin) - - # stripping extra spaces in config - req = self._build_request(roles=['\trole1\n']) - self.config(admin_role=' role1\t') - self._build_middleware().process_request(req) - self.assertTrue(req.context.is_admin) - - def test_anonymous_access_enabled(self): - req = self._build_request(identity_status='Nope') - self.config(allow_anonymous_access=True) - middleware = self._build_middleware() - middleware.process_request(req) - self.assertIsNone(req.context.auth_token) - self.assertIsNone(req.context.user) - self.assertIsNone(req.context.tenant) - self.assertEqual([], req.context.roles) - self.assertFalse(req.context.is_admin) - self.assertTrue(req.context.read_only) - - def test_anonymous_access_defaults_to_disabled(self): - req = self._build_request(identity_status='Nope') - middleware = self._build_middleware() - self.assertRaises(webob.exc.HTTPUnauthorized, - middleware.process_request, req) - - def test_service_catalog(self): - catalog_json = "[{}]" - req = self._build_request(service_catalog=catalog_json) - self._build_middleware().process_request(req) - self.assertEqual([{}], req.context.service_catalog) - - def test_invalid_service_catalog(self): - catalog_json = "bad json" - req = 
self._build_request(service_catalog=catalog_json) - middleware = self._build_middleware() - self.assertRaises(webob.exc.HTTPInternalServerError, - middleware.process_request, req) - - def test_response(self): - req = self._build_request() - req.context = glance.context.RequestContext() - request_id = req.context.request_id - - resp = webob.Response() - resp.request = req - self._build_middleware().process_response(resp) - self.assertEqual(request_id, resp.headers['x-openstack-request-id']) - resp_req_id = resp.headers['x-openstack-request-id'] - # Validate that request-id do not starts with 'req-req-' - if isinstance(resp_req_id, bytes): - resp_req_id = resp_req_id.decode('utf-8') - self.assertFalse(resp_req_id.startswith('req-req-')) - self.assertTrue(resp_req_id.startswith('req-')) - - -class TestUnauthenticatedContextMiddleware(base.IsolatedUnitTest): - def test_request(self): - middleware = context.UnauthenticatedContextMiddleware(None) - req = webob.Request.blank('/') - middleware.process_request(req) - self.assertIsNone(req.context.auth_token) - self.assertIsNone(req.context.user) - self.assertIsNone(req.context.tenant) - self.assertEqual([], req.context.roles) - self.assertTrue(req.context.is_admin) - - def test_response(self): - middleware = context.UnauthenticatedContextMiddleware(None) - req = webob.Request.blank('/') - req.context = glance.context.RequestContext() - request_id = req.context.request_id - - resp = webob.Response() - resp.request = req - middleware.process_response(resp) - self.assertEqual(request_id, resp.headers['x-openstack-request-id']) - resp_req_id = resp.headers['x-openstack-request-id'] - if isinstance(resp_req_id, bytes): - resp_req_id = resp_req_id.decode('utf-8') - # Validate that request-id do not starts with 'req-req-' - self.assertFalse(resp_req_id.startswith('req-req-')) - self.assertTrue(resp_req_id.startswith('req-')) diff --git a/glance/tests/unit/test_data_migration_framework.py 
b/glance/tests/unit/test_data_migration_framework.py deleted file mode 100644 index e23f7be9..00000000 --- a/glance/tests/unit/test_data_migration_framework.py +++ /dev/null @@ -1,205 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from glance.db.sqlalchemy.alembic_migrations import data_migrations -from glance.tests import utils as test_utils - - -class TestDataMigrationFramework(test_utils.BaseTestCase): - - @mock.patch('glance.db.sqlalchemy.alembic_migrations.data_migrations' - '._find_migration_modules') - def test_has_pending_migrations_no_migrations(self, mock_find): - mock_find.return_value = None - self.assertFalse(data_migrations.has_pending_migrations(mock.Mock())) - - @mock.patch('glance.db.sqlalchemy.alembic_migrations.data_migrations' - '._find_migration_modules') - def test_has_pending_migrations_one_migration_no_pending(self, mock_find): - mock_migration1 = mock.Mock() - mock_migration1.has_migrations.return_value = False - mock_find.return_value = [mock_migration1] - - self.assertFalse(data_migrations.has_pending_migrations(mock.Mock())) - - @mock.patch('glance.db.sqlalchemy.alembic_migrations.data_migrations' - '._find_migration_modules') - def test_has_pending_migrations_one_migration_with_pending(self, - mock_find): - mock_migration1 = mock.Mock() - mock_migration1.has_migrations.return_value = True - mock_find.return_value = [mock_migration1] - - self.assertTrue(data_migrations.has_pending_migrations(mock.Mock())) - - 
@mock.patch('glance.db.sqlalchemy.alembic_migrations.data_migrations' - '._find_migration_modules') - def test_has_pending_migrations_mult_migration_no_pending(self, mock_find): - mock_migration1 = mock.Mock() - mock_migration1.has_migrations.return_value = False - mock_migration2 = mock.Mock() - mock_migration2.has_migrations.return_value = False - mock_migration3 = mock.Mock() - mock_migration3.has_migrations.return_value = False - - mock_find.return_value = [mock_migration1, mock_migration2, - mock_migration3] - - self.assertFalse(data_migrations.has_pending_migrations(mock.Mock())) - - @mock.patch('glance.db.sqlalchemy.alembic_migrations.data_migrations' - '._find_migration_modules') - def test_has_pending_migrations_mult_migration_one_pending(self, - mock_find): - mock_migration1 = mock.Mock() - mock_migration1.has_migrations.return_value = False - mock_migration2 = mock.Mock() - mock_migration2.has_migrations.return_value = True - mock_migration3 = mock.Mock() - mock_migration3.has_migrations.return_value = False - - mock_find.return_value = [mock_migration1, mock_migration2, - mock_migration3] - - self.assertTrue(data_migrations.has_pending_migrations(mock.Mock())) - - @mock.patch('glance.db.sqlalchemy.alembic_migrations.data_migrations' - '._find_migration_modules') - def test_has_pending_migrations_mult_migration_some_pending(self, - mock_find): - mock_migration1 = mock.Mock() - mock_migration1.has_migrations.return_value = False - mock_migration2 = mock.Mock() - mock_migration2.has_migrations.return_value = True - mock_migration3 = mock.Mock() - mock_migration3.has_migrations.return_value = False - mock_migration4 = mock.Mock() - mock_migration4.has_migrations.return_value = True - - mock_find.return_value = [mock_migration1, mock_migration2, - mock_migration3, mock_migration4] - - self.assertTrue(data_migrations.has_pending_migrations(mock.Mock())) - - @mock.patch('importlib.import_module') - @mock.patch('pkgutil.iter_modules') - def 
test_find_migrations(self, mock_iter, mock_import): - def fake_iter_modules(blah): - yield 'blah', 'zebra01', 'blah' - yield 'blah', 'zebra02', 'blah' - yield 'blah', 'yellow01', 'blah' - yield 'blah', 'xray01', 'blah' - yield 'blah', 'wrinkle01', 'blah' - - mock_iter.side_effect = fake_iter_modules - - zebra1 = mock.Mock() - zebra1.has_migrations.return_value = mock.Mock() - zebra1.migrate.return_value = mock.Mock() - zebra2 = mock.Mock() - zebra2.has_migrations.return_value = mock.Mock() - zebra2.migrate.return_value = mock.Mock() - - fake_imported_modules = [zebra1, zebra2] - mock_import.side_effect = fake_imported_modules - - actual = data_migrations._find_migration_modules('zebra') - self.assertEqual(2, len(actual)) - self.assertEqual(fake_imported_modules, actual) - - @mock.patch('pkgutil.iter_modules') - def test_find_migrations_no_migrations(self, mock_iter): - def fake_iter_modules(blah): - yield 'blah', 'zebra01', 'blah' - yield 'blah', 'yellow01', 'blah' - yield 'blah', 'xray01', 'blah' - yield 'blah', 'wrinkle01', 'blah' - yield 'blah', 'victor01', 'blah' - - mock_iter.side_effect = fake_iter_modules - - actual = data_migrations._find_migration_modules('umbrella') - self.assertEqual(0, len(actual)) - self.assertEqual([], actual) - - def test_run_migrations(self): - zebra1 = mock.Mock() - zebra1.has_migrations.return_value = True - zebra1.migrate.return_value = 100 - zebra2 = mock.Mock() - zebra2.has_migrations.return_value = True - zebra2.migrate.return_value = 50 - migrations = [zebra1, zebra2] - - engine = mock.Mock() - actual = data_migrations._run_migrations(engine, migrations) - self.assertEqual(150, actual) - zebra1.has_migrations.assert_called_once_with(engine) - zebra1.migrate.assert_called_once_with(engine) - zebra2.has_migrations.assert_called_once_with(engine) - zebra2.migrate.assert_called_once_with(engine) - - def test_run_migrations_with_one_pending_migration(self): - zebra1 = mock.Mock() - zebra1.has_migrations.return_value = False - 
zebra1.migrate.return_value = 0 - zebra2 = mock.Mock() - zebra2.has_migrations.return_value = True - zebra2.migrate.return_value = 50 - migrations = [zebra1, zebra2] - - engine = mock.Mock() - actual = data_migrations._run_migrations(engine, migrations) - self.assertEqual(50, actual) - zebra1.has_migrations.assert_called_once_with(engine) - zebra1.migrate.assert_not_called() - zebra2.has_migrations.assert_called_once_with(engine) - zebra2.migrate.assert_called_once_with(engine) - - def test_run_migrations_with_no_migrations(self): - migrations = [] - - actual = data_migrations._run_migrations(mock.Mock(), migrations) - self.assertEqual(0, actual) - - @mock.patch('glance.db.migration.CURRENT_RELEASE', 'zebra') - @mock.patch('importlib.import_module') - @mock.patch('pkgutil.iter_modules') - def test_migrate(self, mock_iter, mock_import): - def fake_iter_modules(blah): - yield 'blah', 'zebra01', 'blah' - yield 'blah', 'zebra02', 'blah' - yield 'blah', 'yellow01', 'blah' - yield 'blah', 'xray01', 'blah' - yield 'blah', 'xray02', 'blah' - - mock_iter.side_effect = fake_iter_modules - - zebra1 = mock.Mock() - zebra1.has_migrations.return_value = True - zebra1.migrate.return_value = 100 - zebra2 = mock.Mock() - zebra2.has_migrations.return_value = True - zebra2.migrate.return_value = 50 - - fake_imported_modules = [zebra1, zebra2] - mock_import.side_effect = fake_imported_modules - - engine = mock.Mock() - actual = data_migrations.migrate(engine) - self.assertEqual(150, actual) - zebra1.has_migrations.assert_called_once_with(engine) - zebra1.migrate.assert_called_once_with(engine) - zebra2.has_migrations.assert_called_once_with(engine) - zebra2.migrate.assert_called_once_with(engine) diff --git a/glance/tests/unit/test_db.py b/glance/tests/unit/test_db.py deleted file mode 100644 index 28c50afb..00000000 --- a/glance/tests/unit/test_db.py +++ /dev/null @@ -1,769 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# Copyright 2013 IBM Corp. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import uuid - -import mock -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_utils import encodeutils -from oslo_utils import timeutils - -from glance.common import crypt -from glance.common import exception -import glance.context -import glance.db -from glance.db.sqlalchemy import api -import glance.tests.unit.utils as unit_test_utils -import glance.tests.utils as test_utils - -CONF = cfg.CONF -CONF.import_opt('metadata_encryption_key', 'glance.common.config') - - -@mock.patch('oslo_utils.importutils.import_module') -class TestDbUtilities(test_utils.BaseTestCase): - def setUp(self): - super(TestDbUtilities, self).setUp() - self.config(data_api='silly pants') - self.api = mock.Mock() - - def test_get_api_calls_configure_if_present(self, import_module): - import_module.return_value = self.api - self.assertEqual(glance.db.get_api(), self.api) - import_module.assert_called_once_with('silly pants') - self.api.configure.assert_called_once_with() - - def test_get_api_skips_configure_if_missing(self, import_module): - import_module.return_value = self.api - del self.api.configure - self.assertEqual(glance.db.get_api(), self.api) - import_module.assert_called_once_with('silly pants') - self.assertFalse(hasattr(self.api, 'configure')) - - -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' -UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc' -UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' 
-UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86' - -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' -TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' -TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' -TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' - -USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' - -UUID1_LOCATION = 'file:///path/to/image' -UUID1_LOCATION_METADATA = {'key': 'value'} -UUID3_LOCATION = 'http://somehost.com/place' - -CHECKSUM = '93264c3edf5972c9f1cb309543d38a5c' -CHCKSUM1 = '43264c3edf4972c9f1cb309543d38a55' - - -def _db_fixture(id, **kwargs): - obj = { - 'id': id, - 'name': None, - 'is_public': False, - 'properties': {}, - 'checksum': None, - 'owner': None, - 'status': 'queued', - 'tags': [], - 'size': None, - 'locations': [], - 'protected': False, - 'disk_format': None, - 'container_format': None, - 'deleted': False, - 'min_ram': None, - 'min_disk': None, - } - if 'visibility' in kwargs: - obj.pop('is_public') - obj.update(kwargs) - return obj - - -def _db_image_member_fixture(image_id, member_id, **kwargs): - obj = { - 'image_id': image_id, - 'member': member_id, - } - obj.update(kwargs) - return obj - - -def _db_task_fixture(task_id, type, status, **kwargs): - obj = { - 'id': task_id, - 'type': type, - 'status': status, - 'input': None, - 'result': None, - 'owner': None, - 'message': None, - 'deleted': False, - 'expires_at': timeutils.utcnow() + datetime.timedelta(days=365) - } - obj.update(kwargs) - return obj - - -class TestImageRepo(test_utils.BaseTestCase): - - def setUp(self): - super(TestImageRepo, self).setUp() - self.db = unit_test_utils.FakeDB(initialize=False) - self.context = glance.context.RequestContext( - user=USER1, tenant=TENANT1) - self.image_repo = glance.db.ImageRepo(self.context, self.db) - self.image_factory = glance.domain.ImageFactory() - self._create_images() - self._create_image_members() - - def _create_images(self): - self.images = [ - _db_fixture(UUID1, owner=TENANT1, checksum=CHECKSUM, - name='1', size=256, - 
is_public=True, status='active', - locations=[{'url': UUID1_LOCATION, - 'metadata': UUID1_LOCATION_METADATA, - 'status': 'active'}]), - _db_fixture(UUID2, owner=TENANT1, checksum=CHCKSUM1, - name='2', size=512, is_public=False), - _db_fixture(UUID3, owner=TENANT3, checksum=CHCKSUM1, - name='3', size=1024, is_public=True, - locations=[{'url': UUID3_LOCATION, - 'metadata': {}, - 'status': 'active'}]), - _db_fixture(UUID4, owner=TENANT4, name='4', size=2048), - ] - [self.db.image_create(None, image) for image in self.images] - - self.db.image_tag_set_all(None, UUID1, ['ping', 'pong']) - - def _create_image_members(self): - self.image_members = [ - _db_image_member_fixture(UUID2, TENANT2), - _db_image_member_fixture(UUID2, TENANT3, status='accepted'), - ] - [self.db.image_member_create(None, image_member) - for image_member in self.image_members] - - def test_get(self): - image = self.image_repo.get(UUID1) - self.assertEqual(UUID1, image.image_id) - self.assertEqual('1', image.name) - self.assertEqual(set(['ping', 'pong']), image.tags) - self.assertEqual('public', image.visibility) - self.assertEqual('active', image.status) - self.assertEqual(256, image.size) - self.assertEqual(TENANT1, image.owner) - - def test_location_value(self): - image = self.image_repo.get(UUID3) - self.assertEqual(UUID3_LOCATION, image.locations[0]['url']) - - def test_location_data_value(self): - image = self.image_repo.get(UUID1) - self.assertEqual(UUID1_LOCATION, image.locations[0]['url']) - self.assertEqual(UUID1_LOCATION_METADATA, - image.locations[0]['metadata']) - - def test_location_data_exists(self): - image = self.image_repo.get(UUID2) - self.assertEqual([], image.locations) - - def test_get_not_found(self): - fake_uuid = str(uuid.uuid4()) - exc = self.assertRaises(exception.ImageNotFound, self.image_repo.get, - fake_uuid) - self.assertIn(fake_uuid, encodeutils.exception_to_unicode(exc)) - - def test_get_forbidden(self): - self.assertRaises(exception.NotFound, self.image_repo.get, 
UUID4) - - def test_list(self): - images = self.image_repo.list() - image_ids = set([i.image_id for i in images]) - self.assertEqual(set([UUID1, UUID2, UUID3]), image_ids) - - def _do_test_list_status(self, status, expected): - self.context = glance.context.RequestContext( - user=USER1, tenant=TENANT3) - self.image_repo = glance.db.ImageRepo(self.context, self.db) - images = self.image_repo.list(member_status=status) - self.assertEqual(expected, len(images)) - - def test_list_status(self): - self._do_test_list_status(None, 3) - - def test_list_status_pending(self): - self._do_test_list_status('pending', 2) - - def test_list_status_rejected(self): - self._do_test_list_status('rejected', 2) - - def test_list_status_all(self): - self._do_test_list_status('all', 3) - - def test_list_with_marker(self): - full_images = self.image_repo.list() - full_ids = [i.image_id for i in full_images] - marked_images = self.image_repo.list(marker=full_ids[0]) - actual_ids = [i.image_id for i in marked_images] - self.assertEqual(full_ids[1:], actual_ids) - - def test_list_with_last_marker(self): - images = self.image_repo.list() - marked_images = self.image_repo.list(marker=images[-1].image_id) - self.assertEqual(0, len(marked_images)) - - def test_limited_list(self): - limited_images = self.image_repo.list(limit=2) - self.assertEqual(2, len(limited_images)) - - def test_list_with_marker_and_limit(self): - full_images = self.image_repo.list() - full_ids = [i.image_id for i in full_images] - marked_images = self.image_repo.list(marker=full_ids[0], limit=1) - actual_ids = [i.image_id for i in marked_images] - self.assertEqual(full_ids[1:2], actual_ids) - - def test_list_private_images(self): - filters = {'visibility': 'private'} - images = self.image_repo.list(filters=filters) - self.assertEqual(0, len(images)) - - def test_list_shared_images(self): - filters = {'visibility': 'shared'} - images = self.image_repo.list(filters=filters) - image_ids = set([i.image_id for i in images]) - 
self.assertEqual(set([UUID2]), image_ids) - - def test_list_with_checksum_filter_single_image(self): - filters = {'checksum': CHECKSUM} - images = self.image_repo.list(filters=filters) - image_ids = list([i.image_id for i in images]) - self.assertEqual(1, len(image_ids)) - self.assertEqual([UUID1], image_ids) - - def test_list_with_checksum_filter_multiple_images(self): - filters = {'checksum': CHCKSUM1} - images = self.image_repo.list(filters=filters) - image_ids = list([i.image_id for i in images]) - self.assertEqual(2, len(image_ids)) - self.assertIn(UUID2, image_ids) - self.assertIn(UUID3, image_ids) - - def test_list_with_wrong_checksum(self): - WRONG_CHKSUM = 'd2fd42f979e1ed1aafadc7eb9354bff839c858cd' - filters = {'checksum': WRONG_CHKSUM} - images = self.image_repo.list(filters=filters) - self.assertEqual(0, len(images)) - - def test_list_with_tags_filter_single_tag(self): - filters = {'tags': ['ping']} - images = self.image_repo.list(filters=filters) - image_ids = list([i.image_id for i in images]) - self.assertEqual(1, len(image_ids)) - self.assertEqual([UUID1], image_ids) - - def test_list_with_tags_filter_multiple_tags(self): - filters = {'tags': ['ping', 'pong']} - images = self.image_repo.list(filters=filters) - image_ids = list([i.image_id for i in images]) - self.assertEqual(1, len(image_ids)) - self.assertEqual([UUID1], image_ids) - - def test_list_with_tags_filter_multiple_tags_and_nonexistent(self): - filters = {'tags': ['ping', 'fake']} - images = self.image_repo.list(filters=filters) - image_ids = list([i.image_id for i in images]) - self.assertEqual(0, len(image_ids)) - - def test_list_with_wrong_tags(self): - filters = {'tags': ['fake']} - images = self.image_repo.list(filters=filters) - self.assertEqual(0, len(images)) - - def test_list_public_images(self): - filters = {'visibility': 'public'} - images = self.image_repo.list(filters=filters) - image_ids = set([i.image_id for i in images]) - self.assertEqual(set([UUID1, UUID3]), image_ids) - - 
def test_sorted_list(self): - images = self.image_repo.list(sort_key=['size'], sort_dir=['asc']) - image_ids = [i.image_id for i in images] - self.assertEqual([UUID1, UUID2, UUID3], image_ids) - - def test_sorted_list_with_multiple_keys(self): - temp_id = 'd80a1a6c-bd1f-41c5-90ee-81afedb1d58d' - image = _db_fixture(temp_id, owner=TENANT1, checksum=CHECKSUM, - name='1', size=1024, - is_public=True, status='active', - locations=[{'url': UUID1_LOCATION, - 'metadata': UUID1_LOCATION_METADATA, - 'status': 'active'}]) - self.db.image_create(None, image) - images = self.image_repo.list(sort_key=['name', 'size'], - sort_dir=['asc']) - image_ids = [i.image_id for i in images] - self.assertEqual([UUID1, temp_id, UUID2, UUID3], image_ids) - - images = self.image_repo.list(sort_key=['size', 'name'], - sort_dir=['asc']) - image_ids = [i.image_id for i in images] - self.assertEqual([UUID1, UUID2, temp_id, UUID3], image_ids) - - def test_sorted_list_with_multiple_dirs(self): - temp_id = 'd80a1a6c-bd1f-41c5-90ee-81afedb1d58d' - image = _db_fixture(temp_id, owner=TENANT1, checksum=CHECKSUM, - name='1', size=1024, - is_public=True, status='active', - locations=[{'url': UUID1_LOCATION, - 'metadata': UUID1_LOCATION_METADATA, - 'status': 'active'}]) - self.db.image_create(None, image) - images = self.image_repo.list(sort_key=['name', 'size'], - sort_dir=['asc', 'desc']) - image_ids = [i.image_id for i in images] - self.assertEqual([temp_id, UUID1, UUID2, UUID3], image_ids) - - images = self.image_repo.list(sort_key=['name', 'size'], - sort_dir=['desc', 'asc']) - image_ids = [i.image_id for i in images] - self.assertEqual([UUID3, UUID2, UUID1, temp_id], image_ids) - - def test_add_image(self): - image = self.image_factory.new_image(name='added image') - self.assertEqual(image.updated_at, image.created_at) - self.image_repo.add(image) - retreived_image = self.image_repo.get(image.image_id) - self.assertEqual('added image', retreived_image.name) - self.assertEqual(image.updated_at, 
retreived_image.updated_at) - - def test_save_image(self): - image = self.image_repo.get(UUID1) - original_update_time = image.updated_at - image.name = 'foo' - image.tags = ['king', 'kong'] - self.image_repo.save(image) - current_update_time = image.updated_at - self.assertGreater(current_update_time, original_update_time) - image = self.image_repo.get(UUID1) - self.assertEqual('foo', image.name) - self.assertEqual(set(['king', 'kong']), image.tags) - self.assertEqual(current_update_time, image.updated_at) - - def test_save_image_not_found(self): - fake_uuid = str(uuid.uuid4()) - image = self.image_repo.get(UUID1) - image.image_id = fake_uuid - exc = self.assertRaises(exception.ImageNotFound, self.image_repo.save, - image) - self.assertIn(fake_uuid, encodeutils.exception_to_unicode(exc)) - - def test_remove_image(self): - image = self.image_repo.get(UUID1) - previous_update_time = image.updated_at - self.image_repo.remove(image) - self.assertGreater(image.updated_at, previous_update_time) - self.assertRaises(exception.ImageNotFound, self.image_repo.get, UUID1) - - def test_remove_image_not_found(self): - fake_uuid = str(uuid.uuid4()) - image = self.image_repo.get(UUID1) - image.image_id = fake_uuid - exc = self.assertRaises( - exception.ImageNotFound, self.image_repo.remove, image) - self.assertIn(fake_uuid, encodeutils.exception_to_unicode(exc)) - - -class TestEncryptedLocations(test_utils.BaseTestCase): - def setUp(self): - super(TestEncryptedLocations, self).setUp() - self.db = unit_test_utils.FakeDB(initialize=False) - self.context = glance.context.RequestContext( - user=USER1, tenant=TENANT1) - self.image_repo = glance.db.ImageRepo(self.context, self.db) - self.image_factory = glance.domain.ImageFactory() - self.crypt_key = '0123456789abcdef' - self.config(metadata_encryption_key=self.crypt_key) - self.foo_bar_location = [{'url': 'foo', 'metadata': {}, - 'status': 'active'}, - {'url': 'bar', 'metadata': {}, - 'status': 'active'}] - - def 
test_encrypt_locations_on_add(self): - image = self.image_factory.new_image(UUID1) - image.locations = self.foo_bar_location - self.image_repo.add(image) - db_data = self.db.image_get(self.context, UUID1) - self.assertNotEqual(db_data['locations'], ['foo', 'bar']) - decrypted_locations = [crypt.urlsafe_decrypt(self.crypt_key, l['url']) - for l in db_data['locations']] - self.assertEqual([l['url'] for l in self.foo_bar_location], - decrypted_locations) - - def test_encrypt_locations_on_save(self): - image = self.image_factory.new_image(UUID1) - self.image_repo.add(image) - image.locations = self.foo_bar_location - self.image_repo.save(image) - db_data = self.db.image_get(self.context, UUID1) - self.assertNotEqual(db_data['locations'], ['foo', 'bar']) - decrypted_locations = [crypt.urlsafe_decrypt(self.crypt_key, l['url']) - for l in db_data['locations']] - self.assertEqual([l['url'] for l in self.foo_bar_location], - decrypted_locations) - - def test_decrypt_locations_on_get(self): - url_loc = ['ping', 'pong'] - orig_locations = [{'url': l, 'metadata': {}, 'status': 'active'} - for l in url_loc] - encrypted_locs = [crypt.urlsafe_encrypt(self.crypt_key, l) - for l in url_loc] - encrypted_locations = [{'url': l, 'metadata': {}, 'status': 'active'} - for l in encrypted_locs] - self.assertNotEqual(encrypted_locations, orig_locations) - db_data = _db_fixture(UUID1, owner=TENANT1, - locations=encrypted_locations) - self.db.image_create(None, db_data) - image = self.image_repo.get(UUID1) - self.assertIn('id', image.locations[0]) - self.assertIn('id', image.locations[1]) - image.locations[0].pop('id') - image.locations[1].pop('id') - self.assertEqual(orig_locations, image.locations) - - def test_decrypt_locations_on_list(self): - url_loc = ['ping', 'pong'] - orig_locations = [{'url': l, 'metadata': {}, 'status': 'active'} - for l in url_loc] - encrypted_locs = [crypt.urlsafe_encrypt(self.crypt_key, l) - for l in url_loc] - encrypted_locations = [{'url': l, 'metadata': {}, 
'status': 'active'} - for l in encrypted_locs] - self.assertNotEqual(encrypted_locations, orig_locations) - db_data = _db_fixture(UUID1, owner=TENANT1, - locations=encrypted_locations) - self.db.image_create(None, db_data) - image = self.image_repo.list()[0] - self.assertIn('id', image.locations[0]) - self.assertIn('id', image.locations[1]) - image.locations[0].pop('id') - image.locations[1].pop('id') - self.assertEqual(orig_locations, image.locations) - - -class TestImageMemberRepo(test_utils.BaseTestCase): - - def setUp(self): - super(TestImageMemberRepo, self).setUp() - self.db = unit_test_utils.FakeDB(initialize=False) - self.context = glance.context.RequestContext( - user=USER1, tenant=TENANT1) - self.image_repo = glance.db.ImageRepo(self.context, self.db) - self.image_member_factory = glance.domain.ImageMemberFactory() - self._create_images() - self._create_image_members() - image = self.image_repo.get(UUID1) - self.image_member_repo = glance.db.ImageMemberRepo(self.context, - self.db, image) - - def _create_images(self): - self.images = [ - _db_fixture(UUID1, owner=TENANT1, name='1', size=256, - status='active'), - _db_fixture(UUID2, owner=TENANT1, name='2', - size=512, visibility='shared'), - ] - [self.db.image_create(None, image) for image in self.images] - - self.db.image_tag_set_all(None, UUID1, ['ping', 'pong']) - - def _create_image_members(self): - self.image_members = [ - _db_image_member_fixture(UUID1, TENANT2), - _db_image_member_fixture(UUID1, TENANT3), - ] - [self.db.image_member_create(None, image_member) - for image_member in self.image_members] - - def test_list(self): - image_members = self.image_member_repo.list() - image_member_ids = set([i.member_id for i in image_members]) - self.assertEqual(set([TENANT2, TENANT3]), image_member_ids) - - def test_list_no_members(self): - image = self.image_repo.get(UUID2) - self.image_member_repo_uuid2 = glance.db.ImageMemberRepo( - self.context, self.db, image) - image_members = 
self.image_member_repo_uuid2.list() - image_member_ids = set([i.member_id for i in image_members]) - self.assertEqual(set([]), image_member_ids) - - def test_save_image_member(self): - image_member = self.image_member_repo.get(TENANT2) - image_member.status = 'accepted' - self.image_member_repo.save(image_member) - image_member_updated = self.image_member_repo.get(TENANT2) - self.assertEqual(image_member.id, image_member_updated.id) - self.assertEqual('accepted', image_member_updated.status) - - def test_add_image_member(self): - image = self.image_repo.get(UUID1) - image_member = self.image_member_factory.new_image_member(image, - TENANT4) - self.assertIsNone(image_member.id) - self.image_member_repo.add(image_member) - retreived_image_member = self.image_member_repo.get(TENANT4) - self.assertIsNotNone(retreived_image_member.id) - self.assertEqual(image_member.image_id, - retreived_image_member.image_id) - self.assertEqual(image_member.member_id, - retreived_image_member.member_id) - self.assertEqual('pending', retreived_image_member.status) - - def test_add_duplicate_image_member(self): - image = self.image_repo.get(UUID1) - image_member = self.image_member_factory.new_image_member(image, - TENANT4) - self.assertIsNone(image_member.id) - self.image_member_repo.add(image_member) - retreived_image_member = self.image_member_repo.get(TENANT4) - self.assertIsNotNone(retreived_image_member.id) - self.assertEqual(image_member.image_id, - retreived_image_member.image_id) - self.assertEqual(image_member.member_id, - retreived_image_member.member_id) - self.assertEqual('pending', retreived_image_member.status) - - self.assertRaises(exception.Duplicate, self.image_member_repo.add, - image_member) - - def test_get_image_member(self): - image = self.image_repo.get(UUID1) - image_member = self.image_member_factory.new_image_member(image, - TENANT4) - self.assertIsNone(image_member.id) - self.image_member_repo.add(image_member) - - member = 
self.image_member_repo.get(image_member.member_id) - - self.assertEqual(member.id, image_member.id) - self.assertEqual(member.image_id, image_member.image_id) - self.assertEqual(member.member_id, image_member.member_id) - self.assertEqual('pending', member.status) - - def test_get_nonexistent_image_member(self): - fake_image_member_id = 'fake' - self.assertRaises(exception.NotFound, self.image_member_repo.get, - fake_image_member_id) - - def test_remove_image_member(self): - image_member = self.image_member_repo.get(TENANT2) - self.image_member_repo.remove(image_member) - self.assertRaises(exception.NotFound, self.image_member_repo.get, - TENANT2) - - def test_remove_image_member_does_not_exist(self): - fake_uuid = str(uuid.uuid4()) - image = self.image_repo.get(UUID2) - fake_member = glance.domain.ImageMemberFactory().new_image_member( - image, TENANT4) - fake_member.id = fake_uuid - exc = self.assertRaises(exception.NotFound, - self.image_member_repo.remove, - fake_member) - self.assertIn(fake_uuid, encodeutils.exception_to_unicode(exc)) - - -class TestTaskRepo(test_utils.BaseTestCase): - - def setUp(self): - super(TestTaskRepo, self).setUp() - self.db = unit_test_utils.FakeDB(initialize=False) - self.context = glance.context.RequestContext(user=USER1, - tenant=TENANT1) - self.task_repo = glance.db.TaskRepo(self.context, self.db) - self.task_factory = glance.domain.TaskFactory() - self.fake_task_input = ('{"import_from": ' - '"swift://cloud.foo/account/mycontainer/path"' - ',"import_from_format": "qcow2"}') - self._create_tasks() - - def _create_tasks(self): - self.tasks = [ - _db_task_fixture(UUID1, type='import', status='pending', - input=self.fake_task_input, - result='', - owner=TENANT1, - message='', - ), - _db_task_fixture(UUID2, type='import', status='processing', - input=self.fake_task_input, - result='', - owner=TENANT1, - message='', - ), - _db_task_fixture(UUID3, type='import', status='failure', - input=self.fake_task_input, - result='', - 
owner=TENANT1, - message='', - ), - _db_task_fixture(UUID4, type='import', status='success', - input=self.fake_task_input, - result='', - owner=TENANT2, - message='', - ), - ] - [self.db.task_create(None, task) for task in self.tasks] - - def test_get(self): - task = self.task_repo.get(UUID1) - self.assertEqual(task.task_id, UUID1) - self.assertEqual('import', task.type) - self.assertEqual('pending', task.status) - self.assertEqual(task.task_input, self.fake_task_input) - self.assertEqual('', task.result) - self.assertEqual('', task.message) - self.assertEqual(task.owner, TENANT1) - - def test_get_not_found(self): - self.assertRaises(exception.NotFound, - self.task_repo.get, - str(uuid.uuid4())) - - def test_get_forbidden(self): - self.assertRaises(exception.NotFound, - self.task_repo.get, - UUID4) - - def test_list(self): - tasks = self.task_repo.list() - task_ids = set([i.task_id for i in tasks]) - self.assertEqual(set([UUID1, UUID2, UUID3]), task_ids) - - def test_list_with_type(self): - filters = {'type': 'import'} - tasks = self.task_repo.list(filters=filters) - task_ids = set([i.task_id for i in tasks]) - self.assertEqual(set([UUID1, UUID2, UUID3]), task_ids) - - def test_list_with_status(self): - filters = {'status': 'failure'} - tasks = self.task_repo.list(filters=filters) - task_ids = set([i.task_id for i in tasks]) - self.assertEqual(set([UUID3]), task_ids) - - def test_list_with_marker(self): - full_tasks = self.task_repo.list() - full_ids = [i.task_id for i in full_tasks] - marked_tasks = self.task_repo.list(marker=full_ids[0]) - actual_ids = [i.task_id for i in marked_tasks] - self.assertEqual(full_ids[1:], actual_ids) - - def test_list_with_last_marker(self): - tasks = self.task_repo.list() - marked_tasks = self.task_repo.list(marker=tasks[-1].task_id) - self.assertEqual(0, len(marked_tasks)) - - def test_limited_list(self): - limited_tasks = self.task_repo.list(limit=2) - self.assertEqual(2, len(limited_tasks)) - - def 
test_list_with_marker_and_limit(self): - full_tasks = self.task_repo.list() - full_ids = [i.task_id for i in full_tasks] - marked_tasks = self.task_repo.list(marker=full_ids[0], limit=1) - actual_ids = [i.task_id for i in marked_tasks] - self.assertEqual(full_ids[1:2], actual_ids) - - def test_sorted_list(self): - tasks = self.task_repo.list(sort_key='status', sort_dir='desc') - task_ids = [i.task_id for i in tasks] - self.assertEqual([UUID2, UUID1, UUID3], task_ids) - - def test_add_task(self): - task_type = 'import' - task = self.task_factory.new_task(task_type, None, - task_input=self.fake_task_input) - self.assertEqual(task.updated_at, task.created_at) - self.task_repo.add(task) - retrieved_task = self.task_repo.get(task.task_id) - self.assertEqual(task.updated_at, retrieved_task.updated_at) - self.assertEqual(self.fake_task_input, retrieved_task.task_input) - - def test_save_task(self): - task = self.task_repo.get(UUID1) - original_update_time = task.updated_at - self.task_repo.save(task) - current_update_time = task.updated_at - self.assertGreater(current_update_time, original_update_time) - task = self.task_repo.get(UUID1) - self.assertEqual(current_update_time, task.updated_at) - - def test_remove_task(self): - task = self.task_repo.get(UUID1) - self.task_repo.remove(task) - self.assertRaises(exception.NotFound, - self.task_repo.get, - task.task_id) - - -class RetryOnDeadlockTestCase(test_utils.BaseTestCase): - - def test_raise_deadlock(self): - - class TestException(Exception): - pass - - self.attempts = 3 - - def _mock_get_session(): - def _raise_exceptions(): - self.attempts -= 1 - if self.attempts <= 0: - raise TestException("Exit") - raise db_exc.DBDeadlock("Fake Exception") - return _raise_exceptions - - with mock.patch.object(api, 'get_session') as sess: - sess.side_effect = _mock_get_session() - - try: - api._image_update(None, {}, 'fake-id') - except TestException: - self.assertEqual(3, sess.call_count) - - # Test retry on image destroy if db 
deadlock occurs - self.attempts = 3 - with mock.patch.object(api, 'get_session') as sess: - sess.side_effect = _mock_get_session() - - try: - api.image_destroy(None, 'fake-id') - except TestException: - self.assertEqual(3, sess.call_count) diff --git a/glance/tests/unit/test_db_metadef.py b/glance/tests/unit/test_db_metadef.py deleted file mode 100644 index eaad1970..00000000 --- a/glance/tests/unit/test_db_metadef.py +++ /dev/null @@ -1,566 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# Copyright 2014 Intel Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_utils import encodeutils - -from glance.common import exception -import glance.context -import glance.db -import glance.tests.unit.utils as unit_test_utils -import glance.tests.utils as test_utils - -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' -TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' -TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' -TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' - -USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' - -NAMESPACE1 = 'namespace1' -NAMESPACE2 = 'namespace2' -NAMESPACE3 = 'namespace3' -NAMESPACE4 = 'namespace4' - -PROPERTY1 = 'Property1' -PROPERTY2 = 'Property2' -PROPERTY3 = 'Property3' - -OBJECT1 = 'Object1' -OBJECT2 = 'Object2' -OBJECT3 = 'Object3' - -TAG1 = 'Tag1' -TAG2 = 'Tag2' -TAG3 = 'Tag3' -TAG4 = 'Tag4' -TAG5 = 'Tag5' - -RESOURCE_TYPE1 = 'ResourceType1' -RESOURCE_TYPE2 = 'ResourceType2' -RESOURCE_TYPE3 = 'ResourceType3' - - -def _db_namespace_fixture(**kwargs): - namespace = { - 'namespace': None, - 'display_name': None, - 'description': None, - 'visibility': True, - 'protected': False, - 'owner': None - } - namespace.update(kwargs) - return namespace - - -def _db_property_fixture(name, **kwargs): - property = { - 'name': name, - 'json_schema': {"type": "string", "title": "title"}, - } - property.update(kwargs) - return property - - -def _db_object_fixture(name, **kwargs): - obj = { - 'name': name, - 'description': None, - 'json_schema': {}, - 'required': '[]', - } - obj.update(kwargs) - return obj - - -def _db_tag_fixture(name, **kwargs): - obj = { - 'name': name - } - obj.update(kwargs) - return obj - - -def _db_tags_fixture(names=None): - tags = [] - if names: - tag_name_list = names - else: - tag_name_list = [TAG1, TAG2, TAG3] - - for tag_name in tag_name_list: - tags.append(_db_tag_fixture(tag_name)) - return tags - - -def _db_resource_type_fixture(name, **kwargs): - obj = { - 'name': name, - 'protected': False, - } - obj.update(kwargs) - return obj - - -def 
_db_namespace_resource_type_fixture(name, **kwargs): - obj = { - 'name': name, - 'properties_target': None, - 'prefix': None, - } - obj.update(kwargs) - return obj - - -class TestMetadefRepo(test_utils.BaseTestCase): - - def setUp(self): - super(TestMetadefRepo, self).setUp() - self.db = unit_test_utils.FakeDB(initialize=False) - self.context = glance.context.RequestContext(user=USER1, - tenant=TENANT1) - self.namespace_repo = glance.db.MetadefNamespaceRepo(self.context, - self.db) - self.property_repo = glance.db.MetadefPropertyRepo(self.context, - self.db) - self.object_repo = glance.db.MetadefObjectRepo(self.context, - self.db) - self.tag_repo = glance.db.MetadefTagRepo(self.context, - self.db) - self.resource_type_repo = glance.db.MetadefResourceTypeRepo( - self.context, self.db) - self.namespace_factory = glance.domain.MetadefNamespaceFactory() - self.property_factory = glance.domain.MetadefPropertyFactory() - self.object_factory = glance.domain.MetadefObjectFactory() - self.tag_factory = glance.domain.MetadefTagFactory() - self.resource_type_factory = glance.domain.MetadefResourceTypeFactory() - self._create_namespaces() - self._create_properties() - self._create_objects() - self._create_tags() - self._create_resource_types() - - def _create_namespaces(self): - self.namespaces = [ - _db_namespace_fixture(namespace=NAMESPACE1, - display_name='1', - description='desc1', - visibility='private', - protected=True, - owner=TENANT1), - _db_namespace_fixture(namespace=NAMESPACE2, - display_name='2', - description='desc2', - visibility='public', - protected=False, - owner=TENANT1), - _db_namespace_fixture(namespace=NAMESPACE3, - display_name='3', - description='desc3', - visibility='private', - protected=True, - owner=TENANT3), - _db_namespace_fixture(namespace=NAMESPACE4, - display_name='4', - description='desc4', - visibility='public', - protected=True, - owner=TENANT3) - ] - [self.db.metadef_namespace_create(None, namespace) - for namespace in self.namespaces] - - 
def _create_properties(self): - self.properties = [ - _db_property_fixture(name=PROPERTY1), - _db_property_fixture(name=PROPERTY2), - _db_property_fixture(name=PROPERTY3) - ] - [self.db.metadef_property_create(self.context, NAMESPACE1, property) - for property in self.properties] - [self.db.metadef_property_create(self.context, NAMESPACE4, property) - for property in self.properties] - - def _create_objects(self): - self.objects = [ - _db_object_fixture(name=OBJECT1, - description='desc1'), - _db_object_fixture(name=OBJECT2, - description='desc2'), - _db_object_fixture(name=OBJECT3, - description='desc3'), - ] - [self.db.metadef_object_create(self.context, NAMESPACE1, object) - for object in self.objects] - [self.db.metadef_object_create(self.context, NAMESPACE4, object) - for object in self.objects] - - def _create_tags(self): - self.tags = [ - _db_tag_fixture(name=TAG1), - _db_tag_fixture(name=TAG2), - _db_tag_fixture(name=TAG3), - ] - [self.db.metadef_tag_create(self.context, NAMESPACE1, tag) - for tag in self.tags] - [self.db.metadef_tag_create(self.context, NAMESPACE4, tag) - for tag in self.tags] - - def _create_resource_types(self): - self.resource_types = [ - _db_resource_type_fixture(name=RESOURCE_TYPE1, - protected=False), - _db_resource_type_fixture(name=RESOURCE_TYPE2, - protected=False), - _db_resource_type_fixture(name=RESOURCE_TYPE3, - protected=True), - ] - [self.db.metadef_resource_type_create(self.context, resource_type) - for resource_type in self.resource_types] - - def test_get_namespace(self): - namespace = self.namespace_repo.get(NAMESPACE1) - self.assertEqual(NAMESPACE1, namespace.namespace) - self.assertEqual('desc1', namespace.description) - self.assertEqual('1', namespace.display_name) - self.assertEqual(TENANT1, namespace.owner) - self.assertTrue(namespace.protected) - self.assertEqual('private', namespace.visibility) - - def test_get_namespace_not_found(self): - fake_namespace = "fake_namespace" - exc = 
self.assertRaises(exception.NotFound, - self.namespace_repo.get, - fake_namespace) - self.assertIn(fake_namespace, encodeutils.exception_to_unicode(exc)) - - def test_get_namespace_forbidden(self): - self.assertRaises(exception.NotFound, - self.namespace_repo.get, - NAMESPACE3) - - def test_list_namespace(self): - namespaces = self.namespace_repo.list() - namespace_names = set([n.namespace for n in namespaces]) - self.assertEqual(set([NAMESPACE1, NAMESPACE2, NAMESPACE4]), - namespace_names) - - def test_list_private_namespaces(self): - filters = {'visibility': 'private'} - namespaces = self.namespace_repo.list(filters=filters) - namespace_names = set([n.namespace for n in namespaces]) - self.assertEqual(set([NAMESPACE1]), namespace_names) - - def test_add_namespace(self): - # NOTE(pawel-koniszewski): Change db_namespace_fixture to - # namespace_factory when namespace primary key in DB - # will be changed from Integer to UUID - namespace = _db_namespace_fixture(namespace='added_namespace', - display_name='fake', - description='fake_desc', - visibility='public', - protected=True, - owner=TENANT1) - self.assertEqual('added_namespace', namespace['namespace']) - self.db.metadef_namespace_create(None, namespace) - retrieved_namespace = self.namespace_repo.get(namespace['namespace']) - self.assertEqual('added_namespace', retrieved_namespace.namespace) - - def test_save_namespace(self): - namespace = self.namespace_repo.get(NAMESPACE1) - namespace.display_name = 'save_name' - namespace.description = 'save_desc' - self.namespace_repo.save(namespace) - namespace = self.namespace_repo.get(NAMESPACE1) - self.assertEqual('save_name', namespace.display_name) - self.assertEqual('save_desc', namespace.description) - - def test_remove_namespace(self): - namespace = self.namespace_repo.get(NAMESPACE1) - self.namespace_repo.remove(namespace) - self.assertRaises(exception.NotFound, self.namespace_repo.get, - NAMESPACE1) - - def test_remove_namespace_not_found(self): - fake_name = 
'fake_name' - namespace = self.namespace_repo.get(NAMESPACE1) - namespace.namespace = fake_name - exc = self.assertRaises(exception.NotFound, self.namespace_repo.remove, - namespace) - self.assertIn(fake_name, encodeutils.exception_to_unicode(exc)) - - def test_get_property(self): - property = self.property_repo.get(NAMESPACE1, PROPERTY1) - namespace = self.namespace_repo.get(NAMESPACE1) - self.assertEqual(PROPERTY1, property.name) - self.assertEqual(namespace.namespace, property.namespace.namespace) - - def test_get_property_not_found(self): - exc = self.assertRaises(exception.NotFound, - self.property_repo.get, - NAMESPACE2, PROPERTY1) - self.assertIn(PROPERTY1, encodeutils.exception_to_unicode(exc)) - - def test_list_property(self): - properties = self.property_repo.list(filters={'namespace': NAMESPACE1}) - property_names = set([p.name for p in properties]) - self.assertEqual(set([PROPERTY1, PROPERTY2, PROPERTY3]), - property_names) - - def test_list_property_empty_result(self): - properties = self.property_repo.list(filters={'namespace': NAMESPACE2}) - property_names = set([p.name for p in properties]) - self.assertEqual(set([]), - property_names) - - def test_list_property_namespace_not_found(self): - exc = self.assertRaises(exception.NotFound, self.property_repo.list, - filters={'namespace': 'not-a-namespace'}) - self.assertIn('not-a-namespace', encodeutils.exception_to_unicode(exc)) - - def test_add_property(self): - # NOTE(pawel-koniszewski): Change db_property_fixture to - # property_factory when property primary key in DB - # will be changed from Integer to UUID - property = _db_property_fixture(name='added_property') - self.assertEqual('added_property', property['name']) - self.db.metadef_property_create(self.context, NAMESPACE1, property) - retrieved_property = self.property_repo.get(NAMESPACE1, - 'added_property') - self.assertEqual('added_property', retrieved_property.name) - - def test_add_property_namespace_forbidden(self): - # 
NOTE(pawel-koniszewski): Change db_property_fixture to - # property_factory when property primary key in DB - # will be changed from Integer to UUID - property = _db_property_fixture(name='added_property') - self.assertEqual('added_property', property['name']) - self.assertRaises(exception.Forbidden, self.db.metadef_property_create, - self.context, NAMESPACE3, property) - - def test_add_property_namespace_not_found(self): - # NOTE(pawel-koniszewski): Change db_property_fixture to - # property_factory when property primary key in DB - # will be changed from Integer to UUID - property = _db_property_fixture(name='added_property') - self.assertEqual('added_property', property['name']) - self.assertRaises(exception.NotFound, self.db.metadef_property_create, - self.context, 'not_a_namespace', property) - - def test_save_property(self): - property = self.property_repo.get(NAMESPACE1, PROPERTY1) - property.schema = '{"save": "schema"}' - self.property_repo.save(property) - property = self.property_repo.get(NAMESPACE1, PROPERTY1) - self.assertEqual(PROPERTY1, property.name) - self.assertEqual('{"save": "schema"}', property.schema) - - def test_remove_property(self): - property = self.property_repo.get(NAMESPACE1, PROPERTY1) - self.property_repo.remove(property) - self.assertRaises(exception.NotFound, self.property_repo.get, - NAMESPACE1, PROPERTY1) - - def test_remove_property_not_found(self): - fake_name = 'fake_name' - property = self.property_repo.get(NAMESPACE1, PROPERTY1) - property.name = fake_name - self.assertRaises(exception.NotFound, self.property_repo.remove, - property) - - def test_get_object(self): - object = self.object_repo.get(NAMESPACE1, OBJECT1) - namespace = self.namespace_repo.get(NAMESPACE1) - self.assertEqual(OBJECT1, object.name) - self.assertEqual('desc1', object.description) - self.assertEqual(['[]'], object.required) - self.assertEqual({}, object.properties) - self.assertEqual(namespace.namespace, object.namespace.namespace) - - def 
test_get_object_not_found(self): - exc = self.assertRaises(exception.NotFound, self.object_repo.get, - NAMESPACE2, OBJECT1) - self.assertIn(OBJECT1, encodeutils.exception_to_unicode(exc)) - - def test_list_object(self): - objects = self.object_repo.list(filters={'namespace': NAMESPACE1}) - object_names = set([o.name for o in objects]) - self.assertEqual(set([OBJECT1, OBJECT2, OBJECT3]), object_names) - - def test_list_object_empty_result(self): - objects = self.object_repo.list(filters={'namespace': NAMESPACE2}) - object_names = set([o.name for o in objects]) - self.assertEqual(set([]), object_names) - - def test_list_object_namespace_not_found(self): - exc = self.assertRaises(exception.NotFound, self.object_repo.list, - filters={'namespace': 'not-a-namespace'}) - self.assertIn('not-a-namespace', encodeutils.exception_to_unicode(exc)) - - def test_add_object(self): - # NOTE(pawel-koniszewski): Change db_object_fixture to - # object_factory when object primary key in DB - # will be changed from Integer to UUID - object = _db_object_fixture(name='added_object') - self.assertEqual('added_object', object['name']) - self.db.metadef_object_create(self.context, NAMESPACE1, object) - retrieved_object = self.object_repo.get(NAMESPACE1, - 'added_object') - self.assertEqual('added_object', retrieved_object.name) - - def test_add_object_namespace_forbidden(self): - # NOTE(pawel-koniszewski): Change db_object_fixture to - # object_factory when object primary key in DB - # will be changed from Integer to UUID - object = _db_object_fixture(name='added_object') - self.assertEqual('added_object', object['name']) - self.assertRaises(exception.Forbidden, self.db.metadef_object_create, - self.context, NAMESPACE3, object) - - def test_add_object_namespace_not_found(self): - # NOTE(pawel-koniszewski): Change db_object_fixture to - # object_factory when object primary key in DB - # will be changed from Integer to UUID - object = _db_object_fixture(name='added_object') - 
self.assertEqual('added_object', object['name']) - self.assertRaises(exception.NotFound, self.db.metadef_object_create, - self.context, 'not-a-namespace', object) - - def test_save_object(self): - object = self.object_repo.get(NAMESPACE1, OBJECT1) - object.required = ['save_req'] - object.description = 'save_desc' - self.object_repo.save(object) - object = self.object_repo.get(NAMESPACE1, OBJECT1) - self.assertEqual(OBJECT1, object.name) - self.assertEqual(['save_req'], object.required) - self.assertEqual('save_desc', object.description) - - def test_remove_object(self): - object = self.object_repo.get(NAMESPACE1, OBJECT1) - self.object_repo.remove(object) - self.assertRaises(exception.NotFound, self.object_repo.get, - NAMESPACE1, OBJECT1) - - def test_remove_object_not_found(self): - fake_name = 'fake_name' - object = self.object_repo.get(NAMESPACE1, OBJECT1) - object.name = fake_name - self.assertRaises(exception.NotFound, self.object_repo.remove, - object) - - def test_list_resource_type(self): - resource_type = self.resource_type_repo.list( - filters={'namespace': NAMESPACE1}) - self.assertEqual(0, len(resource_type)) - - def test_get_tag(self): - tag = self.tag_repo.get(NAMESPACE1, TAG1) - namespace = self.namespace_repo.get(NAMESPACE1) - self.assertEqual(TAG1, tag.name) - self.assertEqual(namespace.namespace, tag.namespace.namespace) - - def test_get_tag_not_found(self): - exc = self.assertRaises(exception.NotFound, self.tag_repo.get, - NAMESPACE2, TAG1) - self.assertIn(TAG1, encodeutils.exception_to_unicode(exc)) - - def test_list_tag(self): - tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) - tag_names = set([t.name for t in tags]) - self.assertEqual(set([TAG1, TAG2, TAG3]), tag_names) - - def test_list_tag_empty_result(self): - tags = self.tag_repo.list(filters={'namespace': NAMESPACE2}) - tag_names = set([t.name for t in tags]) - self.assertEqual(set([]), tag_names) - - def test_list_tag_namespace_not_found(self): - exc = 
self.assertRaises(exception.NotFound, self.tag_repo.list, - filters={'namespace': 'not-a-namespace'}) - self.assertIn('not-a-namespace', encodeutils.exception_to_unicode(exc)) - - def test_add_tag(self): - # NOTE(pawel-koniszewski): Change db_tag_fixture to - # tag_factory when tag primary key in DB - # will be changed from Integer to UUID - tag = _db_tag_fixture(name='added_tag') - self.assertEqual('added_tag', tag['name']) - self.db.metadef_tag_create(self.context, NAMESPACE1, tag) - retrieved_tag = self.tag_repo.get(NAMESPACE1, 'added_tag') - self.assertEqual('added_tag', retrieved_tag.name) - - def test_add_tags(self): - tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) - tag_names = set([t.name for t in tags]) - self.assertEqual(set([TAG1, TAG2, TAG3]), tag_names) - - tags = _db_tags_fixture([TAG3, TAG4, TAG5]) - self.db.metadef_tag_create_tags(self.context, NAMESPACE1, tags) - - tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) - tag_names = set([t.name for t in tags]) - self.assertEqual(set([TAG3, TAG4, TAG5]), tag_names) - - def test_add_duplicate_tags_with_pre_existing_tags(self): - tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) - tag_names = set([t.name for t in tags]) - self.assertEqual(set([TAG1, TAG2, TAG3]), tag_names) - - tags = _db_tags_fixture([TAG5, TAG4, TAG5]) - self.assertRaises(exception.Duplicate, - self.db.metadef_tag_create_tags, - self.context, NAMESPACE1, tags) - - tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) - tag_names = set([t.name for t in tags]) - self.assertEqual(set([TAG1, TAG2, TAG3]), tag_names) - - def test_add_tag_namespace_forbidden(self): - # NOTE(pawel-koniszewski): Change db_tag_fixture to - # tag_factory when tag primary key in DB - # will be changed from Integer to UUID - tag = _db_tag_fixture(name='added_tag') - self.assertEqual('added_tag', tag['name']) - self.assertRaises(exception.Forbidden, self.db.metadef_tag_create, - self.context, NAMESPACE3, tag) - - def 
test_add_tag_namespace_not_found(self): - # NOTE(pawel-koniszewski): Change db_tag_fixture to - # tag_factory when tag primary key in DB - # will be changed from Integer to UUID - tag = _db_tag_fixture(name='added_tag') - self.assertEqual('added_tag', tag['name']) - self.assertRaises(exception.NotFound, self.db.metadef_tag_create, - self.context, 'not-a-namespace', tag) - - def test_save_tag(self): - tag = self.tag_repo.get(NAMESPACE1, TAG1) - self.tag_repo.save(tag) - tag = self.tag_repo.get(NAMESPACE1, TAG1) - self.assertEqual(TAG1, tag.name) - - def test_remove_tag(self): - tag = self.tag_repo.get(NAMESPACE1, TAG1) - self.tag_repo.remove(tag) - self.assertRaises(exception.NotFound, self.tag_repo.get, - NAMESPACE1, TAG1) - - def test_remove_tag_not_found(self): - fake_name = 'fake_name' - tag = self.tag_repo.get(NAMESPACE1, TAG1) - tag.name = fake_name - self.assertRaises(exception.NotFound, self.tag_repo.remove, tag) diff --git a/glance/tests/unit/test_domain.py b/glance/tests/unit/test_domain.py deleted file mode 100644 index 5ab5cf79..00000000 --- a/glance/tests/unit/test_domain.py +++ /dev/null @@ -1,575 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime -import uuid - -import mock -from oslo_config import cfg -import oslo_utils.importutils - -import glance.async -from glance.async import taskflow_executor -from glance.common import exception -from glance.common import timeutils -from glance import domain -import glance.tests.utils as test_utils - - -CONF = cfg.CONF - - -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' - - -class TestImageFactory(test_utils.BaseTestCase): - - def setUp(self): - super(TestImageFactory, self).setUp() - self.image_factory = domain.ImageFactory() - - def test_minimal_new_image(self): - image = self.image_factory.new_image() - self.assertIsNotNone(image.image_id) - self.assertIsNotNone(image.created_at) - self.assertEqual(image.created_at, image.updated_at) - self.assertEqual('queued', image.status) - self.assertEqual('shared', image.visibility) - self.assertIsNone(image.owner) - self.assertIsNone(image.name) - self.assertIsNone(image.size) - self.assertEqual(0, image.min_disk) - self.assertEqual(0, image.min_ram) - self.assertFalse(image.protected) - self.assertIsNone(image.disk_format) - self.assertIsNone(image.container_format) - self.assertEqual({}, image.extra_properties) - self.assertEqual(set([]), image.tags) - - def test_new_image(self): - image = self.image_factory.new_image( - image_id=UUID1, name='image-1', min_disk=256, - owner=TENANT1) - self.assertEqual(UUID1, image.image_id) - self.assertIsNotNone(image.created_at) - self.assertEqual(image.created_at, image.updated_at) - self.assertEqual('queued', image.status) - self.assertEqual('shared', image.visibility) - self.assertEqual(TENANT1, image.owner) - self.assertEqual('image-1', image.name) - self.assertIsNone(image.size) - self.assertEqual(256, image.min_disk) - self.assertEqual(0, image.min_ram) - self.assertFalse(image.protected) - self.assertIsNone(image.disk_format) - self.assertIsNone(image.container_format) - self.assertEqual({}, 
image.extra_properties) - self.assertEqual(set([]), image.tags) - - def test_new_image_with_extra_properties_and_tags(self): - extra_properties = {'foo': 'bar'} - tags = ['one', 'two'] - image = self.image_factory.new_image( - image_id=UUID1, name='image-1', - extra_properties=extra_properties, tags=tags) - - self.assertEqual(UUID1, image.image_id, UUID1) - self.assertIsNotNone(image.created_at) - self.assertEqual(image.created_at, image.updated_at) - self.assertEqual('queued', image.status) - self.assertEqual('shared', image.visibility) - self.assertIsNone(image.owner) - self.assertEqual('image-1', image.name) - self.assertIsNone(image.size) - self.assertEqual(0, image.min_disk) - self.assertEqual(0, image.min_ram) - self.assertFalse(image.protected) - self.assertIsNone(image.disk_format) - self.assertIsNone(image.container_format) - self.assertEqual({'foo': 'bar'}, image.extra_properties) - self.assertEqual(set(['one', 'two']), image.tags) - - def test_new_image_read_only_property(self): - self.assertRaises(exception.ReadonlyProperty, - self.image_factory.new_image, image_id=UUID1, - name='image-1', size=256) - - def test_new_image_unexpected_property(self): - self.assertRaises(TypeError, - self.image_factory.new_image, image_id=UUID1, - image_name='name-1') - - def test_new_image_reserved_property(self): - extra_properties = {'deleted': True} - self.assertRaises(exception.ReservedProperty, - self.image_factory.new_image, image_id=UUID1, - extra_properties=extra_properties) - - def test_new_image_for_is_public(self): - extra_prop = {'is_public': True} - new_image = self.image_factory.new_image(image_id=UUID1, - extra_properties=extra_prop) - self.assertEqual(True, new_image.extra_properties['is_public']) - - -class TestImage(test_utils.BaseTestCase): - - def setUp(self): - super(TestImage, self).setUp() - self.image_factory = domain.ImageFactory() - self.image = self.image_factory.new_image( - container_format='bear', disk_format='rawr') - - def 
test_extra_properties(self): - self.image.extra_properties = {'foo': 'bar'} - self.assertEqual({'foo': 'bar'}, self.image.extra_properties) - - def test_extra_properties_assign(self): - self.image.extra_properties['foo'] = 'bar' - self.assertEqual({'foo': 'bar'}, self.image.extra_properties) - - def test_delete_extra_properties(self): - self.image.extra_properties = {'foo': 'bar'} - self.assertEqual({'foo': 'bar'}, self.image.extra_properties) - del self.image.extra_properties['foo'] - self.assertEqual({}, self.image.extra_properties) - - def test_visibility_enumerated(self): - self.image.visibility = 'public' - self.image.visibility = 'private' - self.image.visibility = 'shared' - self.image.visibility = 'community' - self.assertRaises(ValueError, setattr, - self.image, 'visibility', 'ellison') - - def test_tags_always_a_set(self): - self.image.tags = ['a', 'b', 'c'] - self.assertEqual(set(['a', 'b', 'c']), self.image.tags) - - def test_delete_protected_image(self): - self.image.protected = True - self.assertRaises(exception.ProtectedImageDelete, self.image.delete) - - def test_status_saving(self): - self.image.status = 'saving' - self.assertEqual('saving', self.image.status) - - def test_set_incorrect_status(self): - self.image.status = 'saving' - self.image.status = 'killed' - self.assertRaises( - exception.InvalidImageStatusTransition, - setattr, self.image, 'status', 'delet') - - def test_status_saving_without_disk_format(self): - self.image.disk_format = None - self.assertRaises(ValueError, setattr, - self.image, 'status', 'saving') - - def test_status_saving_without_container_format(self): - self.image.container_format = None - self.assertRaises(ValueError, setattr, - self.image, 'status', 'saving') - - def test_status_active_without_disk_format(self): - self.image.disk_format = None - self.assertRaises(ValueError, setattr, - self.image, 'status', 'active') - - def test_status_active_without_container_format(self): - self.image.container_format = None - 
self.assertRaises(ValueError, setattr, - self.image, 'status', 'active') - - def test_delayed_delete(self): - self.config(delayed_delete=True) - self.image.status = 'active' - self.image.locations = [{'url': 'http://foo.bar/not.exists', - 'metadata': {}}] - self.assertEqual('active', self.image.status) - self.image.delete() - self.assertEqual('pending_delete', self.image.status) - - -class TestImageMember(test_utils.BaseTestCase): - - def setUp(self): - super(TestImageMember, self).setUp() - self.image_member_factory = domain.ImageMemberFactory() - self.image_factory = domain.ImageFactory() - self.image = self.image_factory.new_image() - self.image_member = self.image_member_factory.new_image_member( - image=self.image, - member_id=TENANT1) - - def test_status_enumerated(self): - self.image_member.status = 'pending' - self.image_member.status = 'accepted' - self.image_member.status = 'rejected' - self.assertRaises(ValueError, setattr, - self.image_member, 'status', 'ellison') - - -class TestImageMemberFactory(test_utils.BaseTestCase): - - def setUp(self): - super(TestImageMemberFactory, self).setUp() - self.image_member_factory = domain.ImageMemberFactory() - self.image_factory = domain.ImageFactory() - - def test_minimal_new_image_member(self): - member_id = 'fake-member-id' - image = self.image_factory.new_image( - image_id=UUID1, name='image-1', min_disk=256, - owner=TENANT1) - image_member = self.image_member_factory.new_image_member(image, - member_id) - self.assertEqual(image_member.image_id, image.image_id) - self.assertIsNotNone(image_member.created_at) - self.assertEqual(image_member.created_at, image_member.updated_at) - self.assertEqual('pending', image_member.status) - self.assertIsNotNone(image_member.member_id) - - -class TestExtraProperties(test_utils.BaseTestCase): - - def test_getitem(self): - a_dict = {'foo': 'bar', 'snitch': 'golden'} - extra_properties = domain.ExtraProperties(a_dict) - self.assertEqual('bar', extra_properties['foo']) - 
self.assertEqual('golden', extra_properties['snitch']) - - def test_getitem_with_no_items(self): - extra_properties = domain.ExtraProperties() - self.assertRaises(KeyError, extra_properties.__getitem__, 'foo') - - def test_setitem(self): - a_dict = {'foo': 'bar', 'snitch': 'golden'} - extra_properties = domain.ExtraProperties(a_dict) - extra_properties['foo'] = 'baz' - self.assertEqual('baz', extra_properties['foo']) - - def test_delitem(self): - a_dict = {'foo': 'bar', 'snitch': 'golden'} - extra_properties = domain.ExtraProperties(a_dict) - del extra_properties['foo'] - self.assertRaises(KeyError, extra_properties.__getitem__, 'foo') - self.assertEqual('golden', extra_properties['snitch']) - - def test_len_with_zero_items(self): - extra_properties = domain.ExtraProperties() - self.assertEqual(0, len(extra_properties)) - - def test_len_with_non_zero_items(self): - extra_properties = domain.ExtraProperties() - extra_properties['foo'] = 'bar' - extra_properties['snitch'] = 'golden' - self.assertEqual(2, len(extra_properties)) - - def test_eq_with_a_dict(self): - a_dict = {'foo': 'bar', 'snitch': 'golden'} - extra_properties = domain.ExtraProperties(a_dict) - ref_extra_properties = {'foo': 'bar', 'snitch': 'golden'} - self.assertEqual(ref_extra_properties, extra_properties) - - def test_eq_with_an_object_of_ExtraProperties(self): - a_dict = {'foo': 'bar', 'snitch': 'golden'} - extra_properties = domain.ExtraProperties(a_dict) - ref_extra_properties = domain.ExtraProperties() - ref_extra_properties['snitch'] = 'golden' - ref_extra_properties['foo'] = 'bar' - self.assertEqual(ref_extra_properties, extra_properties) - - def test_eq_with_uneqal_dict(self): - a_dict = {'foo': 'bar', 'snitch': 'golden'} - extra_properties = domain.ExtraProperties(a_dict) - ref_extra_properties = {'boo': 'far', 'gnitch': 'solden'} - self.assertNotEqual(ref_extra_properties, extra_properties) - - def test_eq_with_unequal_ExtraProperties_object(self): - a_dict = {'foo': 'bar', 'snitch': 
'golden'} - extra_properties = domain.ExtraProperties(a_dict) - ref_extra_properties = domain.ExtraProperties() - ref_extra_properties['gnitch'] = 'solden' - ref_extra_properties['boo'] = 'far' - self.assertNotEqual(ref_extra_properties, extra_properties) - - def test_eq_with_incompatible_object(self): - a_dict = {'foo': 'bar', 'snitch': 'golden'} - extra_properties = domain.ExtraProperties(a_dict) - random_list = ['foo', 'bar'] - self.assertNotEqual(random_list, extra_properties) - - -class TestTaskFactory(test_utils.BaseTestCase): - - def setUp(self): - super(TestTaskFactory, self).setUp() - self.task_factory = domain.TaskFactory() - - def test_new_task(self): - task_type = 'import' - owner = TENANT1 - task_input = 'input' - task = self.task_factory.new_task(task_type, owner, - task_input=task_input, - result='test_result', - message='test_message') - self.assertIsNotNone(task.task_id) - self.assertIsNotNone(task.created_at) - self.assertEqual(task_type, task.type) - self.assertEqual(task.created_at, task.updated_at) - self.assertEqual('pending', task.status) - self.assertIsNone(task.expires_at) - self.assertEqual(owner, task.owner) - self.assertEqual(task_input, task.task_input) - self.assertEqual('test_message', task.message) - self.assertEqual('test_result', task.result) - - def test_new_task_invalid_type(self): - task_type = 'blah' - owner = TENANT1 - self.assertRaises( - exception.InvalidTaskType, - self.task_factory.new_task, - task_type, - owner, - ) - - -class TestTask(test_utils.BaseTestCase): - - def setUp(self): - super(TestTask, self).setUp() - self.task_factory = domain.TaskFactory() - task_type = 'import' - owner = TENANT1 - task_ttl = CONF.task.task_time_to_live - self.task = self.task_factory.new_task(task_type, - owner, - task_time_to_live=task_ttl) - - def test_task_invalid_status(self): - task_id = str(uuid.uuid4()) - status = 'blah' - self.assertRaises( - exception.InvalidTaskStatus, - domain.Task, - task_id, - task_type='import', - 
status=status, - owner=None, - expires_at=None, - created_at=timeutils.utcnow(), - updated_at=timeutils.utcnow(), - task_input=None, - message=None, - result=None - ) - - def test_validate_status_transition_from_pending(self): - self.task.begin_processing() - self.assertEqual('processing', self.task.status) - - def test_validate_status_transition_from_processing_to_success(self): - self.task.begin_processing() - self.task.succeed('') - self.assertEqual('success', self.task.status) - - def test_validate_status_transition_from_processing_to_failure(self): - self.task.begin_processing() - self.task.fail('') - self.assertEqual('failure', self.task.status) - - def test_invalid_status_transitions_from_pending(self): - # test do not allow transition from pending to success - self.assertRaises( - exception.InvalidTaskStatusTransition, - self.task.succeed, - '' - ) - - def test_invalid_status_transitions_from_success(self): - # test do not allow transition from success to processing - self.task.begin_processing() - self.task.succeed('') - self.assertRaises( - exception.InvalidTaskStatusTransition, - self.task.begin_processing - ) - # test do not allow transition from success to failure - self.assertRaises( - exception.InvalidTaskStatusTransition, - self.task.fail, - '' - ) - - def test_invalid_status_transitions_from_failure(self): - # test do not allow transition from failure to processing - self.task.begin_processing() - self.task.fail('') - self.assertRaises( - exception.InvalidTaskStatusTransition, - self.task.begin_processing - ) - # test do not allow transition from failure to success - self.assertRaises( - exception.InvalidTaskStatusTransition, - self.task.succeed, - '' - ) - - def test_begin_processing(self): - self.task.begin_processing() - self.assertEqual('processing', self.task.status) - - @mock.patch.object(timeutils, 'utcnow') - def test_succeed(self, mock_utcnow): - mock_utcnow.return_value = datetime.datetime.utcnow() - self.task.begin_processing() - 
self.task.succeed('{"location": "file://home"}') - self.assertEqual('success', self.task.status) - self.assertEqual('{"location": "file://home"}', self.task.result) - self.assertEqual(u'', self.task.message) - expected = (timeutils.utcnow() + - datetime.timedelta(hours=CONF.task.task_time_to_live)) - self.assertEqual( - expected, - self.task.expires_at - ) - - @mock.patch.object(timeutils, 'utcnow') - def test_fail(self, mock_utcnow): - mock_utcnow.return_value = datetime.datetime.utcnow() - self.task.begin_processing() - self.task.fail('{"message": "connection failed"}') - self.assertEqual('failure', self.task.status) - self.assertEqual('{"message": "connection failed"}', self.task.message) - self.assertIsNone(self.task.result) - expected = (timeutils.utcnow() + - datetime.timedelta(hours=CONF.task.task_time_to_live)) - self.assertEqual( - expected, - self.task.expires_at - ) - - @mock.patch.object(glance.async.TaskExecutor, 'begin_processing') - def test_run(self, mock_begin_processing): - executor = glance.async.TaskExecutor(context=mock.ANY, - task_repo=mock.ANY, - image_repo=mock.ANY, - image_factory=mock.ANY) - self.task.run(executor) - - mock_begin_processing.assert_called_once_with(self.task.task_id) - - -class TestTaskStub(test_utils.BaseTestCase): - def setUp(self): - super(TestTaskStub, self).setUp() - self.task_id = str(uuid.uuid4()) - self.task_type = 'import' - self.owner = TENANT1 - self.task_ttl = CONF.task.task_time_to_live - - def test_task_stub_init(self): - self.task_factory = domain.TaskFactory() - task = domain.TaskStub( - self.task_id, - self.task_type, - 'status', - self.owner, - 'expires_at', - 'created_at', - 'updated_at' - ) - self.assertEqual(self.task_id, task.task_id) - self.assertEqual(self.task_type, task.type) - self.assertEqual(self.owner, task.owner) - self.assertEqual('status', task.status) - self.assertEqual('expires_at', task.expires_at) - self.assertEqual('created_at', task.created_at) - self.assertEqual('updated_at', 
task.updated_at) - - def test_task_stub_get_status(self): - status = 'pending' - task = domain.TaskStub( - self.task_id, - self.task_type, - status, - self.owner, - 'expires_at', - 'created_at', - 'updated_at' - ) - self.assertEqual(status, task.status) - - -class TestTaskExecutorFactory(test_utils.BaseTestCase): - def setUp(self): - super(TestTaskExecutorFactory, self).setUp() - self.task_repo = mock.Mock() - self.image_repo = mock.Mock() - self.image_factory = mock.Mock() - - def test_init(self): - task_executor_factory = domain.TaskExecutorFactory(self.task_repo, - self.image_repo, - self.image_factory) - self.assertEqual(self.task_repo, task_executor_factory.task_repo) - - def test_new_task_executor(self): - task_executor_factory = domain.TaskExecutorFactory(self.task_repo, - self.image_repo, - self.image_factory) - context = mock.Mock() - with mock.patch.object(oslo_utils.importutils, - 'import_class') as mock_import_class: - mock_executor = mock.Mock() - mock_import_class.return_value = mock_executor - task_executor_factory.new_task_executor(context) - - mock_executor.assert_called_once_with(context, - self.task_repo, - self.image_repo, - self.image_factory) - - def test_new_task_executor_error(self): - task_executor_factory = domain.TaskExecutorFactory(self.task_repo, - self.image_repo, - self.image_factory) - context = mock.Mock() - with mock.patch.object(oslo_utils.importutils, - 'import_class') as mock_import_class: - mock_import_class.side_effect = ImportError - - self.assertRaises(ImportError, - task_executor_factory.new_task_executor, - context) - - def test_new_task_eventlet_backwards_compatibility(self): - context = mock.MagicMock() - - self.config(task_executor='eventlet', group='task') - - task_executor_factory = domain.TaskExecutorFactory(self.task_repo, - self.image_repo, - self.image_factory) - - # NOTE(flaper87): "eventlet" executor. short name to avoid > 79. 
- te_evnt = task_executor_factory.new_task_executor(context) - self.assertIsInstance(te_evnt, taskflow_executor.TaskExecutor) diff --git a/glance/tests/unit/test_domain_proxy.py b/glance/tests/unit/test_domain_proxy.py deleted file mode 100644 index 6979cab7..00000000 --- a/glance/tests/unit/test_domain_proxy.py +++ /dev/null @@ -1,304 +0,0 @@ -# Copyright 2013 OpenStack Foundation. -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from glance.domain import proxy -import glance.tests.utils as test_utils - - -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' - - -class FakeProxy(object): - def __init__(self, base, *args, **kwargs): - self.base = base - self.args = args - self.kwargs = kwargs - - -class FakeRepo(object): - def __init__(self, result=None): - self.args = None - self.kwargs = None - self.result = result - - def fake_method(self, *args, **kwargs): - self.args = args - self.kwargs = kwargs - return self.result - - get = fake_method - list = fake_method - add = fake_method - save = fake_method - remove = fake_method - - -class TestProxyRepoPlain(test_utils.BaseTestCase): - def setUp(self): - super(TestProxyRepoPlain, self).setUp() - self.fake_repo = FakeRepo() - self.proxy_repo = proxy.Repo(self.fake_repo) - - def _test_method(self, name, 
base_result, *args, **kwargs): - self.fake_repo.result = base_result - method = getattr(self.proxy_repo, name) - proxy_result = method(*args, **kwargs) - self.assertEqual(base_result, proxy_result) - self.assertEqual(args, self.fake_repo.args) - self.assertEqual(kwargs, self.fake_repo.kwargs) - - def test_get(self): - self._test_method('get', 'snarf', 'abcd') - - def test_list(self): - self._test_method('list', ['sniff', 'snarf'], 2, filter='^sn') - - def test_add(self): - self._test_method('add', 'snuff', 'enough') - - def test_save(self): - self._test_method('save', 'snuff', 'enough', from_state=None) - - def test_remove(self): - self._test_method('add', None, 'flying') - - -class TestProxyRepoWrapping(test_utils.BaseTestCase): - def setUp(self): - super(TestProxyRepoWrapping, self).setUp() - self.fake_repo = FakeRepo() - self.proxy_repo = proxy.Repo(self.fake_repo, - item_proxy_class=FakeProxy, - item_proxy_kwargs={'a': 1}) - - def _test_method(self, name, base_result, *args, **kwargs): - self.fake_repo.result = base_result - method = getattr(self.proxy_repo, name) - proxy_result = method(*args, **kwargs) - self.assertIsInstance(proxy_result, FakeProxy) - self.assertEqual(base_result, proxy_result.base) - self.assertEqual(0, len(proxy_result.args)) - self.assertEqual({'a': 1}, proxy_result.kwargs) - self.assertEqual(args, self.fake_repo.args) - self.assertEqual(kwargs, self.fake_repo.kwargs) - - def test_get(self): - self.fake_repo.result = 'snarf' - result = self.proxy_repo.get('some-id') - self.assertIsInstance(result, FakeProxy) - self.assertEqual(('some-id',), self.fake_repo.args) - self.assertEqual({}, self.fake_repo.kwargs) - self.assertEqual('snarf', result.base) - self.assertEqual(tuple(), result.args) - self.assertEqual({'a': 1}, result.kwargs) - - def test_list(self): - self.fake_repo.result = ['scratch', 'sniff'] - results = self.proxy_repo.list(2, prefix='s') - self.assertEqual((2,), self.fake_repo.args) - self.assertEqual({'prefix': 's'}, 
self.fake_repo.kwargs) - self.assertEqual(2, len(results)) - for i in range(2): - self.assertIsInstance(results[i], FakeProxy) - self.assertEqual(self.fake_repo.result[i], results[i].base) - self.assertEqual(tuple(), results[i].args) - self.assertEqual({'a': 1}, results[i].kwargs) - - def _test_method_with_proxied_argument(self, name, result, **kwargs): - self.fake_repo.result = result - item = FakeProxy('snoop') - method = getattr(self.proxy_repo, name) - proxy_result = method(item) - - self.assertEqual(('snoop',), self.fake_repo.args) - self.assertEqual(kwargs, self.fake_repo.kwargs) - - if result is None: - self.assertIsNone(proxy_result) - else: - self.assertIsInstance(proxy_result, FakeProxy) - self.assertEqual(result, proxy_result.base) - self.assertEqual(tuple(), proxy_result.args) - self.assertEqual({'a': 1}, proxy_result.kwargs) - - def test_add(self): - self._test_method_with_proxied_argument('add', 'dog') - - def test_add_with_no_result(self): - self._test_method_with_proxied_argument('add', None) - - def test_save(self): - self._test_method_with_proxied_argument('save', 'dog', - from_state=None) - - def test_save_with_no_result(self): - self._test_method_with_proxied_argument('save', None, - from_state=None) - - def test_remove(self): - self._test_method_with_proxied_argument('remove', 'dog') - - def test_remove_with_no_result(self): - self._test_method_with_proxied_argument('remove', None) - - -class FakeImageFactory(object): - def __init__(self, result=None): - self.result = None - self.kwargs = None - - def new_image(self, **kwargs): - self.kwargs = kwargs - return self.result - - -class TestImageFactory(test_utils.BaseTestCase): - def setUp(self): - super(TestImageFactory, self).setUp() - self.factory = FakeImageFactory() - - def test_proxy_plain(self): - proxy_factory = proxy.ImageFactory(self.factory) - self.factory.result = 'eddard' - image = proxy_factory.new_image(a=1, b='two') - self.assertEqual('eddard', image) - self.assertEqual({'a': 1, 
'b': 'two'}, self.factory.kwargs) - - def test_proxy_wrapping(self): - proxy_factory = proxy.ImageFactory(self.factory, - proxy_class=FakeProxy, - proxy_kwargs={'dog': 'bark'}) - self.factory.result = 'stark' - image = proxy_factory.new_image(a=1, b='two') - self.assertIsInstance(image, FakeProxy) - self.assertEqual('stark', image.base) - self.assertEqual({'a': 1, 'b': 'two'}, self.factory.kwargs) - - -class FakeImageMembershipFactory(object): - def __init__(self, result=None): - self.result = None - self.image = None - self.member_id = None - - def new_image_member(self, image, member_id): - self.image = image - self.member_id = member_id - return self.result - - -class TestImageMembershipFactory(test_utils.BaseTestCase): - def setUp(self): - super(TestImageMembershipFactory, self).setUp() - self.factory = FakeImageMembershipFactory() - - def test_proxy_plain(self): - proxy_factory = proxy.ImageMembershipFactory(self.factory) - self.factory.result = 'tyrion' - membership = proxy_factory.new_image_member('jaime', 'cersei') - self.assertEqual('tyrion', membership) - self.assertEqual('jaime', self.factory.image) - self.assertEqual('cersei', self.factory.member_id) - - def test_proxy_wrapped_membership(self): - proxy_factory = proxy.ImageMembershipFactory( - self.factory, proxy_class=FakeProxy, proxy_kwargs={'a': 1}) - self.factory.result = 'tyrion' - membership = proxy_factory.new_image_member('jaime', 'cersei') - self.assertIsInstance(membership, FakeProxy) - self.assertEqual('tyrion', membership.base) - self.assertEqual({'a': 1}, membership.kwargs) - self.assertEqual('jaime', self.factory.image) - self.assertEqual('cersei', self.factory.member_id) - - def test_proxy_wrapped_image(self): - proxy_factory = proxy.ImageMembershipFactory( - self.factory, proxy_class=FakeProxy) - self.factory.result = 'tyrion' - image = FakeProxy('jaime') - membership = proxy_factory.new_image_member(image, 'cersei') - self.assertIsInstance(membership, FakeProxy) - 
self.assertIsInstance(self.factory.image, FakeProxy) - self.assertEqual('cersei', self.factory.member_id) - - def test_proxy_both_wrapped(self): - class FakeProxy2(FakeProxy): - pass - - proxy_factory = proxy.ImageMembershipFactory( - self.factory, - proxy_class=FakeProxy, - proxy_kwargs={'b': 2}) - - self.factory.result = 'tyrion' - image = FakeProxy2('jaime') - membership = proxy_factory.new_image_member(image, 'cersei') - self.assertIsInstance(membership, FakeProxy) - self.assertEqual('tyrion', membership.base) - self.assertEqual({'b': 2}, membership.kwargs) - self.assertIsInstance(self.factory.image, FakeProxy2) - self.assertEqual('cersei', self.factory.member_id) - - -class FakeImage(object): - def __init__(self, result=None): - self.result = result - - -class TestTaskFactory(test_utils.BaseTestCase): - def setUp(self): - super(TestTaskFactory, self).setUp() - self.factory = mock.Mock() - self.fake_type = 'import' - self.fake_owner = "owner" - - def test_proxy_plain(self): - proxy_factory = proxy.TaskFactory(self.factory) - - proxy_factory.new_task( - type=self.fake_type, - owner=self.fake_owner - ) - - self.factory.new_task.assert_called_once_with( - type=self.fake_type, - owner=self.fake_owner - ) - - def test_proxy_wrapping(self): - proxy_factory = proxy.TaskFactory( - self.factory, - task_proxy_class=FakeProxy, - task_proxy_kwargs={'dog': 'bark'}) - - self.factory.new_task.return_value = 'fake_task' - - task = proxy_factory.new_task( - type=self.fake_type, - owner=self.fake_owner - ) - - self.factory.new_task.assert_called_once_with( - type=self.fake_type, - owner=self.fake_owner - ) - self.assertIsInstance(task, FakeProxy) - self.assertEqual('fake_task', task.base) diff --git a/glance/tests/unit/test_glance_manage.py b/glance/tests/unit/test_glance_manage.py deleted file mode 100644 index 3facb84a..00000000 --- a/glance/tests/unit/test_glance_manage.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2016 OpenStack Foundation. -# Copyright 2016 NTT Data. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_db import exception as db_exception - -from glance.cmd import manage -from glance import context -from glance.db.sqlalchemy import api as db_api -import glance.tests.utils as test_utils - -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' -USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' - - -class DBCommandsTestCase(test_utils.BaseTestCase): - def setUp(self): - super(DBCommandsTestCase, self).setUp() - self.commands = manage.DbCommands() - self.context = context.RequestContext( - user=USER1, tenant=TENANT1) - - @mock.patch.object(db_api, 'purge_deleted_rows') - @mock.patch.object(context, 'get_admin_context') - def test_purge_command(self, mock_context, mock_db_purge): - mock_context.return_value = self.context - self.commands.purge(0, 100) - mock_db_purge.assert_called_once_with(self.context, 0, 100) - - def test_purge_command_negative_rows(self): - exit = self.assertRaises(SystemExit, self.commands.purge, 1, -1) - self.assertEqual("Minimal rows limit is 1.", exit.code) - - def test_purge_invalid_age_in_days(self): - age_in_days = 'abcd' - ex = self.assertRaises(SystemExit, self.commands.purge, age_in_days) - expected = ("Invalid int value for age_in_days: " - "%(age_in_days)s") % {'age_in_days': age_in_days} - self.assertEqual(expected, ex.code) - - def test_purge_negative_age_in_days(self): - ex = self.assertRaises(SystemExit, self.commands.purge, '-1') - 
self.assertEqual("Must supply a non-negative value for age.", ex.code) - - def test_purge_invalid_max_rows(self): - max_rows = 'abcd' - ex = self.assertRaises(SystemExit, self.commands.purge, 1, max_rows) - expected = ("Invalid int value for max_rows: " - "%(max_rows)s") % {'max_rows': max_rows} - self.assertEqual(expected, ex.code) - - @mock.patch.object(db_api, 'purge_deleted_rows') - @mock.patch.object(context, 'get_admin_context') - def test_purge_max_rows(self, mock_context, mock_db_purge): - mock_context.return_value = self.context - value = (2 ** 31) - 1 - self.commands.purge(age_in_days=1, max_rows=value) - mock_db_purge.assert_called_once_with(self.context, 1, value) - - def test_purge_command_exceeded_maximum_rows(self): - # value(2 ** 31) is greater than max_rows(2147483647) by 1. - value = 2 ** 31 - ex = self.assertRaises(SystemExit, self.commands.purge, age_in_days=1, - max_rows=value) - expected = "'max_rows' value out of range, must not exceed 2147483647." - self.assertEqual(expected, ex.code) - - @mock.patch('glance.db.sqlalchemy.api.purge_deleted_rows') - def test_purge_command_fk_constraint_failure(self, purge_deleted_rows): - purge_deleted_rows.side_effect = db_exception.DBReferenceError( - 'fake_table', 'fake_constraint', 'fake_key', 'fake_key_table') - exit = self.assertRaises(SystemExit, self.commands.purge, 10, 100) - self.assertEqual("Purge command failed, check glance-manage logs" - " for more details.", exit.code) diff --git a/glance/tests/unit/test_glance_replicator.py b/glance/tests/unit/test_glance_replicator.py deleted file mode 100644 index 3e39b5fb..00000000 --- a/glance/tests/unit/test_glance_replicator.py +++ /dev/null @@ -1,614 +0,0 @@ -# Copyright 2012 Michael Still and Canonical Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import os -import sys -import uuid - -import fixtures -import mock -from oslo_serialization import jsonutils -import six -from six import moves -from six.moves import http_client as http -import webob - -from glance.cmd import replicator as glance_replicator -from glance.common import exception -from glance.tests.unit import utils as unit_test_utils -from glance.tests import utils as test_utils - - -IMG_RESPONSE_ACTIVE = { - 'content-length': '0', - 'property-image_state': 'available', - 'min_ram': '0', - 'disk_format': 'aki', - 'updated_at': '2012-06-25T02:10:36', - 'date': 'Thu, 28 Jun 2012 07:20:05 GMT', - 'owner': '8aef75b5c0074a59aa99188fdb4b9e90', - 'id': '6d55dd55-053a-4765-b7bc-b30df0ea3861', - 'size': '4660272', - 'property-image_location': 'ubuntu-bucket/oneiric-server-cloudimg-amd64-' - 'vmlinuz-generic.manifest.xml', - 'property-architecture': 'x86_64', - 'etag': 'f46cfe7fb3acaff49a3567031b9b53bb', - 'location': 'http://127.0.0.1:9292/v1/images/' - '6d55dd55-053a-4765-b7bc-b30df0ea3861', - 'container_format': 'aki', - 'status': 'active', - 'deleted': 'False', - 'min_disk': '0', - 'is_public': 'False', - 'name': 'ubuntu-bucket/oneiric-server-cloudimg-amd64-vmlinuz-generic', - 'checksum': 'f46cfe7fb3acaff49a3567031b9b53bb', - 'created_at': '2012-06-25T02:10:32', - 'protected': 'False', - 'content-type': 'text/html; charset=UTF-8' -} - -IMG_RESPONSE_QUEUED = copy.copy(IMG_RESPONSE_ACTIVE) -IMG_RESPONSE_QUEUED['status'] = 'queued' -IMG_RESPONSE_QUEUED['id'] = '49b2c782-ee10-4692-84f8-3942e9432c4b' -IMG_RESPONSE_QUEUED['location'] = 
('http://127.0.0.1:9292/v1/images/' - + IMG_RESPONSE_QUEUED['id']) - - -class FakeHTTPConnection(object): - def __init__(self): - self.count = 0 - self.reqs = {} - self.last_req = None - self.host = 'localhost' - self.port = 9292 - - def prime_request(self, method, url, in_body, in_headers, - out_code, out_body, out_headers): - if not url.startswith('/'): - url = '/' + url - url = unit_test_utils.sort_url_by_qs_keys(url) - hkeys = sorted(in_headers.keys()) - hashable = (method, url, in_body, ' '.join(hkeys)) - - flat_headers = [] - for key in out_headers: - flat_headers.append((key, out_headers[key])) - - self.reqs[hashable] = (out_code, out_body, flat_headers) - - def request(self, method, url, body, headers): - self.count += 1 - url = unit_test_utils.sort_url_by_qs_keys(url) - hkeys = sorted(headers.keys()) - hashable = (method, url, body, ' '.join(hkeys)) - - if hashable not in self.reqs: - options = [] - for h in self.reqs: - options.append(repr(h)) - - raise Exception('No such primed request: %s "%s"\n' - '%s\n\n' - 'Available:\n' - '%s' - % (method, url, hashable, '\n\n'.join(options))) - self.last_req = hashable - - def getresponse(self): - class FakeResponse(object): - def __init__(self, args): - (code, body, headers) = args - self.body = six.StringIO(body) - self.headers = headers - self.status = code - - def read(self, count=1000000): - return self.body.read(count) - - def getheaders(self): - return self.headers - - return FakeResponse(self.reqs[self.last_req]) - - -class ImageServiceTestCase(test_utils.BaseTestCase): - def test_rest_errors(self): - c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') - - for code, exc in [(http.BAD_REQUEST, webob.exc.HTTPBadRequest), - (http.UNAUTHORIZED, webob.exc.HTTPUnauthorized), - (http.FORBIDDEN, webob.exc.HTTPForbidden), - (http.CONFLICT, webob.exc.HTTPConflict), - (http.INTERNAL_SERVER_ERROR, - webob.exc.HTTPInternalServerError)]: - c.conn.prime_request('GET', - ('v1/images/' - 
'5dcddce0-cba5-4f18-9cf4-9853c7b207a6'), '', - {'x-auth-token': 'noauth'}, code, '', {}) - self.assertRaises(exc, c.get_image, - '5dcddce0-cba5-4f18-9cf4-9853c7b207a6') - - def test_rest_get_images(self): - c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') - - # Two images, one of which is queued - resp = {'images': [IMG_RESPONSE_ACTIVE, IMG_RESPONSE_QUEUED]} - c.conn.prime_request('GET', 'v1/images/detail?is_public=None', '', - {'x-auth-token': 'noauth'}, - http.OK, jsonutils.dumps(resp), {}) - c.conn.prime_request('GET', - ('v1/images/detail?marker=%s&is_public=None' - % IMG_RESPONSE_QUEUED['id']), - '', {'x-auth-token': 'noauth'}, - http.OK, jsonutils.dumps({'images': []}), {}) - - imgs = list(c.get_images()) - self.assertEqual(2, len(imgs)) - self.assertEqual(2, c.conn.count) - - def test_rest_get_image(self): - c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') - - image_contents = 'THISISTHEIMAGEBODY' - c.conn.prime_request('GET', - 'v1/images/%s' % IMG_RESPONSE_ACTIVE['id'], - '', {'x-auth-token': 'noauth'}, - http.OK, image_contents, IMG_RESPONSE_ACTIVE) - - body = c.get_image(IMG_RESPONSE_ACTIVE['id']) - self.assertEqual(image_contents, body.read()) - - def test_rest_header_list_to_dict(self): - i = [('x-image-meta-banana', 42), - ('gerkin', 12), - ('x-image-meta-property-frog', 11), - ('x-image-meta-property-duck', 12)] - o = glance_replicator.ImageService._header_list_to_dict(i) - self.assertIn('banana', o) - self.assertIn('gerkin', o) - self.assertIn('properties', o) - self.assertIn('frog', o['properties']) - self.assertIn('duck', o['properties']) - self.assertNotIn('x-image-meta-banana', o) - - def test_rest_get_image_meta(self): - c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') - - c.conn.prime_request('HEAD', - 'v1/images/%s' % IMG_RESPONSE_ACTIVE['id'], - '', {'x-auth-token': 'noauth'}, - http.OK, '', IMG_RESPONSE_ACTIVE) - - header = c.get_image_meta(IMG_RESPONSE_ACTIVE['id']) - 
self.assertIn('id', header) - - def test_rest_dict_to_headers(self): - i = {'banana': 42, - 'gerkin': 12, - 'properties': {'frog': 1, - 'kernel_id': None} - } - o = glance_replicator.ImageService._dict_to_headers(i) - self.assertIn('x-image-meta-banana', o) - self.assertIn('x-image-meta-gerkin', o) - self.assertIn('x-image-meta-property-frog', o) - self.assertIn('x-image-meta-property-kernel_id', o) - self.assertEqual(o['x-image-meta-property-kernel_id'], '') - self.assertNotIn('properties', o) - - def test_rest_add_image(self): - c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') - - image_body = 'THISISANIMAGEBODYFORSURE!' - image_meta_with_proto = { - 'x-auth-token': 'noauth', - 'Content-Type': 'application/octet-stream', - 'Content-Length': len(image_body) - } - - for key in IMG_RESPONSE_ACTIVE: - image_meta_with_proto[ - 'x-image-meta-%s' % key] = IMG_RESPONSE_ACTIVE[key] - - c.conn.prime_request('POST', 'v1/images', - image_body, image_meta_with_proto, - http.OK, '', IMG_RESPONSE_ACTIVE) - - headers, body = c.add_image(IMG_RESPONSE_ACTIVE, image_body) - self.assertEqual(IMG_RESPONSE_ACTIVE, headers) - self.assertEqual(1, c.conn.count) - - def test_rest_add_image_meta(self): - c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') - - image_meta = {'id': '5dcddce0-cba5-4f18-9cf4-9853c7b207a6'} - image_meta_headers = glance_replicator.ImageService._dict_to_headers( - image_meta) - image_meta_headers['x-auth-token'] = 'noauth' - image_meta_headers['Content-Type'] = 'application/octet-stream' - c.conn.prime_request('PUT', 'v1/images/%s' % image_meta['id'], - '', image_meta_headers, http.OK, '', '') - headers, body = c.add_image_meta(image_meta) - - -class FakeHttpResponse(object): - def __init__(self, headers, data): - self.headers = headers - self.data = six.BytesIO(data) - - def getheaders(self): - return self.headers - - def read(self, amt=None): - return self.data.read(amt) - - -FAKEIMAGES = [{'status': 'active', 'size': 100, 
'dontrepl': 'banana', - 'id': '5dcddce0-cba5-4f18-9cf4-9853c7b207a6', 'name': 'x1'}, - {'status': 'deleted', 'size': 200, 'dontrepl': 'banana', - 'id': 'f4da1d2a-40e8-4710-b3aa-0222a4cc887b', 'name': 'x2'}, - {'status': 'active', 'size': 300, 'dontrepl': 'banana', - 'id': '37ff82db-afca-48c7-ae0b-ddc7cf83e3db', 'name': 'x3'}] -FAKEIMAGES_LIVEMASTER = [{'status': 'active', 'size': 100, - 'dontrepl': 'banana', 'name': 'x1', - 'id': '5dcddce0-cba5-4f18-9cf4-9853c7b207a6'}, - {'status': 'deleted', 'size': 200, - 'dontrepl': 'banana', 'name': 'x2', - 'id': 'f4da1d2a-40e8-4710-b3aa-0222a4cc887b'}, - {'status': 'deleted', 'size': 300, - 'dontrepl': 'banana', 'name': 'x3', - 'id': '37ff82db-afca-48c7-ae0b-ddc7cf83e3db'}, - {'status': 'active', 'size': 100, - 'dontrepl': 'banana', 'name': 'x4', - 'id': '15648dd7-8dd0-401c-bd51-550e1ba9a088'}] - - -class FakeImageService(object): - def __init__(self, http_conn, authtoken): - self.authtoken = authtoken - - def get_images(self): - if self.authtoken == 'livesourcetoken': - return FAKEIMAGES_LIVEMASTER - return FAKEIMAGES - - def get_image(self, id): - return FakeHttpResponse({}, b'data') - - def get_image_meta(self, id): - for img in FAKEIMAGES: - if img['id'] == id: - return img - return {} - - def add_image_meta(self, meta): - return {'status': http.OK}, None - - def add_image(self, meta, data): - return {'status': http.OK}, None - - -def get_image_service(): - return FakeImageService - - -def check_no_args(command, args): - options = moves.UserDict() - no_args_error = False - - orig_img_service = glance_replicator.get_image_service - try: - glance_replicator.get_image_service = get_image_service - command(options, args) - except TypeError as e: - if str(e) == "Too few arguments.": - no_args_error = True - finally: - glance_replicator.get_image_service = orig_img_service - - return no_args_error - - -def check_bad_args(command, args): - options = moves.UserDict() - bad_args_error = False - - orig_img_service = 
glance_replicator.get_image_service - try: - glance_replicator.get_image_service = get_image_service - command(options, args) - except ValueError: - bad_args_error = True - finally: - glance_replicator.get_image_service = orig_img_service - - return bad_args_error - - -class ReplicationCommandsTestCase(test_utils.BaseTestCase): - - @mock.patch.object(glance_replicator, 'lookup_command') - def test_help(self, mock_lookup_command): - option = mock.Mock() - mock_lookup_command.return_value = "fake_return" - - glance_replicator.print_help(option, []) - glance_replicator.print_help(option, ['dump']) - glance_replicator.print_help(option, ['fake_command']) - self.assertEqual(2, mock_lookup_command.call_count) - - def test_replication_size(self): - options = moves.UserDict() - options.targettoken = 'targettoken' - args = ['localhost:9292'] - - stdout = sys.stdout - orig_img_service = glance_replicator.get_image_service - sys.stdout = six.StringIO() - try: - glance_replicator.get_image_service = get_image_service - glance_replicator.replication_size(options, args) - sys.stdout.seek(0) - output = sys.stdout.read() - finally: - sys.stdout = stdout - glance_replicator.get_image_service = orig_img_service - - output = output.rstrip() - self.assertEqual( - 'Total size is 400 bytes (400.0 B) across 2 images', - output - ) - - def test_replication_size_with_no_args(self): - args = [] - command = glance_replicator.replication_size - self.assertTrue(check_no_args(command, args)) - - def test_replication_size_with_args_is_None(self): - args = None - command = glance_replicator.replication_size - self.assertTrue(check_no_args(command, args)) - - def test_replication_size_with_bad_args(self): - args = ['aaa'] - command = glance_replicator.replication_size - self.assertTrue(check_bad_args(command, args)) - - def test_human_readable_size(self): - _human_readable_size = glance_replicator._human_readable_size - - self.assertEqual('0.0 B', _human_readable_size(0)) - self.assertEqual('1.0 
B', _human_readable_size(1)) - self.assertEqual('512.0 B', _human_readable_size(512)) - self.assertEqual('1.0 KiB', _human_readable_size(1024)) - self.assertEqual('2.0 KiB', _human_readable_size(2048)) - self.assertEqual('8.0 KiB', _human_readable_size(8192)) - self.assertEqual('64.0 KiB', _human_readable_size(65536)) - self.assertEqual('93.3 KiB', _human_readable_size(95536)) - self.assertEqual('117.7 MiB', _human_readable_size(123456789)) - self.assertEqual('36.3 GiB', _human_readable_size(39022543360)) - - def test_replication_dump(self): - tempdir = self.useFixture(fixtures.TempDir()).path - - options = moves.UserDict() - options.chunksize = 4096 - options.sourcetoken = 'sourcetoken' - options.metaonly = False - args = ['localhost:9292', tempdir] - - orig_img_service = glance_replicator.get_image_service - self.addCleanup(setattr, glance_replicator, - 'get_image_service', orig_img_service) - glance_replicator.get_image_service = get_image_service - glance_replicator.replication_dump(options, args) - - for active in ['5dcddce0-cba5-4f18-9cf4-9853c7b207a6', - '37ff82db-afca-48c7-ae0b-ddc7cf83e3db']: - imgfile = os.path.join(tempdir, active) - self.assertTrue(os.path.exists(imgfile)) - self.assertTrue(os.path.exists('%s.img' % imgfile)) - - with open(imgfile) as f: - d = jsonutils.loads(f.read()) - self.assertIn('status', d) - self.assertIn('id', d) - self.assertIn('size', d) - - for inactive in ['f4da1d2a-40e8-4710-b3aa-0222a4cc887b']: - imgfile = os.path.join(tempdir, inactive) - self.assertTrue(os.path.exists(imgfile)) - self.assertFalse(os.path.exists('%s.img' % imgfile)) - - with open(imgfile) as f: - d = jsonutils.loads(f.read()) - self.assertIn('status', d) - self.assertIn('id', d) - self.assertIn('size', d) - - def test_replication_dump_with_no_args(self): - args = [] - command = glance_replicator.replication_dump - self.assertTrue(check_no_args(command, args)) - - def test_replication_dump_with_bad_args(self): - args = ['aaa', 'bbb'] - command = 
glance_replicator.replication_dump - self.assertTrue(check_bad_args(command, args)) - - def test_replication_load(self): - tempdir = self.useFixture(fixtures.TempDir()).path - - def write_image(img, data): - imgfile = os.path.join(tempdir, img['id']) - with open(imgfile, 'w') as f: - f.write(jsonutils.dumps(img)) - - if data: - with open('%s.img' % imgfile, 'w') as f: - f.write(data) - - for img in FAKEIMAGES: - cimg = copy.copy(img) - # We need at least one image where the stashed metadata on disk - # is newer than what the fake has - if cimg['id'] == '5dcddce0-cba5-4f18-9cf4-9853c7b207a6': - cimg['extra'] = 'thisissomeextra' - - # This is an image where the metadata change should be ignored - if cimg['id'] == 'f4da1d2a-40e8-4710-b3aa-0222a4cc887b': - cimg['dontrepl'] = 'thisisyetmoreextra' - - write_image(cimg, 'kjdhfkjshdfkjhsdkfd') - - # And an image which isn't on the destination at all - new_id = str(uuid.uuid4()) - cimg['id'] = new_id - write_image(cimg, 'dskjfhskjhfkfdhksjdhf') - - # And an image which isn't on the destination, but lacks image - # data - new_id_missing_data = str(uuid.uuid4()) - cimg['id'] = new_id_missing_data - write_image(cimg, None) - - # A file which should be ignored - badfile = os.path.join(tempdir, 'kjdfhf') - with open(badfile, 'w') as f: - f.write(jsonutils.dumps([1, 2, 3, 4, 5])) - - # Finally, we're ready to test - options = moves.UserDict() - options.dontreplicate = 'dontrepl dontreplabsent' - options.targettoken = 'targettoken' - args = ['localhost:9292', tempdir] - - orig_img_service = glance_replicator.get_image_service - try: - glance_replicator.get_image_service = get_image_service - updated = glance_replicator.replication_load(options, args) - finally: - glance_replicator.get_image_service = orig_img_service - - self.assertIn('5dcddce0-cba5-4f18-9cf4-9853c7b207a6', updated) - self.assertNotIn('f4da1d2a-40e8-4710-b3aa-0222a4cc887b', updated) - self.assertIn(new_id, updated) - self.assertNotIn(new_id_missing_data, updated) 
- - def test_replication_load_with_no_args(self): - args = [] - command = glance_replicator.replication_load - self.assertTrue(check_no_args(command, args)) - - def test_replication_load_with_bad_args(self): - args = ['aaa', 'bbb'] - command = glance_replicator.replication_load - self.assertTrue(check_bad_args(command, args)) - - def test_replication_livecopy(self): - options = moves.UserDict() - options.chunksize = 4096 - options.dontreplicate = 'dontrepl dontreplabsent' - options.sourcetoken = 'livesourcetoken' - options.targettoken = 'livetargettoken' - options.metaonly = False - args = ['localhost:9292', 'localhost:9393'] - - orig_img_service = glance_replicator.get_image_service - try: - glance_replicator.get_image_service = get_image_service - updated = glance_replicator.replication_livecopy(options, args) - finally: - glance_replicator.get_image_service = orig_img_service - - self.assertEqual(2, len(updated)) - - def test_replication_livecopy_with_no_args(self): - args = [] - command = glance_replicator.replication_livecopy - self.assertTrue(check_no_args(command, args)) - - def test_replication_livecopy_with_bad_args(self): - args = ['aaa', 'bbb'] - command = glance_replicator.replication_livecopy - self.assertTrue(check_bad_args(command, args)) - - def test_replication_compare(self): - options = moves.UserDict() - options.chunksize = 4096 - options.dontreplicate = 'dontrepl dontreplabsent' - options.sourcetoken = 'livesourcetoken' - options.targettoken = 'livetargettoken' - options.metaonly = False - args = ['localhost:9292', 'localhost:9393'] - - orig_img_service = glance_replicator.get_image_service - try: - glance_replicator.get_image_service = get_image_service - differences = glance_replicator.replication_compare(options, args) - finally: - glance_replicator.get_image_service = orig_img_service - - self.assertIn('15648dd7-8dd0-401c-bd51-550e1ba9a088', differences) - self.assertEqual(differences['15648dd7-8dd0-401c-bd51-550e1ba9a088'], - 'missing') - 
self.assertIn('37ff82db-afca-48c7-ae0b-ddc7cf83e3db', differences) - self.assertEqual(differences['37ff82db-afca-48c7-ae0b-ddc7cf83e3db'], - 'diff') - - def test_replication_compare_with_no_args(self): - args = [] - command = glance_replicator.replication_compare - self.assertTrue(check_no_args(command, args)) - - def test_replication_compare_with_bad_args(self): - args = ['aaa', 'bbb'] - command = glance_replicator.replication_compare - self.assertTrue(check_bad_args(command, args)) - - -class ReplicationUtilitiesTestCase(test_utils.BaseTestCase): - def test_check_upload_response_headers(self): - glance_replicator._check_upload_response_headers({'status': 'active'}, - None) - - d = {'image': {'status': 'active'}} - glance_replicator._check_upload_response_headers({}, - jsonutils.dumps(d)) - - self.assertRaises( - exception.UploadException, - glance_replicator._check_upload_response_headers, {}, None) - - def test_image_present(self): - client = FakeImageService(None, 'noauth') - self.assertTrue(glance_replicator._image_present( - client, '5dcddce0-cba5-4f18-9cf4-9853c7b207a6')) - self.assertFalse(glance_replicator._image_present( - client, uuid.uuid4())) - - def test_dict_diff(self): - a = {'a': 1, 'b': 2, 'c': 3} - b = {'a': 1, 'b': 2} - c = {'a': 1, 'b': 1, 'c': 3} - d = {'a': 1, 'b': 2, 'c': 3, 'd': 4} - - # Only things that the first dict has which the second dict doesn't - # matter here. - self.assertFalse(glance_replicator._dict_diff(a, a)) - self.assertTrue(glance_replicator._dict_diff(a, b)) - self.assertTrue(glance_replicator._dict_diff(a, c)) - self.assertFalse(glance_replicator._dict_diff(a, d)) diff --git a/glance/tests/unit/test_image_cache.py b/glance/tests/unit/test_image_cache.py deleted file mode 100644 index 89d86f5f..00000000 --- a/glance/tests/unit/test_image_cache.py +++ /dev/null @@ -1,563 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from contextlib import contextmanager -import datetime -import hashlib -import os -import time - -import fixtures -from oslo_utils import units -from oslotest import moxstubout -import six -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from glance.common import exception -from glance import image_cache -# NOTE(bcwaldon): This is imported to load the registry config options -import glance.registry # noqa -from glance.tests import utils as test_utils -from glance.tests.utils import skip_if_disabled -from glance.tests.utils import xattr_writes_supported - -FIXTURE_LENGTH = 1024 -FIXTURE_DATA = b'*' * FIXTURE_LENGTH - - -class ImageCacheTestCase(object): - - def _setup_fixture_file(self): - FIXTURE_FILE = six.BytesIO(FIXTURE_DATA) - - self.assertFalse(self.cache.is_cached(1)) - - self.assertTrue(self.cache.cache_image_file(1, FIXTURE_FILE)) - - self.assertTrue(self.cache.is_cached(1)) - - @skip_if_disabled - def test_is_cached(self): - """Verify is_cached(1) returns 0, then add something to the cache - and verify is_cached(1) returns 1. - """ - self._setup_fixture_file() - - @skip_if_disabled - def test_read(self): - """Verify is_cached(1) returns 0, then add something to the cache - and verify after a subsequent read from the cache that - is_cached(1) returns 1. 
- """ - self._setup_fixture_file() - - buff = six.BytesIO() - with self.cache.open_for_read(1) as cache_file: - for chunk in cache_file: - buff.write(chunk) - - self.assertEqual(FIXTURE_DATA, buff.getvalue()) - - @skip_if_disabled - def test_open_for_read(self): - """Test convenience wrapper for opening a cache file via - its image identifier. - """ - self._setup_fixture_file() - - buff = six.BytesIO() - with self.cache.open_for_read(1) as cache_file: - for chunk in cache_file: - buff.write(chunk) - - self.assertEqual(FIXTURE_DATA, buff.getvalue()) - - @skip_if_disabled - def test_get_image_size(self): - """Test convenience wrapper for querying cache file size via - its image identifier. - """ - self._setup_fixture_file() - - size = self.cache.get_image_size(1) - - self.assertEqual(FIXTURE_LENGTH, size) - - @skip_if_disabled - def test_delete(self): - """Test delete method that removes an image from the cache.""" - self._setup_fixture_file() - - self.cache.delete_cached_image(1) - - self.assertFalse(self.cache.is_cached(1)) - - @skip_if_disabled - def test_delete_all(self): - """Test delete method that removes an image from the cache.""" - for image_id in (1, 2): - self.assertFalse(self.cache.is_cached(image_id)) - - for image_id in (1, 2): - FIXTURE_FILE = six.BytesIO(FIXTURE_DATA) - self.assertTrue(self.cache.cache_image_file(image_id, - FIXTURE_FILE)) - - for image_id in (1, 2): - self.assertTrue(self.cache.is_cached(image_id)) - - self.cache.delete_all_cached_images() - - for image_id in (1, 2): - self.assertFalse(self.cache.is_cached(image_id)) - - @skip_if_disabled - def test_clean_stalled(self): - """Test the clean method removes expected images.""" - incomplete_file_path = os.path.join(self.cache_dir, 'incomplete', '1') - incomplete_file = open(incomplete_file_path, 'wb') - incomplete_file.write(FIXTURE_DATA) - incomplete_file.close() - - self.assertTrue(os.path.exists(incomplete_file_path)) - - self.cache.clean(stall_time=0) - - 
self.assertFalse(os.path.exists(incomplete_file_path)) - - @skip_if_disabled - def test_clean_stalled_nonzero_stall_time(self): - """ - Test the clean method removes the stalled images as expected - """ - incomplete_file_path_1 = os.path.join(self.cache_dir, - 'incomplete', '1') - incomplete_file_path_2 = os.path.join(self.cache_dir, - 'incomplete', '2') - for f in (incomplete_file_path_1, incomplete_file_path_2): - incomplete_file = open(f, 'wb') - incomplete_file.write(FIXTURE_DATA) - incomplete_file.close() - - mtime = os.path.getmtime(incomplete_file_path_1) - pastday = (datetime.datetime.fromtimestamp(mtime) - - datetime.timedelta(days=1)) - atime = int(time.mktime(pastday.timetuple())) - mtime = atime - os.utime(incomplete_file_path_1, (atime, mtime)) - - self.assertTrue(os.path.exists(incomplete_file_path_1)) - self.assertTrue(os.path.exists(incomplete_file_path_2)) - - self.cache.clean(stall_time=3600) - - self.assertFalse(os.path.exists(incomplete_file_path_1)) - self.assertTrue(os.path.exists(incomplete_file_path_2)) - - @skip_if_disabled - def test_prune(self): - """ - Test that pruning the cache works as expected... - """ - self.assertEqual(0, self.cache.get_cache_size()) - - # Add a bunch of images to the cache. The max cache size for the cache - # is set to 5KB and each image is 1K. We use 11 images in this test. - # The first 10 are added to and retrieved from cache in the same order. - # Then, the 11th image is added to cache but not retrieved before we - # prune. We should see only 5 images left after pruning, and the - # images that are least recently accessed should be the ones pruned... - for x in range(10): - FIXTURE_FILE = six.BytesIO(FIXTURE_DATA) - self.assertTrue(self.cache.cache_image_file(x, FIXTURE_FILE)) - - self.assertEqual(10 * units.Ki, self.cache.get_cache_size()) - - # OK, hit the images that are now cached... 
- for x in range(10): - buff = six.BytesIO() - with self.cache.open_for_read(x) as cache_file: - for chunk in cache_file: - buff.write(chunk) - - # Add a new image to cache. - # This is specifically to test the bug: 1438564 - FIXTURE_FILE = six.BytesIO(FIXTURE_DATA) - self.assertTrue(self.cache.cache_image_file(99, FIXTURE_FILE)) - - self.cache.prune() - - self.assertEqual(5 * units.Ki, self.cache.get_cache_size()) - - # Ensure images 0, 1, 2, 3, 4 & 5 are not cached anymore - for x in range(0, 6): - self.assertFalse(self.cache.is_cached(x), - "Image %s was cached!" % x) - - # Ensure images 6, 7, 8 and 9 are still cached - for x in range(6, 10): - self.assertTrue(self.cache.is_cached(x), - "Image %s was not cached!" % x) - - # Ensure the newly added image, 99, is still cached - self.assertTrue(self.cache.is_cached(99), "Image 99 was not cached!") - - @skip_if_disabled - def test_prune_to_zero(self): - """Test that an image_cache_max_size of 0 doesn't kill the pruner - - This is a test specifically for LP #1039854 - """ - self.assertEqual(0, self.cache.get_cache_size()) - - FIXTURE_FILE = six.BytesIO(FIXTURE_DATA) - self.assertTrue(self.cache.cache_image_file('xxx', FIXTURE_FILE)) - - self.assertEqual(1024, self.cache.get_cache_size()) - - # OK, hit the image that is now cached... 
- buff = six.BytesIO() - with self.cache.open_for_read('xxx') as cache_file: - for chunk in cache_file: - buff.write(chunk) - - self.config(image_cache_max_size=0) - self.cache.prune() - - self.assertEqual(0, self.cache.get_cache_size()) - self.assertFalse(self.cache.is_cached('xxx')) - - @skip_if_disabled - def test_queue(self): - """ - Test that queueing works properly - """ - - self.assertFalse(self.cache.is_cached(1)) - self.assertFalse(self.cache.is_queued(1)) - - FIXTURE_FILE = six.BytesIO(FIXTURE_DATA) - - self.assertTrue(self.cache.queue_image(1)) - - self.assertTrue(self.cache.is_queued(1)) - self.assertFalse(self.cache.is_cached(1)) - - # Should not return True if the image is already - # queued for caching... - self.assertFalse(self.cache.queue_image(1)) - - self.assertFalse(self.cache.is_cached(1)) - - # Test that we return False if we try to queue - # an image that has already been cached - - self.assertTrue(self.cache.cache_image_file(1, FIXTURE_FILE)) - - self.assertFalse(self.cache.is_queued(1)) - self.assertTrue(self.cache.is_cached(1)) - - self.assertFalse(self.cache.queue_image(1)) - - self.cache.delete_cached_image(1) - - for x in range(3): - self.assertTrue(self.cache.queue_image(x)) - - self.assertEqual(['0', '1', '2'], - self.cache.get_queued_images()) - - def test_open_for_write_good(self): - """ - Test to see if open_for_write works in normal case - """ - - # test a good case - image_id = '1' - self.assertFalse(self.cache.is_cached(image_id)) - with self.cache.driver.open_for_write(image_id) as cache_file: - cache_file.write(b'a') - self.assertTrue(self.cache.is_cached(image_id), - "Image %s was NOT cached!" 
% image_id) - # make sure it has tidied up - incomplete_file_path = os.path.join(self.cache_dir, - 'incomplete', image_id) - invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id) - self.assertFalse(os.path.exists(incomplete_file_path)) - self.assertFalse(os.path.exists(invalid_file_path)) - - def test_open_for_write_with_exception(self): - """ - Test to see if open_for_write works in a failure case for each driver - This case is where an exception is raised while the file is being - written. The image is partially filled in cache and filling wont resume - so verify the image is moved to invalid/ directory - """ - # test a case where an exception is raised while the file is open - image_id = '1' - self.assertFalse(self.cache.is_cached(image_id)) - try: - with self.cache.driver.open_for_write(image_id): - raise IOError - except Exception as e: - self.assertIsInstance(e, IOError) - self.assertFalse(self.cache.is_cached(image_id), - "Image %s was cached!" % image_id) - # make sure it has tidied up - incomplete_file_path = os.path.join(self.cache_dir, - 'incomplete', image_id) - invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id) - self.assertFalse(os.path.exists(incomplete_file_path)) - self.assertTrue(os.path.exists(invalid_file_path)) - - def test_caching_iterator(self): - """ - Test to see if the caching iterator interacts properly with the driver - When the iterator completes going through the data the driver should - have closed the image and placed it correctly - """ - # test a case where an exception NOT raised while the file is open, - # and a consuming iterator completes - def consume(image_id): - data = [b'a', b'b', b'c', b'd', b'e', b'f'] - checksum = None - caching_iter = self.cache.get_caching_iter(image_id, checksum, - iter(data)) - self.assertEqual(data, list(caching_iter)) - - image_id = '1' - self.assertFalse(self.cache.is_cached(image_id)) - consume(image_id) - self.assertTrue(self.cache.is_cached(image_id), - "Image 
%s was NOT cached!" % image_id) - # make sure it has tidied up - incomplete_file_path = os.path.join(self.cache_dir, - 'incomplete', image_id) - invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id) - self.assertFalse(os.path.exists(incomplete_file_path)) - self.assertFalse(os.path.exists(invalid_file_path)) - - def test_caching_iterator_handles_backend_failure(self): - """ - Test that when the backend fails, caching_iter does not continue trying - to consume data, and rolls back the cache. - """ - def faulty_backend(): - data = [b'a', b'b', b'c', b'Fail', b'd', b'e', b'f'] - for d in data: - if d == b'Fail': - raise exception.GlanceException('Backend failure') - yield d - - def consume(image_id): - caching_iter = self.cache.get_caching_iter(image_id, None, - faulty_backend()) - # exercise the caching_iter - list(caching_iter) - - image_id = '1' - self.assertRaises(exception.GlanceException, consume, image_id) - # make sure bad image was not cached - self.assertFalse(self.cache.is_cached(image_id)) - - def test_caching_iterator_falloffend(self): - """ - Test to see if the caching iterator interacts properly with the driver - in a case where the iterator is only partially consumed. In this case - the image is only partially filled in cache and filling wont resume. - When the iterator goes out of scope the driver should have closed the - image and moved it from incomplete/ to invalid/ - """ - # test a case where a consuming iterator just stops. - def falloffend(image_id): - data = [b'a', b'b', b'c', b'd', b'e', b'f'] - checksum = None - caching_iter = self.cache.get_caching_iter(image_id, checksum, - iter(data)) - self.assertEqual(b'a', next(caching_iter)) - - image_id = '1' - self.assertFalse(self.cache.is_cached(image_id)) - falloffend(image_id) - self.assertFalse(self.cache.is_cached(image_id), - "Image %s was cached!" 
% image_id) - # make sure it has tidied up - incomplete_file_path = os.path.join(self.cache_dir, - 'incomplete', image_id) - invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id) - self.assertFalse(os.path.exists(incomplete_file_path)) - self.assertTrue(os.path.exists(invalid_file_path)) - - def test_gate_caching_iter_good_checksum(self): - image = b"12345678990abcdefghijklmnop" - image_id = 123 - - md5 = hashlib.md5() - md5.update(image) - checksum = md5.hexdigest() - - cache = image_cache.ImageCache() - img_iter = cache.get_caching_iter(image_id, checksum, [image]) - for chunk in img_iter: - pass - # checksum is valid, fake image should be cached: - self.assertTrue(cache.is_cached(image_id)) - - def test_gate_caching_iter_bad_checksum(self): - image = b"12345678990abcdefghijklmnop" - image_id = 123 - checksum = "foobar" # bad. - - cache = image_cache.ImageCache() - img_iter = cache.get_caching_iter(image_id, checksum, [image]) - - def reader(): - for chunk in img_iter: - pass - self.assertRaises(exception.GlanceException, reader) - # checksum is invalid, caching will fail: - self.assertFalse(cache.is_cached(image_id)) - - -class TestImageCacheXattr(test_utils.BaseTestCase, - ImageCacheTestCase): - - """Tests image caching when xattr is used in cache""" - - def setUp(self): - """ - Test to see if the pre-requisites for the image cache - are working (python-xattr installed and xattr support on the - filesystem) - """ - super(TestImageCacheXattr, self).setUp() - - if getattr(self, 'disable', False): - return - - self.cache_dir = self.useFixture(fixtures.TempDir()).path - - if not getattr(self, 'inited', False): - try: - import xattr # noqa - except ImportError: - self.inited = True - self.disabled = True - self.disabled_message = ("python-xattr not installed.") - return - - self.inited = True - self.disabled = False - self.config(image_cache_dir=self.cache_dir, - image_cache_driver='xattr', - image_cache_max_size=5 * units.Ki) - self.cache = 
image_cache.ImageCache() - - if not xattr_writes_supported(self.cache_dir): - self.inited = True - self.disabled = True - self.disabled_message = ("filesystem does not support xattr") - return - - -class TestImageCacheSqlite(test_utils.BaseTestCase, - ImageCacheTestCase): - - """Tests image caching when SQLite is used in cache""" - - def setUp(self): - """ - Test to see if the pre-requisites for the image cache - are working (python-sqlite3 installed) - """ - super(TestImageCacheSqlite, self).setUp() - - if getattr(self, 'disable', False): - return - - if not getattr(self, 'inited', False): - try: - import sqlite3 # noqa - except ImportError: - self.inited = True - self.disabled = True - self.disabled_message = ("python-sqlite3 not installed.") - return - - self.inited = True - self.disabled = False - self.cache_dir = self.useFixture(fixtures.TempDir()).path - self.config(image_cache_dir=self.cache_dir, - image_cache_driver='sqlite', - image_cache_max_size=5 * units.Ki) - self.cache = image_cache.ImageCache() - - -class TestImageCacheNoDep(test_utils.BaseTestCase): - - def setUp(self): - super(TestImageCacheNoDep, self).setUp() - - self.driver = None - - def init_driver(self2): - self2.driver = self.driver - - mox_fixture = self.useFixture(moxstubout.MoxStubout()) - self.stubs = mox_fixture.stubs - self.stubs.Set(image_cache.ImageCache, 'init_driver', init_driver) - - def test_get_caching_iter_when_write_fails(self): - - class FailingFile(object): - - def write(self, data): - if data == "Fail": - raise IOError - - class FailingFileDriver(object): - - def is_cacheable(self, *args, **kwargs): - return True - - @contextmanager - def open_for_write(self, *args, **kwargs): - yield FailingFile() - - self.driver = FailingFileDriver() - cache = image_cache.ImageCache() - data = [b'a', b'b', b'c', b'Fail', b'd', b'e', b'f'] - - caching_iter = cache.get_caching_iter('dummy_id', None, iter(data)) - self.assertEqual(data, list(caching_iter)) - - def 
test_get_caching_iter_when_open_fails(self): - - class OpenFailingDriver(object): - - def is_cacheable(self, *args, **kwargs): - return True - - @contextmanager - def open_for_write(self, *args, **kwargs): - raise IOError - - self.driver = OpenFailingDriver() - cache = image_cache.ImageCache() - data = [b'a', b'b', b'c', b'd', b'e', b'f'] - - caching_iter = cache.get_caching_iter('dummy_id', None, iter(data)) - self.assertEqual(data, list(caching_iter)) diff --git a/glance/tests/unit/test_image_cache_client.py b/glance/tests/unit/test_image_cache_client.py deleted file mode 100644 index c3ea9db3..00000000 --- a/glance/tests/unit/test_image_cache_client.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import os - -import mock - -from glance.common import exception -from glance.image_cache import client -from glance.tests import utils - - -class CacheClientTestCase(utils.BaseTestCase): - def setUp(self): - super(CacheClientTestCase, self).setUp() - self.client = client.CacheClient('test_host') - self.client.do_request = mock.Mock() - - def test_delete_cached_image(self): - self.client.do_request.return_value = utils.FakeHTTPResponse() - self.assertTrue(self.client.delete_cached_image('test_id')) - self.client.do_request.assert_called_with("DELETE", - "/cached_images/test_id") - - def test_get_cached_images(self): - expected_data = b'{"cached_images": "some_images"}' - self.client.do_request.return_value = utils.FakeHTTPResponse( - data=expected_data) - self.assertEqual("some_images", self.client.get_cached_images()) - self.client.do_request.assert_called_with("GET", "/cached_images") - - def test_get_queued_images(self): - expected_data = b'{"queued_images": "some_images"}' - self.client.do_request.return_value = utils.FakeHTTPResponse( - data=expected_data) - self.assertEqual("some_images", self.client.get_queued_images()) - self.client.do_request.assert_called_with("GET", "/queued_images") - - def test_delete_all_cached_images(self): - expected_data = b'{"num_deleted": 4}' - self.client.do_request.return_value = utils.FakeHTTPResponse( - data=expected_data) - self.assertEqual(4, self.client.delete_all_cached_images()) - self.client.do_request.assert_called_with("DELETE", "/cached_images") - - def test_queue_image_for_caching(self): - self.client.do_request.return_value = utils.FakeHTTPResponse() - self.assertTrue(self.client.queue_image_for_caching('test_id')) - self.client.do_request.assert_called_with("PUT", - "/queued_images/test_id") - - def test_delete_queued_image(self): - self.client.do_request.return_value = utils.FakeHTTPResponse() - self.assertTrue(self.client.delete_queued_image('test_id')) - self.client.do_request.assert_called_with("DELETE", - 
"/queued_images/test_id") - - def test_delete_all_queued_images(self): - expected_data = b'{"num_deleted": 4}' - self.client.do_request.return_value = utils.FakeHTTPResponse( - data=expected_data) - self.assertEqual(4, self.client.delete_all_queued_images()) - self.client.do_request.assert_called_with("DELETE", "/queued_images") - - -class GetClientTestCase(utils.BaseTestCase): - def setUp(self): - super(GetClientTestCase, self).setUp() - self.host = 'test_host' - self.env = os.environ.copy() - os.environ.clear() - - def tearDown(self): - os.environ = self.env - super(GetClientTestCase, self).tearDown() - - def test_get_client_host_only(self): - expected_creds = { - 'username': None, - 'password': None, - 'tenant': None, - 'auth_url': None, - 'strategy': 'noauth', - 'region': None - } - self.assertEqual(expected_creds, client.get_client(self.host).creds) - - def test_get_client_all_creds(self): - expected_creds = { - 'username': 'name', - 'password': 'pass', - 'tenant': 'ten', - 'auth_url': 'url', - 'strategy': 'keystone', - 'region': 'reg' - } - creds = client.get_client( - self.host, - username='name', - password='pass', - tenant='ten', - auth_url='url', - auth_strategy='strategy', - region='reg' - ).creds - self.assertEqual(expected_creds, creds) - - def test_get_client_using_provided_host(self): - cli = client.get_client(self.host) - cli._do_request = mock.MagicMock() - cli.configure_from_url = mock.MagicMock() - cli.auth_plugin.management_url = mock.MagicMock() - cli.do_request("GET", "/queued_images") - self.assertFalse(cli.configure_from_url.called) - self.assertFalse(client.get_client(self.host).configure_via_auth) - - def test_get_client_client_configuration_error(self): - self.assertRaises(exception.ClientConfigurationError, - client.get_client, self.host, username='name', - password='pass', tenant='ten', - auth_strategy='keystone', region='reg') diff --git a/glance/tests/unit/test_manage.py b/glance/tests/unit/test_manage.py deleted file mode 100644 
index 80b0c670..00000000 --- a/glance/tests/unit/test_manage.py +++ /dev/null @@ -1,279 +0,0 @@ -# Copyright 2014 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures -import mock - -from glance.cmd import manage -from glance.db.sqlalchemy import api as db_api -from glance.db.sqlalchemy import metadata as db_metadata -from glance.tests import utils as test_utils - - -class TestManageBase(test_utils.BaseTestCase): - - def setUp(self): - super(TestManageBase, self).setUp() - - def clear_conf(): - manage.CONF.reset() - manage.CONF.unregister_opt(manage.command_opt) - clear_conf() - self.addCleanup(clear_conf) - - self.useFixture(fixtures.MonkeyPatch( - 'oslo_log.log.setup', lambda product_name, version='test': None)) - - patcher = mock.patch('glance.db.sqlalchemy.api.get_engine') - patcher.start() - self.addCleanup(patcher.stop) - - def _main_test_helper(self, argv, func_name=None, *exp_args, **exp_kwargs): - self.useFixture(fixtures.MonkeyPatch('sys.argv', argv)) - manage.main() - func_name.assert_called_once_with(*exp_args, **exp_kwargs) - - -class TestLegacyManage(TestManageBase): - - @mock.patch.object(manage.DbCommands, 'version') - def test_legacy_db_version(self, db_upgrade): - self._main_test_helper(['glance.cmd.manage', 'db_version'], - manage.DbCommands.version) - - @mock.patch.object(manage.DbCommands, 'sync') - def test_legacy_db_sync(self, db_sync): - self._main_test_helper(['glance.cmd.manage', 
'db_sync'], - manage.DbCommands.sync, None) - - @mock.patch.object(manage.DbCommands, 'upgrade') - def test_legacy_db_upgrade(self, db_upgrade): - self._main_test_helper(['glance.cmd.manage', 'db_upgrade'], - manage.DbCommands.upgrade, None) - - @mock.patch.object(manage.DbCommands, 'version_control') - def test_legacy_db_version_control(self, db_version_control): - self._main_test_helper(['glance.cmd.manage', 'db_version_control'], - manage.DbCommands.version_control, None) - - @mock.patch.object(manage.DbCommands, 'sync') - def test_legacy_db_sync_version(self, db_sync): - self._main_test_helper(['glance.cmd.manage', 'db_sync', 'liberty'], - manage.DbCommands.sync, 'liberty') - - @mock.patch.object(manage.DbCommands, 'upgrade') - def test_legacy_db_upgrade_version(self, db_upgrade): - self._main_test_helper(['glance.cmd.manage', 'db_upgrade', 'liberty'], - manage.DbCommands.upgrade, 'liberty') - - @mock.patch.object(manage.DbCommands, 'expand') - def test_legacy_db_expand(self, db_expand): - self._main_test_helper(['glance.cmd.manage', 'db_expand'], - manage.DbCommands.expand) - - @mock.patch.object(manage.DbCommands, 'migrate') - def test_legacy_db_migrate(self, db_migrate): - self._main_test_helper(['glance.cmd.manage', 'db_migrate'], - manage.DbCommands.migrate) - - @mock.patch.object(manage.DbCommands, 'contract') - def test_legacy_db_contract(self, db_contract): - self._main_test_helper(['glance.cmd.manage', 'db_contract'], - manage.DbCommands.contract) - - def test_db_metadefs_unload(self): - db_metadata.db_unload_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db_unload_metadefs'], - db_metadata.db_unload_metadefs, - db_api.get_engine()) - - def test_db_metadefs_load(self): - db_metadata.db_load_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs'], - db_metadata.db_load_metadefs, - db_api.get_engine(), - None, None, None, None) - - def test_db_metadefs_load_with_specified_path(self): - 
db_metadata.db_load_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs', - '/mock/'], - db_metadata.db_load_metadefs, - db_api.get_engine(), - '/mock/', None, None, None) - - def test_db_metadefs_load_from_path_merge(self): - db_metadata.db_load_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs', - '/mock/', 'True'], - db_metadata.db_load_metadefs, - db_api.get_engine(), - '/mock/', 'True', None, None) - - def test_db_metadefs_load_from_merge_and_prefer_new(self): - db_metadata.db_load_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs', - '/mock/', 'True', 'True'], - db_metadata.db_load_metadefs, - db_api.get_engine(), - '/mock/', 'True', 'True', None) - - def test_db_metadefs_load_from_merge_and_prefer_new_and_overwrite(self): - db_metadata.db_load_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs', - '/mock/', 'True', 'True', 'True'], - db_metadata.db_load_metadefs, - db_api.get_engine(), - '/mock/', 'True', 'True', 'True') - - def test_db_metadefs_export(self): - db_metadata.db_export_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db_export_metadefs'], - db_metadata.db_export_metadefs, - db_api.get_engine(), - None) - - def test_db_metadefs_export_with_specified_path(self): - db_metadata.db_export_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db_export_metadefs', - '/mock/'], - db_metadata.db_export_metadefs, - db_api.get_engine(), - '/mock/') - - -class TestManage(TestManageBase): - - @mock.patch.object(manage.DbCommands, 'version') - def test_db_version(self, version): - self._main_test_helper(['glance.cmd.manage', 'db', 'version'], - manage.DbCommands.version) - - @mock.patch.object(manage.DbCommands, 'sync') - def test_db_sync(self, sync): - self._main_test_helper(['glance.cmd.manage', 'db', 'sync'], - manage.DbCommands.sync) - - 
@mock.patch.object(manage.DbCommands, 'upgrade') - def test_db_upgrade(self, upgrade): - self._main_test_helper(['glance.cmd.manage', 'db', 'upgrade'], - manage.DbCommands.upgrade) - - @mock.patch.object(manage.DbCommands, 'version_control') - def test_db_version_control(self, version_control): - self._main_test_helper(['glance.cmd.manage', 'db', 'version_control'], - manage.DbCommands.version_control) - - @mock.patch.object(manage.DbCommands, 'sync') - def test_db_sync_version(self, sync): - self._main_test_helper(['glance.cmd.manage', 'db', 'sync', 'liberty'], - manage.DbCommands.sync, 'liberty') - - @mock.patch.object(manage.DbCommands, 'upgrade') - def test_db_upgrade_version(self, upgrade): - self._main_test_helper(['glance.cmd.manage', 'db', - 'upgrade', 'liberty'], - manage.DbCommands.upgrade, 'liberty') - - @mock.patch.object(manage.DbCommands, 'expand') - def test_db_expand(self, expand): - self._main_test_helper(['glance.cmd.manage', 'db', 'expand'], - manage.DbCommands.expand) - - @mock.patch.object(manage.DbCommands, 'migrate') - def test_db_migrate(self, migrate): - self._main_test_helper(['glance.cmd.manage', 'db', 'migrate'], - manage.DbCommands.migrate) - - @mock.patch.object(manage.DbCommands, 'contract') - def test_db_contract(self, contract): - self._main_test_helper(['glance.cmd.manage', 'db', 'contract'], - manage.DbCommands.contract) - - def test_db_metadefs_unload(self): - db_metadata.db_unload_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db', 'unload_metadefs'], - db_metadata.db_unload_metadefs, - db_api.get_engine()) - - def test_db_metadefs_load(self): - db_metadata.db_load_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs'], - db_metadata.db_load_metadefs, - db_api.get_engine(), - None, False, False, False) - - def test_db_metadefs_load_with_specified_path(self): - db_metadata.db_load_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db', 
'load_metadefs', - '--path', '/mock/'], - db_metadata.db_load_metadefs, - db_api.get_engine(), - '/mock/', False, False, False) - - def test_db_metadefs_load_prefer_new_with_path(self): - db_metadata.db_load_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', - '--path', '/mock/', '--merge', '--prefer_new'], - db_metadata.db_load_metadefs, - db_api.get_engine(), - '/mock/', True, True, False) - - def test_db_metadefs_load_prefer_new(self): - db_metadata.db_load_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', - '--merge', '--prefer_new'], - db_metadata.db_load_metadefs, - db_api.get_engine(), - None, True, True, False) - - def test_db_metadefs_load_overwrite_existing(self): - db_metadata.db_load_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', - '--merge', '--overwrite'], - db_metadata.db_load_metadefs, - db_api.get_engine(), - None, True, False, True) - - def test_db_metadefs_load_prefer_new_and_overwrite_existing(self): - db_metadata.db_load_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', - '--merge', '--prefer_new', '--overwrite'], - db_metadata.db_load_metadefs, - db_api.get_engine(), - None, True, True, True) - - def test_db_metadefs_load_from_path_overwrite_existing(self): - db_metadata.db_load_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', - '--path', '/mock/', '--merge', '--overwrite'], - db_metadata.db_load_metadefs, - db_api.get_engine(), - '/mock/', True, False, True) - - def test_db_metadefs_export(self): - db_metadata.db_export_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 'db', 'export_metadefs'], - db_metadata.db_export_metadefs, - db_api.get_engine(), - None) - - def test_db_metadefs_export_with_specified_path(self): - db_metadata.db_export_metadefs = mock.Mock() - self._main_test_helper(['glance.cmd.manage', 
'db', 'export_metadefs', - '--path', '/mock/'], - db_metadata.db_export_metadefs, - db_api.get_engine(), - '/mock/') diff --git a/glance/tests/unit/test_misc.py b/glance/tests/unit/test_misc.py deleted file mode 100644 index 9e3f98ab..00000000 --- a/glance/tests/unit/test_misc.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import six -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from glance.common import crypt -from glance.common import utils -from glance.tests import utils as test_utils - - -class UtilsTestCase(test_utils.BaseTestCase): - - def test_encryption(self): - # Check that original plaintext and unencrypted ciphertext match - # Check keys of the three allowed lengths - key_list = ["1234567890abcdef", - "12345678901234567890abcd", - "1234567890abcdef1234567890ABCDEF"] - plaintext_list = [''] - blocksize = 64 - for i in range(3 * blocksize): - text = os.urandom(i) - if six.PY3: - text = text.decode('latin1') - plaintext_list.append(text) - - for key in key_list: - for plaintext in plaintext_list: - ciphertext = crypt.urlsafe_encrypt(key, plaintext, blocksize) - self.assertIsInstance(ciphertext, str) - self.assertNotEqual(ciphertext, plaintext) - text = crypt.urlsafe_decrypt(key, ciphertext) - self.assertIsInstance(text, str) - self.assertEqual(plaintext, text) - - def 
test_empty_metadata_headers(self): - """Ensure unset metadata is not encoded in HTTP headers""" - - metadata = { - 'foo': 'bar', - 'snafu': None, - 'bells': 'whistles', - 'unset': None, - 'empty': '', - 'properties': { - 'distro': '', - 'arch': None, - 'user': 'nobody', - }, - } - - headers = utils.image_meta_to_http_headers(metadata) - - self.assertNotIn('x-image-meta-snafu', headers) - self.assertNotIn('x-image-meta-uset', headers) - self.assertNotIn('x-image-meta-snafu', headers) - self.assertNotIn('x-image-meta-property-arch', headers) - - self.assertEqual('bar', headers.get('x-image-meta-foo')) - self.assertEqual('whistles', headers.get('x-image-meta-bells')) - self.assertEqual('', headers.get('x-image-meta-empty')) - self.assertEqual('', headers.get('x-image-meta-property-distro')) - self.assertEqual('nobody', headers.get('x-image-meta-property-user')) diff --git a/glance/tests/unit/test_notifier.py b/glance/tests/unit/test_notifier.py deleted file mode 100644 index 9219ad20..00000000 --- a/glance/tests/unit/test_notifier.py +++ /dev/null @@ -1,749 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -import glance_store -import mock -from oslo_config import cfg -import oslo_messaging -import webob - -import glance.async -from glance.common import exception -from glance.common import timeutils -import glance.context -from glance import notifier -import glance.tests.unit.utils as unit_test_utils -from glance.tests import utils - - -DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355) - - -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' -USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' -TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' - - -class ImageStub(glance.domain.Image): - def get_data(self, offset=0, chunk_size=None): - return ['01234', '56789'] - - def set_data(self, data, size=None): - for chunk in data: - pass - - -class ImageRepoStub(object): - def remove(self, *args, **kwargs): - return 'image_from_get' - - def save(self, *args, **kwargs): - return 'image_from_save' - - def add(self, *args, **kwargs): - return 'image_from_add' - - def get(self, *args, **kwargs): - return 'image_from_get' - - def list(self, *args, **kwargs): - return ['images_from_list'] - - -class ImageMemberRepoStub(object): - def remove(self, *args, **kwargs): - return 'image_member_from_remove' - - def save(self, *args, **kwargs): - return 'image_member_from_save' - - def add(self, *args, **kwargs): - return 'image_member_from_add' - - def get(self, *args, **kwargs): - return 'image_member_from_get' - - def list(self, *args, **kwargs): - return ['image_members_from_list'] - - -class TaskStub(glance.domain.TaskStub): - def run(self, executor): - pass - - -class Task(glance.domain.Task): - def succeed(self, result): - pass - - def fail(self, message): - pass - - -class TaskRepoStub(object): - def remove(self, *args, **kwargs): - return 'task_from_remove' - - def save(self, *args, **kwargs): - return 'task_from_save' - - def add(self, *args, **kwargs): - return 'task_from_add' - - def get_task(self, *args, 
**kwargs): - return 'task_from_get' - - def list(self, *args, **kwargs): - return ['tasks_from_list'] - - -class TestNotifier(utils.BaseTestCase): - - @mock.patch.object(oslo_messaging, 'Notifier') - @mock.patch.object(oslo_messaging, 'get_notification_transport') - def _test_load_strategy(self, - mock_get_transport, mock_notifier, - url, driver): - nfier = notifier.Notifier() - mock_get_transport.assert_called_with(cfg.CONF) - self.assertIsNotNone(nfier._transport) - mock_notifier.assert_called_with(nfier._transport, - publisher_id='image.localhost') - self.assertIsNotNone(nfier._notifier) - - def test_notifier_load(self): - self._test_load_strategy(url=None, driver=None) - - @mock.patch.object(oslo_messaging, 'set_transport_defaults') - def test_set_defaults(self, mock_set_trans_defaults): - notifier.set_defaults(control_exchange='foo') - mock_set_trans_defaults.assert_called_with('foo') - notifier.set_defaults() - mock_set_trans_defaults.assert_called_with('glance') - - -class TestImageNotifications(utils.BaseTestCase): - """Test Image Notifications work""" - - def setUp(self): - super(TestImageNotifications, self).setUp() - self.image = ImageStub( - image_id=UUID1, name='image-1', status='active', size=1024, - created_at=DATETIME, updated_at=DATETIME, owner=TENANT1, - visibility='public', container_format='ami', virtual_size=2048, - tags=['one', 'two'], disk_format='ami', min_ram=128, - min_disk=10, checksum='ca425b88f047ce8ec45ee90e813ada91', - locations=['http://127.0.0.1']) - self.context = glance.context.RequestContext(tenant=TENANT2, - user=USER1) - self.image_repo_stub = ImageRepoStub() - self.notifier = unit_test_utils.FakeNotifier() - self.image_repo_proxy = glance.notifier.ImageRepoProxy( - self.image_repo_stub, self.context, self.notifier) - self.image_proxy = glance.notifier.ImageProxy( - self.image, self.context, self.notifier) - - def test_image_save_notification(self): - self.image_repo_proxy.save(self.image_proxy) - output_logs = 
self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('image.update', output_log['event_type']) - self.assertEqual(self.image.image_id, output_log['payload']['id']) - if 'location' in output_log['payload']: - self.fail('Notification contained location field.') - - def test_image_save_notification_disabled(self): - self.config(disabled_notifications=["image.update"]) - self.image_repo_proxy.save(self.image_proxy) - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - - def test_image_add_notification(self): - self.image_repo_proxy.add(self.image_proxy) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('image.create', output_log['event_type']) - self.assertEqual(self.image.image_id, output_log['payload']['id']) - if 'location' in output_log['payload']: - self.fail('Notification contained location field.') - - def test_image_add_notification_disabled(self): - self.config(disabled_notifications=["image.create"]) - self.image_repo_proxy.add(self.image_proxy) - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - - def test_image_delete_notification(self): - self.image_repo_proxy.remove(self.image_proxy) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('image.delete', output_log['event_type']) - self.assertEqual(self.image.image_id, output_log['payload']['id']) - self.assertTrue(output_log['payload']['deleted']) - if 'location' in output_log['payload']: - self.fail('Notification contained location field.') - - def test_image_delete_notification_disabled(self): - self.config(disabled_notifications=['image.delete']) - 
self.image_repo_proxy.remove(self.image_proxy) - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - - def test_image_get(self): - image = self.image_repo_proxy.get(UUID1) - self.assertIsInstance(image, glance.notifier.ImageProxy) - self.assertEqual('image_from_get', image.repo) - - def test_image_list(self): - images = self.image_repo_proxy.list() - self.assertIsInstance(images[0], glance.notifier.ImageProxy) - self.assertEqual('images_from_list', images[0].repo) - - def test_image_get_data_should_call_next_image_get_data(self): - with mock.patch.object(self.image, 'get_data') as get_data_mock: - self.image_proxy.get_data() - - self.assertTrue(get_data_mock.called) - - def test_image_get_data_notification(self): - self.image_proxy.size = 10 - data = ''.join(self.image_proxy.get_data()) - self.assertEqual('0123456789', data) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('image.send', output_log['event_type']) - self.assertEqual(self.image.image_id, - output_log['payload']['image_id']) - self.assertEqual(TENANT2, output_log['payload']['receiver_tenant_id']) - self.assertEqual(USER1, output_log['payload']['receiver_user_id']) - self.assertEqual(10, output_log['payload']['bytes_sent']) - self.assertEqual(TENANT1, output_log['payload']['owner_id']) - - def test_image_get_data_notification_disabled(self): - self.config(disabled_notifications=['image.send']) - self.image_proxy.size = 10 - data = ''.join(self.image_proxy.get_data()) - self.assertEqual('0123456789', data) - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - - def test_image_get_data_size_mismatch(self): - self.image_proxy.size = 11 - list(self.image_proxy.get_data()) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('ERROR', 
output_log['notification_type']) - self.assertEqual('image.send', output_log['event_type']) - self.assertEqual(self.image.image_id, - output_log['payload']['image_id']) - - def test_image_set_data_prepare_notification(self): - insurance = {'called': False} - - def data_iterator(): - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('image.prepare', output_log['event_type']) - self.assertEqual(self.image.image_id, output_log['payload']['id']) - yield 'abcd' - yield 'efgh' - insurance['called'] = True - - self.image_proxy.set_data(data_iterator(), 8) - self.assertTrue(insurance['called']) - - def test_image_set_data_prepare_notification_disabled(self): - insurance = {'called': False} - - def data_iterator(): - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - yield 'abcd' - yield 'efgh' - insurance['called'] = True - - self.config(disabled_notifications=['image.prepare']) - self.image_proxy.set_data(data_iterator(), 8) - self.assertTrue(insurance['called']) - - def test_image_set_data_upload_and_activate_notification(self): - def data_iterator(): - self.notifier.log = [] - yield 'abcde' - yield 'fghij' - - self.image_proxy.set_data(data_iterator(), 10) - - output_logs = self.notifier.get_logs() - self.assertEqual(2, len(output_logs)) - - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('image.upload', output_log['event_type']) - self.assertEqual(self.image.image_id, output_log['payload']['id']) - - output_log = output_logs[1] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('image.activate', output_log['event_type']) - self.assertEqual(self.image.image_id, output_log['payload']['id']) - - def test_image_set_data_upload_and_activate_notification_disabled(self): - insurance = {'called': False} - - def 
data_iterator(): - self.notifier.log = [] - yield 'abcde' - yield 'fghij' - insurance['called'] = True - - self.config(disabled_notifications=['image.activate', 'image.upload']) - self.image_proxy.set_data(data_iterator(), 10) - self.assertTrue(insurance['called']) - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - - def test_image_set_data_storage_full(self): - def data_iterator(): - self.notifier.log = [] - yield 'abcde' - raise glance_store.StorageFull(message='Modern Major General') - - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.image_proxy.set_data, data_iterator(), 10) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - - output_log = output_logs[0] - self.assertEqual('ERROR', output_log['notification_type']) - self.assertEqual('image.upload', output_log['event_type']) - self.assertIn('Modern Major General', output_log['payload']) - - def test_image_set_data_value_error(self): - def data_iterator(): - self.notifier.log = [] - yield 'abcde' - raise ValueError('value wrong') - - self.assertRaises(webob.exc.HTTPBadRequest, - self.image_proxy.set_data, data_iterator(), 10) - - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - - output_log = output_logs[0] - self.assertEqual('ERROR', output_log['notification_type']) - self.assertEqual('image.upload', output_log['event_type']) - self.assertIn('value wrong', output_log['payload']) - - def test_image_set_data_duplicate(self): - def data_iterator(): - self.notifier.log = [] - yield 'abcde' - raise exception.Duplicate('Cant have duplicates') - - self.assertRaises(webob.exc.HTTPConflict, - self.image_proxy.set_data, data_iterator(), 10) - - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - - output_log = output_logs[0] - self.assertEqual('ERROR', output_log['notification_type']) - self.assertEqual('image.upload', output_log['event_type']) - self.assertIn('Cant have duplicates', 
output_log['payload']) - - def test_image_set_data_storage_write_denied(self): - def data_iterator(): - self.notifier.log = [] - yield 'abcde' - raise glance_store.StorageWriteDenied(message='The Very Model') - - self.assertRaises(webob.exc.HTTPServiceUnavailable, - self.image_proxy.set_data, data_iterator(), 10) - - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - - output_log = output_logs[0] - self.assertEqual('ERROR', output_log['notification_type']) - self.assertEqual('image.upload', output_log['event_type']) - self.assertIn('The Very Model', output_log['payload']) - - def test_image_set_data_forbidden(self): - def data_iterator(): - self.notifier.log = [] - yield 'abcde' - raise exception.Forbidden('Not allowed') - - self.assertRaises(webob.exc.HTTPForbidden, - self.image_proxy.set_data, data_iterator(), 10) - - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - - output_log = output_logs[0] - self.assertEqual('ERROR', output_log['notification_type']) - self.assertEqual('image.upload', output_log['event_type']) - self.assertIn('Not allowed', output_log['payload']) - - def test_image_set_data_not_found(self): - def data_iterator(): - self.notifier.log = [] - yield 'abcde' - raise exception.NotFound('Not found') - - self.assertRaises(webob.exc.HTTPNotFound, - self.image_proxy.set_data, data_iterator(), 10) - - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - - output_log = output_logs[0] - self.assertEqual('ERROR', output_log['notification_type']) - self.assertEqual('image.upload', output_log['event_type']) - self.assertIn('Not found', output_log['payload']) - - def test_image_set_data_HTTP_error(self): - def data_iterator(): - self.notifier.log = [] - yield 'abcde' - raise webob.exc.HTTPError('Http issue') - - self.assertRaises(webob.exc.HTTPError, - self.image_proxy.set_data, data_iterator(), 10) - - output_logs = self.notifier.get_logs() - self.assertEqual(1, 
len(output_logs)) - - output_log = output_logs[0] - self.assertEqual('ERROR', output_log['notification_type']) - self.assertEqual('image.upload', output_log['event_type']) - self.assertIn('Http issue', output_log['payload']) - - def test_image_set_data_error(self): - def data_iterator(): - self.notifier.log = [] - yield 'abcde' - raise exception.GlanceException('Failed') - - self.assertRaises(exception.GlanceException, - self.image_proxy.set_data, data_iterator(), 10) - - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - - output_log = output_logs[0] - self.assertEqual('ERROR', output_log['notification_type']) - self.assertEqual('image.upload', output_log['event_type']) - self.assertIn('Failed', output_log['payload']) - - -class TestImageMemberNotifications(utils.BaseTestCase): - """Test Image Member Notifications work""" - - def setUp(self): - super(TestImageMemberNotifications, self).setUp() - self.context = glance.context.RequestContext(tenant=TENANT2, - user=USER1) - self.notifier = unit_test_utils.FakeNotifier() - - self.image = ImageStub( - image_id=UUID1, name='image-1', status='active', size=1024, - created_at=DATETIME, updated_at=DATETIME, owner=TENANT1, - visibility='public', container_format='ami', - tags=['one', 'two'], disk_format='ami', min_ram=128, - min_disk=10, checksum='ca425b88f047ce8ec45ee90e813ada91', - locations=['http://127.0.0.1']) - self.image_member = glance.domain.ImageMembership( - id=1, image_id=UUID1, member_id=TENANT1, created_at=DATETIME, - updated_at=DATETIME, status='accepted') - - self.image_member_repo_stub = ImageMemberRepoStub() - self.image_member_repo_proxy = glance.notifier.ImageMemberRepoProxy( - self.image_member_repo_stub, self.image, - self.context, self.notifier) - self.image_member_proxy = glance.notifier.ImageMemberProxy( - self.image_member, self.context, self.notifier) - - def _assert_image_member_with_notifier(self, output_log, deleted=False): - 
self.assertEqual(self.image_member.member_id, - output_log['payload']['member_id']) - self.assertEqual(self.image_member.image_id, - output_log['payload']['image_id']) - self.assertEqual(self.image_member.status, - output_log['payload']['status']) - self.assertEqual(timeutils.isotime(self.image_member.created_at), - output_log['payload']['created_at']) - self.assertEqual(timeutils.isotime(self.image_member.updated_at), - output_log['payload']['updated_at']) - - if deleted: - self.assertTrue(output_log['payload']['deleted']) - self.assertIsNotNone(output_log['payload']['deleted_at']) - else: - self.assertFalse(output_log['payload']['deleted']) - self.assertIsNone(output_log['payload']['deleted_at']) - - def test_image_member_add_notification(self): - self.image_member_repo_proxy.add(self.image_member_proxy) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('image.member.create', output_log['event_type']) - self._assert_image_member_with_notifier(output_log) - - def test_image_member_add_notification_disabled(self): - self.config(disabled_notifications=['image.member.create']) - self.image_member_repo_proxy.add(self.image_member_proxy) - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - - def test_image_member_save_notification(self): - self.image_member_repo_proxy.save(self.image_member_proxy) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('image.member.update', output_log['event_type']) - self._assert_image_member_with_notifier(output_log) - - def test_image_member_save_notification_disabled(self): - self.config(disabled_notifications=['image.member.update']) - self.image_member_repo_proxy.save(self.image_member_proxy) - output_logs = 
self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - - def test_image_member_delete_notification(self): - self.image_member_repo_proxy.remove(self.image_member_proxy) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('image.member.delete', output_log['event_type']) - self._assert_image_member_with_notifier(output_log, deleted=True) - - def test_image_member_delete_notification_disabled(self): - self.config(disabled_notifications=['image.member.delete']) - self.image_member_repo_proxy.remove(self.image_member_proxy) - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - - def test_image_member_get(self): - image_member = self.image_member_repo_proxy.get(TENANT1) - self.assertIsInstance(image_member, glance.notifier.ImageMemberProxy) - self.assertEqual('image_member_from_get', image_member.repo) - - def test_image_member_list(self): - image_members = self.image_member_repo_proxy.list() - self.assertIsInstance(image_members[0], - glance.notifier.ImageMemberProxy) - self.assertEqual('image_members_from_list', image_members[0].repo) - - -class TestTaskNotifications(utils.BaseTestCase): - """Test Task Notifications work""" - - def setUp(self): - super(TestTaskNotifications, self).setUp() - task_input = {"loc": "fake"} - self.task_stub = TaskStub( - task_id='aaa', - task_type='import', - status='pending', - owner=TENANT2, - expires_at=None, - created_at=DATETIME, - updated_at=DATETIME, - ) - - self.task = Task( - task_id='aaa', - task_type='import', - status='pending', - owner=TENANT2, - expires_at=None, - created_at=DATETIME, - updated_at=DATETIME, - task_input=task_input, - result='res', - message='blah' - ) - self.context = glance.context.RequestContext( - tenant=TENANT2, - user=USER1 - ) - self.task_repo_stub = TaskRepoStub() - self.notifier = unit_test_utils.FakeNotifier() - 
self.task_repo_proxy = glance.notifier.TaskRepoProxy( - self.task_repo_stub, - self.context, - self.notifier - ) - self.task_proxy = glance.notifier.TaskProxy( - self.task, - self.context, - self.notifier - ) - self.task_stub_proxy = glance.notifier.TaskStubProxy( - self.task_stub, - self.context, - self.notifier - ) - self.patcher = mock.patch.object(timeutils, 'utcnow') - mock_utcnow = self.patcher.start() - mock_utcnow.return_value = datetime.datetime.utcnow() - - def tearDown(self): - super(TestTaskNotifications, self).tearDown() - self.patcher.stop() - - def test_task_create_notification(self): - self.task_repo_proxy.add(self.task_stub_proxy) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('task.create', output_log['event_type']) - self.assertEqual(self.task.task_id, output_log['payload']['id']) - self.assertEqual( - timeutils.isotime(self.task.updated_at), - output_log['payload']['updated_at'] - ) - self.assertEqual( - timeutils.isotime(self.task.created_at), - output_log['payload']['created_at'] - ) - if 'location' in output_log['payload']: - self.fail('Notification contained location field.') - - def test_task_create_notification_disabled(self): - self.config(disabled_notifications=['task.create']) - self.task_repo_proxy.add(self.task_stub_proxy) - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - - def test_task_delete_notification(self): - now = timeutils.isotime() - self.task_repo_proxy.remove(self.task_stub_proxy) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('task.delete', output_log['event_type']) - self.assertEqual(self.task.task_id, output_log['payload']['id']) - self.assertEqual( - timeutils.isotime(self.task.updated_at), - 
output_log['payload']['updated_at'] - ) - self.assertEqual( - timeutils.isotime(self.task.created_at), - output_log['payload']['created_at'] - ) - self.assertEqual( - now, - output_log['payload']['deleted_at'] - ) - if 'location' in output_log['payload']: - self.fail('Notification contained location field.') - - def test_task_delete_notification_disabled(self): - self.config(disabled_notifications=['task.delete']) - self.task_repo_proxy.remove(self.task_stub_proxy) - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - - def test_task_run_notification(self): - with mock.patch('glance.async.TaskExecutor') as mock_executor: - executor = mock_executor.return_value - executor._run.return_value = mock.Mock() - self.task_proxy.run(executor=mock_executor) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('task.run', output_log['event_type']) - self.assertEqual(self.task.task_id, output_log['payload']['id']) - - def test_task_run_notification_disabled(self): - self.config(disabled_notifications=['task.run']) - with mock.patch('glance.async.TaskExecutor') as mock_executor: - executor = mock_executor.return_value - executor._run.return_value = mock.Mock() - self.task_proxy.run(executor=mock_executor) - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - - def test_task_processing_notification(self): - self.task_proxy.begin_processing() - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('task.processing', output_log['event_type']) - self.assertEqual(self.task.task_id, output_log['payload']['id']) - - def test_task_processing_notification_disabled(self): - self.config(disabled_notifications=['task.processing']) - self.task_proxy.begin_processing() 
- output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - - def test_task_success_notification(self): - self.task_proxy.begin_processing() - self.task_proxy.succeed(result=None) - output_logs = self.notifier.get_logs() - self.assertEqual(2, len(output_logs)) - output_log = output_logs[1] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('task.success', output_log['event_type']) - self.assertEqual(self.task.task_id, output_log['payload']['id']) - - def test_task_success_notification_disabled(self): - self.config(disabled_notifications=['task.processing', 'task.success']) - self.task_proxy.begin_processing() - self.task_proxy.succeed(result=None) - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - - def test_task_failure_notification(self): - self.task_proxy.fail(message=None) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('task.failure', output_log['event_type']) - self.assertEqual(self.task.task_id, output_log['payload']['id']) - - def test_task_failure_notification_disabled(self): - self.config(disabled_notifications=['task.failure']) - self.task_proxy.fail(message=None) - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) diff --git a/glance/tests/unit/test_policy.py b/glance/tests/unit/test_policy.py deleted file mode 100644 index aa246d48..00000000 --- a/glance/tests/unit/test_policy.py +++ /dev/null @@ -1,595 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os.path - -import mock -import oslo_config.cfg - -import glance.api.policy -from glance.common import exception -import glance.context -from glance.tests.unit import base -import glance.tests.unit.utils as unit_test_utils -from glance.tests import utils as test_utils - -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' - - -class ImageRepoStub(object): - def get(self, *args, **kwargs): - return 'image_from_get' - - def save(self, *args, **kwargs): - return 'image_from_save' - - def add(self, *args, **kwargs): - return 'image_from_add' - - def list(self, *args, **kwargs): - return ['image_from_list_0', 'image_from_list_1'] - - -class ImageStub(object): - def __init__(self, image_id=None, visibility='private', - container_format='bear', disk_format='raw', - status='active', extra_properties=None): - - if extra_properties is None: - extra_properties = {} - - self.image_id = image_id - self.visibility = visibility - self.container_format = container_format - self.disk_format = disk_format - self.status = status - self.extra_properties = extra_properties - - def delete(self): - self.status = 'deleted' - - -class ImageFactoryStub(object): - def new_image(self, image_id=None, name=None, visibility='private', - min_disk=0, min_ram=0, protected=False, owner=None, - disk_format=None, container_format=None, - extra_properties=None, tags=None, **other_args): - self.visibility = visibility - return 'new_image' - - -class MemberRepoStub(object): - image = None - - def add(self, image_member): - image_member.output = 'member_repo_add' - - def get(self, *args, 
**kwargs): - return 'member_repo_get' - - def save(self, image_member, from_state=None): - image_member.output = 'member_repo_save' - - def list(self, *args, **kwargs): - return 'member_repo_list' - - def remove(self, image_member): - image_member.output = 'member_repo_remove' - - -class ImageMembershipStub(object): - def __init__(self, output=None): - self.output = output - - -class TaskRepoStub(object): - def get(self, *args, **kwargs): - return 'task_from_get' - - def add(self, *args, **kwargs): - return 'task_from_add' - - def list(self, *args, **kwargs): - return ['task_from_list_0', 'task_from_list_1'] - - -class TaskStub(object): - def __init__(self, task_id): - self.task_id = task_id - self.status = 'pending' - - def run(self, executor): - self.status = 'processing' - - -class TaskFactoryStub(object): - def new_task(self, *args): - return 'new_task' - - -class TestPolicyEnforcer(base.IsolatedUnitTest): - def test_policy_file_default_rules_default_location(self): - enforcer = glance.api.policy.Enforcer() - - context = glance.context.RequestContext(roles=[]) - enforcer.enforce(context, 'get_image', {}) - - def test_policy_file_custom_rules_default_location(self): - rules = {"get_image": '!'} - self.set_policy_rules(rules) - - enforcer = glance.api.policy.Enforcer() - - context = glance.context.RequestContext(roles=[]) - self.assertRaises(exception.Forbidden, - enforcer.enforce, context, 'get_image', {}) - - def test_policy_file_custom_location(self): - self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'), - group='oslo_policy') - - rules = {"get_image": '!'} - self.set_policy_rules(rules) - - enforcer = glance.api.policy.Enforcer() - - context = glance.context.RequestContext(roles=[]) - self.assertRaises(exception.Forbidden, - enforcer.enforce, context, 'get_image', {}) - - def test_policy_file_check(self): - self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'), - group='oslo_policy') - - rules = {"get_image": '!'} - 
self.set_policy_rules(rules) - - enforcer = glance.api.policy.Enforcer() - - context = glance.context.RequestContext(roles=[]) - self.assertEqual(False, enforcer.check(context, 'get_image', {})) - - def test_policy_file_get_image_default_everybody(self): - rules = {"default": ''} - self.set_policy_rules(rules) - - enforcer = glance.api.policy.Enforcer() - - context = glance.context.RequestContext(roles=[]) - self.assertEqual(True, enforcer.check(context, 'get_image', {})) - - def test_policy_file_get_image_default_nobody(self): - rules = {"default": '!'} - self.set_policy_rules(rules) - - enforcer = glance.api.policy.Enforcer() - - context = glance.context.RequestContext(roles=[]) - self.assertRaises(exception.Forbidden, - enforcer.enforce, context, 'get_image', {}) - - -class TestPolicyEnforcerNoFile(base.IsolatedUnitTest): - def test_policy_file_specified_but_not_found(self): - """Missing defined policy file should result in a default ruleset""" - self.config(policy_file='gobble.gobble', group='oslo_policy') - enforcer = glance.api.policy.Enforcer() - - context = glance.context.RequestContext(roles=[]) - enforcer.enforce(context, 'get_image', {}) - self.assertRaises(exception.Forbidden, - enforcer.enforce, context, 'manage_image_cache', {}) - - admin_context = glance.context.RequestContext(roles=['admin']) - enforcer.enforce(admin_context, 'manage_image_cache', {}) - - def test_policy_file_default_not_found(self): - """Missing default policy file should result in a default ruleset""" - def fake_find_file(self, name): - return None - - self.stubs.Set(oslo_config.cfg.ConfigOpts, 'find_file', - fake_find_file) - - enforcer = glance.api.policy.Enforcer() - - context = glance.context.RequestContext(roles=[]) - enforcer.enforce(context, 'get_image', {}) - self.assertRaises(exception.Forbidden, - enforcer.enforce, context, 'manage_image_cache', {}) - - admin_context = glance.context.RequestContext(roles=['admin']) - enforcer.enforce(admin_context, 'manage_image_cache', 
{}) - - -class TestImagePolicy(test_utils.BaseTestCase): - def setUp(self): - self.image_stub = ImageStub(UUID1) - self.image_repo_stub = ImageRepoStub() - self.image_factory_stub = ImageFactoryStub() - self.policy = mock.Mock() - self.policy.enforce = mock.Mock() - super(TestImagePolicy, self).setUp() - - def test_publicize_image_not_allowed(self): - self.policy.enforce.side_effect = exception.Forbidden - image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) - self.assertRaises(exception.Forbidden, - setattr, image, 'visibility', 'public') - self.assertEqual('private', image.visibility) - self.policy.enforce.assert_called_once_with({}, "publicize_image", - image.target) - - def test_publicize_image_allowed(self): - image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) - image.visibility = 'public' - self.assertEqual('public', image.visibility) - self.policy.enforce.assert_called_once_with({}, "publicize_image", - image.target) - - def test_communitize_image_not_allowed(self): - self.policy.enforce.side_effect = exception.Forbidden - image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) - self.assertRaises(exception.Forbidden, - setattr, image, 'visibility', 'community') - self.assertEqual('private', image.visibility) - self.policy.enforce.assert_called_once_with({}, "communitize_image", - image.target) - - def test_communitize_image_allowed(self): - image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) - image.visibility = 'community' - self.assertEqual('community', image.visibility) - self.policy.enforce.assert_called_once_with({}, "communitize_image", - image.target) - - def test_delete_image_not_allowed(self): - self.policy.enforce.side_effect = exception.Forbidden - image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) - self.assertRaises(exception.Forbidden, image.delete) - self.assertEqual('active', image.status) - self.policy.enforce.assert_called_once_with({}, 
"delete_image", - image.target) - - def test_delete_image_allowed(self): - image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) - image.delete() - self.assertEqual('deleted', image.status) - self.policy.enforce.assert_called_once_with({}, "delete_image", - image.target) - - def test_get_image_not_allowed(self): - self.policy.enforce.side_effect = exception.Forbidden - image_target = mock.Mock() - with mock.patch.object(glance.api.policy, 'ImageTarget') as target: - target.return_value = image_target - image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, - {}, self.policy) - self.assertRaises(exception.Forbidden, image_repo.get, UUID1) - self.policy.enforce.assert_called_once_with({}, "get_image", - image_target) - - def test_get_image_allowed(self): - image_target = mock.Mock() - with mock.patch.object(glance.api.policy, 'ImageTarget') as target: - target.return_value = image_target - image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, - {}, self.policy) - output = image_repo.get(UUID1) - self.assertIsInstance(output, glance.api.policy.ImageProxy) - self.assertEqual('image_from_get', output.image) - self.policy.enforce.assert_called_once_with({}, "get_image", - image_target) - - def test_get_images_not_allowed(self): - self.policy.enforce.side_effect = exception.Forbidden - image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, - {}, self.policy) - self.assertRaises(exception.Forbidden, image_repo.list) - self.policy.enforce.assert_called_once_with({}, "get_images", {}) - - def test_get_images_allowed(self): - image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, - {}, self.policy) - images = image_repo.list() - for i, image in enumerate(images): - self.assertIsInstance(image, glance.api.policy.ImageProxy) - self.assertEqual('image_from_list_%d' % i, image.image) - self.policy.enforce.assert_called_once_with({}, "get_images", {}) - - def test_modify_image_not_allowed(self): - 
self.policy.enforce.side_effect = exception.Forbidden - image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, - {}, self.policy) - image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) - self.assertRaises(exception.Forbidden, image_repo.save, image) - self.policy.enforce.assert_called_once_with({}, "modify_image", - image.target) - - def test_modify_image_allowed(self): - image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, - {}, self.policy) - image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) - image_repo.save(image) - self.policy.enforce.assert_called_once_with({}, "modify_image", - image.target) - - def test_add_image_not_allowed(self): - self.policy.enforce.side_effect = exception.Forbidden - image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, - {}, self.policy) - image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) - self.assertRaises(exception.Forbidden, image_repo.add, image) - self.policy.enforce.assert_called_once_with({}, "add_image", - image.target) - - def test_add_image_allowed(self): - image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, - {}, self.policy) - image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) - image_repo.add(image) - self.policy.enforce.assert_called_once_with({}, "add_image", - image.target) - - def test_new_image_visibility_public_not_allowed(self): - self.policy.enforce.side_effect = exception.Forbidden - image_factory = glance.api.policy.ImageFactoryProxy( - self.image_factory_stub, {}, self.policy) - self.assertRaises(exception.Forbidden, image_factory.new_image, - visibility='public') - self.policy.enforce.assert_called_once_with({}, "publicize_image", {}) - - def test_new_image_visibility_public_allowed(self): - image_factory = glance.api.policy.ImageFactoryProxy( - self.image_factory_stub, {}, self.policy) - image_factory.new_image(visibility='public') - 
self.policy.enforce.assert_called_once_with({}, "publicize_image", {}) - - def test_new_image_visibility_community_not_allowed(self): - self.policy.enforce.side_effect = exception.Forbidden - image_factory = glance.api.policy.ImageFactoryProxy( - self.image_factory_stub, {}, self.policy) - self.assertRaises(exception.Forbidden, image_factory.new_image, - visibility='community') - self.policy.enforce.assert_called_once_with({}, - "communitize_image", - {}) - - def test_new_image_visibility_community_allowed(self): - image_factory = glance.api.policy.ImageFactoryProxy( - self.image_factory_stub, {}, self.policy) - image_factory.new_image(visibility='community') - self.policy.enforce.assert_called_once_with({}, - "communitize_image", - {}) - - def test_image_get_data_policy_enforced_with_target(self): - extra_properties = { - 'test_key': 'test_4321' - } - image_stub = ImageStub(UUID1, extra_properties=extra_properties) - with mock.patch('glance.api.policy.ImageTarget'): - image = glance.api.policy.ImageProxy(image_stub, {}, self.policy) - target = image.target - self.policy.enforce.side_effect = exception.Forbidden - - self.assertRaises(exception.Forbidden, image.get_data) - self.policy.enforce.assert_called_once_with({}, "download_image", - target) - - def test_image_set_data(self): - self.policy.enforce.side_effect = exception.Forbidden - image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) - self.assertRaises(exception.Forbidden, image.set_data) - self.policy.enforce.assert_called_once_with({}, "upload_image", - image.target) - - -class TestMemberPolicy(test_utils.BaseTestCase): - def setUp(self): - self.policy = mock.Mock() - self.policy.enforce = mock.Mock() - self.image_stub = ImageStub(UUID1) - image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) - self.member_repo = glance.api.policy.ImageMemberRepoProxy( - MemberRepoStub(), image, {}, self.policy) - self.target = self.member_repo.target - super(TestMemberPolicy, 
self).setUp() - - def test_add_member_not_allowed(self): - self.policy.enforce.side_effect = exception.Forbidden - self.assertRaises(exception.Forbidden, self.member_repo.add, '') - self.policy.enforce.assert_called_once_with({}, "add_member", - self.target) - - def test_add_member_allowed(self): - image_member = ImageMembershipStub() - self.member_repo.add(image_member) - self.assertEqual('member_repo_add', image_member.output) - self.policy.enforce.assert_called_once_with({}, "add_member", - self.target) - - def test_get_member_not_allowed(self): - self.policy.enforce.side_effect = exception.Forbidden - self.assertRaises(exception.Forbidden, self.member_repo.get, '') - self.policy.enforce.assert_called_once_with({}, "get_member", - self.target) - - def test_get_member_allowed(self): - output = self.member_repo.get('') - self.assertEqual('member_repo_get', output) - self.policy.enforce.assert_called_once_with({}, "get_member", - self.target) - - def test_modify_member_not_allowed(self): - self.policy.enforce.side_effect = exception.Forbidden - self.assertRaises(exception.Forbidden, self.member_repo.save, '') - self.policy.enforce.assert_called_once_with({}, "modify_member", - self.target) - - def test_modify_member_allowed(self): - image_member = ImageMembershipStub() - self.member_repo.save(image_member) - self.assertEqual('member_repo_save', image_member.output) - self.policy.enforce.assert_called_once_with({}, "modify_member", - self.target) - - def test_get_members_not_allowed(self): - self.policy.enforce.side_effect = exception.Forbidden - self.assertRaises(exception.Forbidden, self.member_repo.list, '') - self.policy.enforce.assert_called_once_with({}, "get_members", - self.target) - - def test_get_members_allowed(self): - output = self.member_repo.list('') - self.assertEqual('member_repo_list', output) - self.policy.enforce.assert_called_once_with({}, "get_members", - self.target) - - def test_delete_member_not_allowed(self): - 
self.policy.enforce.side_effect = exception.Forbidden - self.assertRaises(exception.Forbidden, self.member_repo.remove, '') - self.policy.enforce.assert_called_once_with({}, "delete_member", - self.target) - - def test_delete_member_allowed(self): - image_member = ImageMembershipStub() - self.member_repo.remove(image_member) - self.assertEqual('member_repo_remove', image_member.output) - self.policy.enforce.assert_called_once_with({}, "delete_member", - self.target) - - -class TestTaskPolicy(test_utils.BaseTestCase): - def setUp(self): - self.task_stub = TaskStub(UUID1) - self.task_repo_stub = TaskRepoStub() - self.task_factory_stub = TaskFactoryStub() - self.policy = unit_test_utils.FakePolicyEnforcer() - super(TestTaskPolicy, self).setUp() - - def test_get_task_not_allowed(self): - rules = {"get_task": False} - self.policy.set_rules(rules) - task_repo = glance.api.policy.TaskRepoProxy( - self.task_repo_stub, - {}, - self.policy - ) - self.assertRaises(exception.Forbidden, - task_repo.get, - UUID1) - - def test_get_task_allowed(self): - rules = {"get_task": True} - self.policy.set_rules(rules) - task_repo = glance.api.policy.TaskRepoProxy( - self.task_repo_stub, - {}, - self.policy - ) - task = task_repo.get(UUID1) - self.assertIsInstance(task, glance.api.policy.TaskProxy) - self.assertEqual('task_from_get', task.task) - - def test_get_tasks_not_allowed(self): - rules = {"get_tasks": False} - self.policy.set_rules(rules) - task_repo = glance.api.policy.TaskStubRepoProxy( - self.task_repo_stub, - {}, - self.policy - ) - self.assertRaises(exception.Forbidden, task_repo.list) - - def test_get_tasks_allowed(self): - rules = {"get_task": True} - self.policy.set_rules(rules) - task_repo = glance.api.policy.TaskStubRepoProxy( - self.task_repo_stub, - {}, - self.policy - ) - tasks = task_repo.list() - for i, task in enumerate(tasks): - self.assertIsInstance(task, glance.api.policy.TaskStubProxy) - self.assertEqual('task_from_list_%d' % i, task.task_stub) - - def 
test_add_task_not_allowed(self): - rules = {"add_task": False} - self.policy.set_rules(rules) - task_repo = glance.api.policy.TaskRepoProxy( - self.task_repo_stub, - {}, - self.policy - ) - task = glance.api.policy.TaskProxy(self.task_stub, {}, self.policy) - self.assertRaises(exception.Forbidden, task_repo.add, task) - - def test_add_task_allowed(self): - rules = {"add_task": True} - self.policy.set_rules(rules) - task_repo = glance.api.policy.TaskRepoProxy( - self.task_repo_stub, - {}, - self.policy - ) - task = glance.api.policy.TaskProxy(self.task_stub, {}, self.policy) - task_repo.add(task) - - -class TestContextPolicyEnforcer(base.IsolatedUnitTest): - def _do_test_policy_influence_context_admin(self, - policy_admin_role, - context_role, - context_is_admin, - admin_expected): - self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'), - group='oslo_policy') - - rules = {'context_is_admin': 'role:%s' % policy_admin_role} - self.set_policy_rules(rules) - - enforcer = glance.api.policy.Enforcer() - - context = glance.context.RequestContext(roles=[context_role], - is_admin=context_is_admin, - policy_enforcer=enforcer) - self.assertEqual(admin_expected, context.is_admin) - - def test_context_admin_policy_admin(self): - self._do_test_policy_influence_context_admin('test_admin', - 'test_admin', - True, - True) - - def test_context_nonadmin_policy_admin(self): - self._do_test_policy_influence_context_admin('test_admin', - 'test_admin', - False, - True) - - def test_context_admin_policy_nonadmin(self): - self._do_test_policy_influence_context_admin('test_admin', - 'demo', - True, - True) - - def test_context_nonadmin_policy_nonadmin(self): - self._do_test_policy_influence_context_admin('test_admin', - 'demo', - False, - False) diff --git a/glance/tests/unit/test_quota.py b/glance/tests/unit/test_quota.py deleted file mode 100644 index d2f66583..00000000 --- a/glance/tests/unit/test_quota.py +++ /dev/null @@ -1,711 +0,0 @@ -# Copyright 2013, Red Hat, Inc. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import uuid - -import mock -from mock import patch -from oslo_utils import encodeutils -from oslo_utils import units - -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from glance.common import exception -from glance.common import store_utils -import glance.quota -from glance.tests.unit import utils as unit_test_utils -from glance.tests import utils as test_utils - -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' - - -class FakeContext(object): - owner = 'someone' - is_admin = False - - -class FakeImage(object): - size = None - image_id = 'someid' - locations = [{'url': 'file:///not/a/path', 'metadata': {}}] - tags = set([]) - - def set_data(self, data, size=None): - self.size = 0 - for d in data: - self.size += len(d) - - def __init__(self, **kwargs): - self.extra_properties = kwargs.get('extra_properties', {}) - - -class TestImageQuota(test_utils.BaseTestCase): - def setUp(self): - super(TestImageQuota, self).setUp() - - def _get_image(self, location_count=1, image_size=10): - context = FakeContext() - db_api = unit_test_utils.FakeDB() - store_api = unit_test_utils.FakeStoreAPI() - store = unit_test_utils.FakeStoreUtils(store_api) - base_image = FakeImage() - base_image.image_id = 'xyz' - base_image.size = image_size - image = glance.quota.ImageProxy(base_image, context, db_api, store) - locations = [] - for i in range(location_count): - 
locations.append({'url': 'file:///g/there/it/is%d' % i, - 'metadata': {}, 'status': 'active'}) - image_values = {'id': 'xyz', 'owner': context.owner, - 'status': 'active', 'size': image_size, - 'locations': locations} - db_api.image_create(context, image_values) - return image - - def test_quota_allowed(self): - quota = 10 - self.config(user_storage_quota=str(quota)) - context = FakeContext() - db_api = unit_test_utils.FakeDB() - store_api = unit_test_utils.FakeStoreAPI() - store = unit_test_utils.FakeStoreUtils(store_api) - base_image = FakeImage() - base_image.image_id = 'id' - image = glance.quota.ImageProxy(base_image, context, db_api, store) - data = '*' * quota - base_image.set_data(data, size=None) - image.set_data(data) - self.assertEqual(quota, base_image.size) - - def _test_quota_allowed_unit(self, data_length, config_quota): - self.config(user_storage_quota=config_quota) - context = FakeContext() - db_api = unit_test_utils.FakeDB() - store_api = unit_test_utils.FakeStoreAPI() - store = unit_test_utils.FakeStoreUtils(store_api) - base_image = FakeImage() - base_image.image_id = 'id' - image = glance.quota.ImageProxy(base_image, context, db_api, store) - data = '*' * data_length - base_image.set_data(data, size=None) - image.set_data(data) - self.assertEqual(data_length, base_image.size) - - def test_quota_allowed_unit_b(self): - self._test_quota_allowed_unit(10, '10B') - - def test_quota_allowed_unit_kb(self): - self._test_quota_allowed_unit(10, '1KB') - - def test_quota_allowed_unit_mb(self): - self._test_quota_allowed_unit(10, '1MB') - - def test_quota_allowed_unit_gb(self): - self._test_quota_allowed_unit(10, '1GB') - - def test_quota_allowed_unit_tb(self): - self._test_quota_allowed_unit(10, '1TB') - - def _quota_exceeded_size(self, quota, data, - deleted=True, size=None): - self.config(user_storage_quota=quota) - context = FakeContext() - db_api = unit_test_utils.FakeDB() - store_api = unit_test_utils.FakeStoreAPI() - store = 
unit_test_utils.FakeStoreUtils(store_api) - base_image = FakeImage() - base_image.image_id = 'id' - image = glance.quota.ImageProxy(base_image, context, db_api, store) - - if deleted: - with patch.object(store_utils, 'safe_delete_from_backend'): - store_utils.safe_delete_from_backend( - context, - image.image_id, - base_image.locations[0]) - - self.assertRaises(exception.StorageQuotaFull, - image.set_data, - data, - size=size) - - def test_quota_exceeded_no_size(self): - quota = 10 - data = '*' * (quota + 1) - # NOTE(jbresnah) When the image size is None it means that it is - # not known. In this case the only time we will raise an - # exception is when there is no room left at all, thus we know - # it will not fit. - # That's why 'get_remaining_quota' is mocked with return_value = 0. - with patch.object(glance.api.common, 'get_remaining_quota', - return_value=0): - self._quota_exceeded_size(str(quota), data) - - def test_quota_exceeded_with_right_size(self): - quota = 10 - data = '*' * (quota + 1) - self._quota_exceeded_size(str(quota), data, size=len(data), - deleted=False) - - def test_quota_exceeded_with_right_size_b(self): - quota = 10 - data = '*' * (quota + 1) - self._quota_exceeded_size('10B', data, size=len(data), - deleted=False) - - def test_quota_exceeded_with_right_size_kb(self): - quota = units.Ki - data = '*' * (quota + 1) - self._quota_exceeded_size('1KB', data, size=len(data), - deleted=False) - - def test_quota_exceeded_with_lie_size(self): - quota = 10 - data = '*' * (quota + 1) - self._quota_exceeded_size(str(quota), data, deleted=False, - size=quota - 1) - - def test_append_location(self): - new_location = {'url': 'file:///a/path', 'metadata': {}, - 'status': 'active'} - image = self._get_image() - pre_add_locations = image.locations[:] - image.locations.append(new_location) - pre_add_locations.append(new_location) - self.assertEqual(image.locations, pre_add_locations) - - def test_insert_location(self): - new_location = {'url': 
'file:///a/path', 'metadata': {}, - 'status': 'active'} - image = self._get_image() - pre_add_locations = image.locations[:] - image.locations.insert(0, new_location) - pre_add_locations.insert(0, new_location) - self.assertEqual(image.locations, pre_add_locations) - - def test_extend_location(self): - new_location = {'url': 'file:///a/path', 'metadata': {}, - 'status': 'active'} - image = self._get_image() - pre_add_locations = image.locations[:] - image.locations.extend([new_location]) - pre_add_locations.extend([new_location]) - self.assertEqual(image.locations, pre_add_locations) - - def test_iadd_location(self): - new_location = {'url': 'file:///a/path', 'metadata': {}, - 'status': 'active'} - image = self._get_image() - pre_add_locations = image.locations[:] - image.locations += [new_location] - pre_add_locations += [new_location] - self.assertEqual(image.locations, pre_add_locations) - - def test_set_location(self): - new_location = {'url': 'file:///a/path', 'metadata': {}, - 'status': 'active'} - image = self._get_image() - image.locations = [new_location] - self.assertEqual(image.locations, [new_location]) - - def _make_image_with_quota(self, image_size=10, location_count=2): - quota = image_size * location_count - self.config(user_storage_quota=str(quota)) - return self._get_image(image_size=image_size, - location_count=location_count) - - def test_exceed_append_location(self): - image = self._make_image_with_quota() - self.assertRaises(exception.StorageQuotaFull, - image.locations.append, - {'url': 'file:///a/path', 'metadata': {}, - 'status': 'active'}) - - def test_exceed_insert_location(self): - image = self._make_image_with_quota() - self.assertRaises(exception.StorageQuotaFull, - image.locations.insert, - 0, - {'url': 'file:///a/path', 'metadata': {}, - 'status': 'active'}) - - def test_exceed_extend_location(self): - image = self._make_image_with_quota() - self.assertRaises(exception.StorageQuotaFull, - image.locations.extend, - [{'url': 
'file:///a/path', 'metadata': {}, - 'status': 'active'}]) - - def test_set_location_under(self): - image = self._make_image_with_quota(location_count=1) - image.locations = [{'url': 'file:///a/path', 'metadata': {}, - 'status': 'active'}] - - def test_set_location_exceed(self): - image = self._make_image_with_quota(location_count=1) - try: - image.locations = [{'url': 'file:///a/path', 'metadata': {}, - 'status': 'active'}, - {'url': 'file:///a/path2', 'metadata': {}, - 'status': 'active'}] - self.fail('Should have raised the quota exception') - except exception.StorageQuotaFull: - pass - - def test_iadd_location_exceed(self): - image = self._make_image_with_quota(location_count=1) - try: - image.locations += [{'url': 'file:///a/path', 'metadata': {}, - 'status': 'active'}] - self.fail('Should have raised the quota exception') - except exception.StorageQuotaFull: - pass - - def test_append_location_for_queued_image(self): - context = FakeContext() - db_api = unit_test_utils.FakeDB() - store_api = unit_test_utils.FakeStoreAPI() - store = unit_test_utils.FakeStoreUtils(store_api) - base_image = FakeImage() - base_image.image_id = str(uuid.uuid4()) - image = glance.quota.ImageProxy(base_image, context, db_api, store) - self.assertIsNone(image.size) - - self.stubs.Set(store_api, 'get_size_from_backend', - unit_test_utils.fake_get_size_from_backend) - image.locations.append({'url': 'file:///fake.img.tar.gz', - 'metadata': {}}) - self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}}, - image.locations) - - def test_insert_location_for_queued_image(self): - context = FakeContext() - db_api = unit_test_utils.FakeDB() - store_api = unit_test_utils.FakeStoreAPI() - store = unit_test_utils.FakeStoreUtils(store_api) - base_image = FakeImage() - base_image.image_id = str(uuid.uuid4()) - image = glance.quota.ImageProxy(base_image, context, db_api, store) - self.assertIsNone(image.size) - - self.stubs.Set(store_api, 'get_size_from_backend', - 
unit_test_utils.fake_get_size_from_backend) - image.locations.insert(0, - {'url': 'file:///fake.img.tar.gz', - 'metadata': {}}) - self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}}, - image.locations) - - def test_set_location_for_queued_image(self): - context = FakeContext() - db_api = unit_test_utils.FakeDB() - store_api = unit_test_utils.FakeStoreAPI() - store = unit_test_utils.FakeStoreUtils(store_api) - base_image = FakeImage() - base_image.image_id = str(uuid.uuid4()) - image = glance.quota.ImageProxy(base_image, context, db_api, store) - self.assertIsNone(image.size) - - self.stubs.Set(store_api, 'get_size_from_backend', - unit_test_utils.fake_get_size_from_backend) - image.locations = [{'url': 'file:///fake.img.tar.gz', 'metadata': {}}] - self.assertEqual([{'url': 'file:///fake.img.tar.gz', 'metadata': {}}], - image.locations) - - def test_iadd_location_for_queued_image(self): - context = FakeContext() - db_api = unit_test_utils.FakeDB() - store_api = unit_test_utils.FakeStoreAPI() - store = unit_test_utils.FakeStoreUtils(store_api) - base_image = FakeImage() - base_image.image_id = str(uuid.uuid4()) - image = glance.quota.ImageProxy(base_image, context, db_api, store) - self.assertIsNone(image.size) - - self.stubs.Set(store_api, 'get_size_from_backend', - unit_test_utils.fake_get_size_from_backend) - image.locations += [{'url': 'file:///fake.img.tar.gz', 'metadata': {}}] - self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}}, - image.locations) - - -class TestImagePropertyQuotas(test_utils.BaseTestCase): - def setUp(self): - super(TestImagePropertyQuotas, self).setUp() - self.base_image = FakeImage() - self.image = glance.quota.ImageProxy(self.base_image, - mock.Mock(), - mock.Mock(), - mock.Mock()) - - self.image_repo_mock = mock.Mock() - self.image_repo_mock.add.return_value = self.base_image - self.image_repo_mock.save.return_value = self.base_image - - self.image_repo_proxy = glance.quota.ImageRepoProxy( - 
self.image_repo_mock, - mock.Mock(), - mock.Mock(), - mock.Mock()) - - def test_save_image_with_image_property(self): - self.config(image_property_quota=1) - - self.image.extra_properties = {'foo': 'bar'} - self.image_repo_proxy.save(self.image) - - self.image_repo_mock.save.assert_called_once_with(self.base_image, - from_state=None) - - def test_save_image_too_many_image_properties(self): - self.config(image_property_quota=1) - - self.image.extra_properties = {'foo': 'bar', 'foo2': 'bar2'} - exc = self.assertRaises(exception.ImagePropertyLimitExceeded, - self.image_repo_proxy.save, self.image) - self.assertIn("Attempted: 2, Maximum: 1", - encodeutils.exception_to_unicode(exc)) - - def test_save_image_unlimited_image_properties(self): - self.config(image_property_quota=-1) - - self.image.extra_properties = {'foo': 'bar'} - self.image_repo_proxy.save(self.image) - - self.image_repo_mock.save.assert_called_once_with(self.base_image, - from_state=None) - - def test_add_image_with_image_property(self): - self.config(image_property_quota=1) - - self.image.extra_properties = {'foo': 'bar'} - self.image_repo_proxy.add(self.image) - - self.image_repo_mock.add.assert_called_once_with(self.base_image) - - def test_add_image_too_many_image_properties(self): - self.config(image_property_quota=1) - - self.image.extra_properties = {'foo': 'bar', 'foo2': 'bar2'} - exc = self.assertRaises(exception.ImagePropertyLimitExceeded, - self.image_repo_proxy.add, self.image) - self.assertIn("Attempted: 2, Maximum: 1", - encodeutils.exception_to_unicode(exc)) - - def test_add_image_unlimited_image_properties(self): - self.config(image_property_quota=-1) - - self.image.extra_properties = {'foo': 'bar'} - self.image_repo_proxy.add(self.image) - - self.image_repo_mock.add.assert_called_once_with(self.base_image) - - def _quota_exceed_setup(self): - self.config(image_property_quota=2) - self.base_image.extra_properties = {'foo': 'bar', 'spam': 'ham'} - self.image = 
glance.quota.ImageProxy(self.base_image, - mock.Mock(), - mock.Mock(), - mock.Mock()) - - def test_modify_image_properties_when_quota_exceeded(self): - self._quota_exceed_setup() - self.config(image_property_quota=1) - self.image.extra_properties = {'foo': 'frob', 'spam': 'eggs'} - self.image_repo_proxy.save(self.image) - self.image_repo_mock.save.assert_called_once_with(self.base_image, - from_state=None) - self.assertEqual('frob', self.base_image.extra_properties['foo']) - self.assertEqual('eggs', self.base_image.extra_properties['spam']) - - def test_delete_image_properties_when_quota_exceeded(self): - self._quota_exceed_setup() - self.config(image_property_quota=1) - del self.image.extra_properties['foo'] - self.image_repo_proxy.save(self.image) - self.image_repo_mock.save.assert_called_once_with(self.base_image, - from_state=None) - self.assertNotIn('foo', self.base_image.extra_properties) - self.assertEqual('ham', self.base_image.extra_properties['spam']) - - def test_invalid_quota_config_parameter(self): - self.config(user_storage_quota='foo') - location = {"url": "file:///fake.img.tar.gz", "metadata": {}} - self.assertRaises(exception.InvalidOptionValue, - self.image.locations.append, location) - - def test_exceed_quota_during_patch_operation(self): - self._quota_exceed_setup() - self.image.extra_properties['frob'] = 'baz' - self.image.extra_properties['lorem'] = 'ipsum' - self.assertEqual('bar', self.base_image.extra_properties['foo']) - self.assertEqual('ham', self.base_image.extra_properties['spam']) - self.assertEqual('baz', self.base_image.extra_properties['frob']) - self.assertEqual('ipsum', self.base_image.extra_properties['lorem']) - - del self.image.extra_properties['frob'] - del self.image.extra_properties['lorem'] - self.image_repo_proxy.save(self.image) - call_args = mock.call(self.base_image, from_state=None) - self.assertEqual(call_args, self.image_repo_mock.save.call_args) - self.assertEqual('bar', self.base_image.extra_properties['foo']) - 
self.assertEqual('ham', self.base_image.extra_properties['spam']) - self.assertNotIn('frob', self.base_image.extra_properties) - self.assertNotIn('lorem', self.base_image.extra_properties) - - def test_quota_exceeded_after_delete_image_properties(self): - self.config(image_property_quota=3) - self.base_image.extra_properties = {'foo': 'bar', - 'spam': 'ham', - 'frob': 'baz'} - self.image = glance.quota.ImageProxy(self.base_image, - mock.Mock(), - mock.Mock(), - mock.Mock()) - self.config(image_property_quota=1) - del self.image.extra_properties['foo'] - self.image_repo_proxy.save(self.image) - self.image_repo_mock.save.assert_called_once_with(self.base_image, - from_state=None) - self.assertNotIn('foo', self.base_image.extra_properties) - self.assertEqual('ham', self.base_image.extra_properties['spam']) - self.assertEqual('baz', self.base_image.extra_properties['frob']) - - -class TestImageTagQuotas(test_utils.BaseTestCase): - def setUp(self): - super(TestImageTagQuotas, self).setUp() - self.base_image = mock.Mock() - self.base_image.tags = set([]) - self.base_image.extra_properties = {} - self.image = glance.quota.ImageProxy(self.base_image, - mock.Mock(), - mock.Mock(), - mock.Mock()) - - self.image_repo_mock = mock.Mock() - self.image_repo_proxy = glance.quota.ImageRepoProxy( - self.image_repo_mock, - mock.Mock(), - mock.Mock(), - mock.Mock()) - - def test_replace_image_tag(self): - self.config(image_tag_quota=1) - self.image.tags = ['foo'] - self.assertEqual(1, len(self.image.tags)) - - def test_replace_too_many_image_tags(self): - self.config(image_tag_quota=0) - - exc = self.assertRaises(exception.ImageTagLimitExceeded, - setattr, self.image, 'tags', ['foo', 'bar']) - self.assertIn('Attempted: 2, Maximum: 0', - encodeutils.exception_to_unicode(exc)) - self.assertEqual(0, len(self.image.tags)) - - def test_replace_unlimited_image_tags(self): - self.config(image_tag_quota=-1) - self.image.tags = ['foo'] - self.assertEqual(1, len(self.image.tags)) - - def 
test_add_image_tag(self): - self.config(image_tag_quota=1) - self.image.tags.add('foo') - self.assertEqual(1, len(self.image.tags)) - - def test_add_too_many_image_tags(self): - self.config(image_tag_quota=1) - self.image.tags.add('foo') - exc = self.assertRaises(exception.ImageTagLimitExceeded, - self.image.tags.add, 'bar') - self.assertIn('Attempted: 2, Maximum: 1', - encodeutils.exception_to_unicode(exc)) - - def test_add_unlimited_image_tags(self): - self.config(image_tag_quota=-1) - self.image.tags.add('foo') - self.assertEqual(1, len(self.image.tags)) - - def test_remove_image_tag_while_over_quota(self): - self.config(image_tag_quota=1) - self.image.tags.add('foo') - self.assertEqual(1, len(self.image.tags)) - self.config(image_tag_quota=0) - self.image.tags.remove('foo') - self.assertEqual(0, len(self.image.tags)) - - -class TestQuotaImageTagsProxy(test_utils.BaseTestCase): - def setUp(self): - super(TestQuotaImageTagsProxy, self).setUp() - - def test_add(self): - proxy = glance.quota.QuotaImageTagsProxy(set([])) - proxy.add('foo') - self.assertIn('foo', proxy) - - def test_add_too_many_tags(self): - self.config(image_tag_quota=0) - proxy = glance.quota.QuotaImageTagsProxy(set([])) - exc = self.assertRaises(exception.ImageTagLimitExceeded, - proxy.add, 'bar') - self.assertIn('Attempted: 1, Maximum: 0', - encodeutils.exception_to_unicode(exc)) - - def test_equals(self): - proxy = glance.quota.QuotaImageTagsProxy(set([])) - self.assertEqual(set([]), proxy) - - def test_not_equals(self): - proxy = glance.quota.QuotaImageTagsProxy(set([])) - self.assertNotEqual('foo', proxy) - - def test_contains(self): - proxy = glance.quota.QuotaImageTagsProxy(set(['foo'])) - self.assertIn('foo', proxy) - - def test_len(self): - proxy = glance.quota.QuotaImageTagsProxy(set(['foo', - 'bar', - 'baz', - 'niz'])) - self.assertEqual(4, len(proxy)) - - def test_iter(self): - items = set(['foo', 'bar', 'baz', 'niz']) - proxy = glance.quota.QuotaImageTagsProxy(items.copy()) - 
self.assertEqual(4, len(items)) - for item in proxy: - items.remove(item) - self.assertEqual(0, len(items)) - - -class TestImageMemberQuotas(test_utils.BaseTestCase): - def setUp(self): - super(TestImageMemberQuotas, self).setUp() - db_api = unit_test_utils.FakeDB() - store_api = unit_test_utils.FakeStoreAPI() - store = unit_test_utils.FakeStoreUtils(store_api) - context = FakeContext() - self.image = mock.Mock() - self.base_image_member_factory = mock.Mock() - self.image_member_factory = glance.quota.ImageMemberFactoryProxy( - self.base_image_member_factory, context, - db_api, store) - - def test_new_image_member(self): - self.config(image_member_quota=1) - - self.image_member_factory.new_image_member(self.image, - 'fake_id') - nim = self.base_image_member_factory.new_image_member - nim.assert_called_once_with(self.image, 'fake_id') - - def test_new_image_member_unlimited_members(self): - self.config(image_member_quota=-1) - - self.image_member_factory.new_image_member(self.image, - 'fake_id') - nim = self.base_image_member_factory.new_image_member - nim.assert_called_once_with(self.image, 'fake_id') - - def test_new_image_member_too_many_members(self): - self.config(image_member_quota=0) - - self.assertRaises(exception.ImageMemberLimitExceeded, - self.image_member_factory.new_image_member, - self.image, 'fake_id') - - -class TestImageLocationQuotas(test_utils.BaseTestCase): - def setUp(self): - super(TestImageLocationQuotas, self).setUp() - self.base_image = mock.Mock() - self.base_image.locations = [] - self.base_image.size = 1 - self.base_image.extra_properties = {} - self.image = glance.quota.ImageProxy(self.base_image, - mock.Mock(), - mock.Mock(), - mock.Mock()) - - self.image_repo_mock = mock.Mock() - self.image_repo_proxy = glance.quota.ImageRepoProxy( - self.image_repo_mock, - mock.Mock(), - mock.Mock(), - mock.Mock()) - - def test_replace_image_location(self): - self.config(image_location_quota=1) - self.image.locations = [{"url": 
"file:///fake.img.tar.gz", - "metadata": {} - }] - self.assertEqual(1, len(self.image.locations)) - - def test_replace_too_many_image_locations(self): - self.config(image_location_quota=1) - self.image.locations = [{"url": "file:///fake.img.tar.gz", - "metadata": {}} - ] - locations = [ - {"url": "file:///fake1.img.tar.gz", "metadata": {}}, - {"url": "file:///fake2.img.tar.gz", "metadata": {}}, - {"url": "file:///fake3.img.tar.gz", "metadata": {}} - ] - exc = self.assertRaises(exception.ImageLocationLimitExceeded, - setattr, self.image, 'locations', locations) - self.assertIn('Attempted: 3, Maximum: 1', - encodeutils.exception_to_unicode(exc)) - self.assertEqual(1, len(self.image.locations)) - - def test_replace_unlimited_image_locations(self): - self.config(image_location_quota=-1) - self.image.locations = [{"url": "file:///fake.img.tar.gz", - "metadata": {}} - ] - self.assertEqual(1, len(self.image.locations)) - - def test_add_image_location(self): - self.config(image_location_quota=1) - location = {"url": "file:///fake.img.tar.gz", "metadata": {}} - self.image.locations.append(location) - self.assertEqual(1, len(self.image.locations)) - - def test_add_too_many_image_locations(self): - self.config(image_location_quota=1) - location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}} - self.image.locations.append(location1) - location2 = {"url": "file:///fake2.img.tar.gz", "metadata": {}} - exc = self.assertRaises(exception.ImageLocationLimitExceeded, - self.image.locations.append, location2) - self.assertIn('Attempted: 2, Maximum: 1', - encodeutils.exception_to_unicode(exc)) - - def test_add_unlimited_image_locations(self): - self.config(image_location_quota=-1) - location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}} - self.image.locations.append(location1) - self.assertEqual(1, len(self.image.locations)) - - def test_remove_image_location_while_over_quota(self): - self.config(image_location_quota=1) - location1 = {"url": "file:///fake1.img.tar.gz", 
"metadata": {}} - self.image.locations.append(location1) - self.assertEqual(1, len(self.image.locations)) - self.config(image_location_quota=0) - self.image.locations.remove(location1) - self.assertEqual(0, len(self.image.locations)) diff --git a/glance/tests/unit/test_schema.py b/glance/tests/unit/test_schema.py deleted file mode 100644 index 5d1cc065..00000000 --- a/glance/tests/unit/test_schema.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from glance.common import exception -import glance.schema -from glance.tests import utils as test_utils - - -class TestBasicSchema(test_utils.BaseTestCase): - - def setUp(self): - super(TestBasicSchema, self).setUp() - properties = { - 'ham': {'type': 'string'}, - 'eggs': {'type': 'string'}, - } - self.schema = glance.schema.Schema('basic', properties) - - def test_validate_passes(self): - obj = {'ham': 'no', 'eggs': 'scrambled'} - self.schema.validate(obj) # No exception raised - - def test_validate_fails_on_extra_properties(self): - obj = {'ham': 'virginia', 'eggs': 'scrambled', 'bacon': 'crispy'} - self.assertRaises(exception.InvalidObject, self.schema.validate, obj) - - def test_validate_fails_on_bad_type(self): - obj = {'eggs': 2} - self.assertRaises(exception.InvalidObject, self.schema.validate, obj) - - def test_filter_strips_extra_properties(self): - obj = {'ham': 'virginia', 'eggs': 'scrambled', 'bacon': 'crispy'} - filtered = self.schema.filter(obj) - expected = {'ham': 'virginia', 'eggs': 'scrambled'} - self.assertEqual(expected, filtered) - - def test_merge_properties(self): - self.schema.merge_properties({'bacon': {'type': 'string'}}) - expected = set(['ham', 'eggs', 'bacon']) - actual = set(self.schema.raw()['properties'].keys()) - self.assertEqual(expected, actual) - - def test_merge_conflicting_properties(self): - conflicts = {'eggs': {'type': 'integer'}} - self.assertRaises(exception.SchemaLoadError, - self.schema.merge_properties, conflicts) - - def test_merge_conflicting_but_identical_properties(self): - conflicts = {'ham': {'type': 'string'}} - self.schema.merge_properties(conflicts) # no exception raised - expected = set(['ham', 'eggs']) - actual = set(self.schema.raw()['properties'].keys()) - self.assertEqual(expected, actual) - - def test_raw_json_schema(self): - expected = { - 'name': 'basic', - 'properties': { - 'ham': {'type': 'string'}, - 'eggs': {'type': 'string'}, - }, - 'additionalProperties': False, - } - self.assertEqual(expected, 
self.schema.raw()) - - -class TestBasicSchemaLinks(test_utils.BaseTestCase): - - def setUp(self): - super(TestBasicSchemaLinks, self).setUp() - properties = { - 'ham': {'type': 'string'}, - 'eggs': {'type': 'string'}, - } - links = [ - {'rel': 'up', 'href': '/menu'}, - ] - self.schema = glance.schema.Schema('basic', properties, links) - - def test_raw_json_schema(self): - expected = { - 'name': 'basic', - 'properties': { - 'ham': {'type': 'string'}, - 'eggs': {'type': 'string'}, - }, - 'links': [ - {'rel': 'up', 'href': '/menu'}, - ], - 'additionalProperties': False, - } - self.assertEqual(expected, self.schema.raw()) - - -class TestPermissiveSchema(test_utils.BaseTestCase): - - def setUp(self): - super(TestPermissiveSchema, self).setUp() - properties = { - 'ham': {'type': 'string'}, - 'eggs': {'type': 'string'}, - } - self.schema = glance.schema.PermissiveSchema('permissive', properties) - - def test_validate_with_additional_properties_allowed(self): - obj = {'ham': 'virginia', 'eggs': 'scrambled', 'bacon': 'crispy'} - self.schema.validate(obj) # No exception raised - - def test_validate_rejects_non_string_extra_properties(self): - obj = {'ham': 'virginia', 'eggs': 'scrambled', 'grits': 1000} - self.assertRaises(exception.InvalidObject, self.schema.validate, obj) - - def test_filter_passes_extra_properties(self): - obj = {'ham': 'virginia', 'eggs': 'scrambled', 'bacon': 'crispy'} - filtered = self.schema.filter(obj) - self.assertEqual(obj, filtered) - - def test_raw_json_schema(self): - expected = { - 'name': 'permissive', - 'properties': { - 'ham': {'type': 'string'}, - 'eggs': {'type': 'string'}, - }, - 'additionalProperties': {'type': 'string'}, - } - self.assertEqual(expected, self.schema.raw()) - - -class TestCollectionSchema(test_utils.BaseTestCase): - - def test_raw_json_schema(self): - item_properties = {'cheese': {'type': 'string'}} - item_schema = glance.schema.Schema('mouse', item_properties) - collection_schema = glance.schema.CollectionSchema('mice', 
item_schema) - expected = { - 'name': 'mice', - 'properties': { - 'mice': { - 'type': 'array', - 'items': item_schema.raw(), - }, - 'first': {'type': 'string'}, - 'next': {'type': 'string'}, - 'schema': {'type': 'string'}, - }, - 'links': [ - {'rel': 'first', 'href': '{first}'}, - {'rel': 'next', 'href': '{next}'}, - {'rel': 'describedby', 'href': '{schema}'}, - ], - } - self.assertEqual(expected, collection_schema.raw()) diff --git a/glance/tests/unit/test_scrubber.py b/glance/tests/unit/test_scrubber.py deleted file mode 100644 index 83cd184d..00000000 --- a/glance/tests/unit/test_scrubber.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -import glance_store -import mock -from mock import patch -from mox3 import mox -from oslo_config import cfg -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from glance.common import exception -from glance import scrubber -from glance.tests import utils as test_utils - -CONF = cfg.CONF - - -class TestScrubber(test_utils.BaseTestCase): - - def setUp(self): - super(TestScrubber, self).setUp() - glance_store.register_opts(CONF) - self.config(group='glance_store', default_store='file', - filesystem_store_datadir=self.test_dir) - glance_store.create_stores() - self.mox = mox.Mox() - - def tearDown(self): - self.mox.UnsetStubs() - # These globals impact state outside of this test class, kill them. - scrubber._file_queue = None - scrubber._db_queue = None - super(TestScrubber, self).tearDown() - - def _scrubber_cleanup_with_store_delete_exception(self, ex): - uri = 'file://some/path/%s' % uuid.uuid4() - id = 'helloworldid' - scrub = scrubber.Scrubber(glance_store) - scrub.registry = self.mox.CreateMockAnything() - scrub.registry.get_image(id).AndReturn({'status': 'pending_delete'}) - scrub.registry.update_image(id, {'status': 'deleted'}) - self.mox.StubOutWithMock(glance_store, "delete_from_backend") - glance_store.delete_from_backend( - uri, - mox.IgnoreArg()).AndRaise(ex) - self.mox.ReplayAll() - scrub._scrub_image(id, [(id, '-', uri)]) - self.mox.VerifyAll() - - def test_store_delete_successful(self): - uri = 'file://some/path/%s' % uuid.uuid4() - id = 'helloworldid' - - scrub = scrubber.Scrubber(glance_store) - scrub.registry = self.mox.CreateMockAnything() - scrub.registry.get_image(id).AndReturn({'status': 'pending_delete'}) - scrub.registry.update_image(id, {'status': 'deleted'}) - self.mox.StubOutWithMock(glance_store, "delete_from_backend") - glance_store.delete_from_backend(uri, mox.IgnoreArg()).AndReturn('') - self.mox.ReplayAll() - scrub._scrub_image(id, [(id, '-', uri)]) - 
self.mox.VerifyAll() - - def test_store_delete_store_exceptions(self): - # While scrubbing image data, all store exceptions, other than - # NotFound, cause image scrubbing to fail. Essentially, no attempt - # would be made to update the status of image. - - uri = 'file://some/path/%s' % uuid.uuid4() - id = 'helloworldid' - ex = glance_store.GlanceStoreException() - - scrub = scrubber.Scrubber(glance_store) - scrub.registry = self.mox.CreateMockAnything() - self.mox.StubOutWithMock(glance_store, "delete_from_backend") - glance_store.delete_from_backend( - uri, - mox.IgnoreArg()).AndRaise(ex) - self.mox.ReplayAll() - scrub._scrub_image(id, [(id, '-', uri)]) - self.mox.VerifyAll() - - def test_store_delete_notfound_exception(self): - # While scrubbing image data, NotFound exception is ignored and image - # scrubbing succeeds - uri = 'file://some/path/%s' % uuid.uuid4() - id = 'helloworldid' - ex = glance_store.NotFound(message='random') - - scrub = scrubber.Scrubber(glance_store) - scrub.registry = self.mox.CreateMockAnything() - scrub.registry.get_image(id).AndReturn({'status': 'pending_delete'}) - scrub.registry.update_image(id, {'status': 'deleted'}) - self.mox.StubOutWithMock(glance_store, "delete_from_backend") - glance_store.delete_from_backend(uri, mox.IgnoreArg()).AndRaise(ex) - self.mox.ReplayAll() - scrub._scrub_image(id, [(id, '-', uri)]) - self.mox.VerifyAll() - - def test_scrubber_exits(self): - # Checks for Scrubber exits when it is not able to fetch jobs from - # the queue - scrub_jobs = scrubber.ScrubDBQueue.get_all_locations - scrub_jobs = mock.MagicMock() - scrub_jobs.side_effect = exception.NotFound - scrub = scrubber.Scrubber(glance_store) - self.assertRaises(exception.FailedToGetScrubberJobs, - scrub._get_delete_jobs) - - -class TestScrubDBQueue(test_utils.BaseTestCase): - - def setUp(self): - super(TestScrubDBQueue, self).setUp() - - def _create_image_list(self, count): - images = [] - for x in range(count): - images.append({'id': x}) - - return 
images - - def test_get_all_images(self): - scrub_queue = scrubber.ScrubDBQueue() - images = self._create_image_list(15) - image_pager = ImagePager(images) - - def make_get_images_detailed(pager): - def mock_get_images_detailed(filters, marker=None): - return pager() - return mock_get_images_detailed - - with patch.object(scrub_queue.registry, 'get_images_detailed') as ( - _mock_get_images_detailed): - _mock_get_images_detailed.side_effect = ( - make_get_images_detailed(image_pager)) - actual = list(scrub_queue._get_all_images()) - - self.assertEqual(images, actual) - - def test_get_all_images_paged(self): - scrub_queue = scrubber.ScrubDBQueue() - images = self._create_image_list(15) - image_pager = ImagePager(images, page_size=4) - - def make_get_images_detailed(pager): - def mock_get_images_detailed(filters, marker=None): - return pager() - return mock_get_images_detailed - - with patch.object(scrub_queue.registry, 'get_images_detailed') as ( - _mock_get_images_detailed): - _mock_get_images_detailed.side_effect = ( - make_get_images_detailed(image_pager)) - actual = list(scrub_queue._get_all_images()) - - self.assertEqual(images, actual) - - -class ImagePager(object): - def __init__(self, images, page_size=0): - image_count = len(images) - if page_size == 0 or page_size > image_count: - page_size = image_count - self.image_batches = [] - start = 0 - l = len(images) - while start < l: - self.image_batches.append(images[start: start + page_size]) - start += page_size - if (l - start) < page_size: - page_size = l - start - - def __call__(self): - if len(self.image_batches) == 0: - return [] - else: - return self.image_batches.pop(0) diff --git a/glance/tests/unit/test_store_image.py b/glance/tests/unit/test_store_image.py deleted file mode 100644 index 4fabf481..00000000 --- a/glance/tests/unit/test_store_image.py +++ /dev/null @@ -1,915 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from cursive import exception as cursive_exception -from cursive import signature_utils -import glance_store -import mock - -from glance.common import exception -import glance.location -from glance.tests.unit import base as unit_test_base -from glance.tests.unit import utils as unit_test_utils -from glance.tests import utils - - -BASE_URI = 'http://storeurl.com/container' -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' -UUID2 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' -USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' -TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' -TENANT3 = '228c6da5-29cd-4d67-9457-ed632e083fc0' - - -class ImageRepoStub(object): - def add(self, image): - return image - - def save(self, image, from_state=None): - return image - - -class ImageStub(object): - def __init__(self, image_id, status=None, locations=None, - visibility=None, extra_properties=None): - self.image_id = image_id - self.status = status - self.locations = locations or [] - self.visibility = visibility - self.size = 1 - self.extra_properties = extra_properties or {} - - def delete(self): - self.status = 'deleted' - - def get_member_repo(self): - return FakeMemberRepo(self, [TENANT1, TENANT2]) - - -class ImageFactoryStub(object): - def new_image(self, image_id=None, name=None, visibility='private', - min_disk=0, min_ram=0, protected=False, owner=None, - disk_format=None, 
container_format=None, - extra_properties=None, tags=None, **other_args): - return ImageStub(image_id, visibility=visibility, - extra_properties=extra_properties, **other_args) - - -class FakeMemberRepo(object): - def __init__(self, image, tenants=None): - self.image = image - self.factory = glance.domain.ImageMemberFactory() - self.tenants = tenants or [] - - def list(self, *args, **kwargs): - return [self.factory.new_image_member(self.image, tenant) - for tenant in self.tenants] - - def add(self, member): - self.tenants.append(member.member_id) - - def remove(self, member): - self.tenants.remove(member.member_id) - - -class TestStoreImage(utils.BaseTestCase): - def setUp(self): - locations = [{'url': '%s/%s' % (BASE_URI, UUID1), - 'metadata': {}, 'status': 'active'}] - self.image_stub = ImageStub(UUID1, 'active', locations) - self.store_api = unit_test_utils.FakeStoreAPI() - self.store_utils = unit_test_utils.FakeStoreUtils(self.store_api) - super(TestStoreImage, self).setUp() - - def test_image_delete(self): - image = glance.location.ImageProxy(self.image_stub, {}, - self.store_api, self.store_utils) - location = image.locations[0] - self.assertEqual('active', image.status) - self.store_api.get_from_backend(location['url'], context={}) - image.delete() - self.assertEqual('deleted', image.status) - self.assertRaises(glance_store.NotFound, - self.store_api.get_from_backend, location['url'], {}) - - def test_image_get_data(self): - image = glance.location.ImageProxy(self.image_stub, {}, - self.store_api, self.store_utils) - self.assertEqual('XXX', image.get_data()) - - def test_image_get_data_from_second_location(self): - def fake_get_from_backend(self, location, offset=0, - chunk_size=None, context=None): - if UUID1 in location: - raise Exception('not allow download from %s' % location) - else: - return self.data[location] - - image1 = glance.location.ImageProxy(self.image_stub, {}, - self.store_api, self.store_utils) - self.assertEqual('XXX', image1.get_data()) - 
# Multiple location support - context = glance.context.RequestContext(user=USER1) - (image2, image_stub2) = self._add_image(context, UUID2, 'ZZZ', 3) - location_data = image2.locations[0] - image1.locations.append(location_data) - self.assertEqual(2, len(image1.locations)) - self.assertEqual(UUID2, location_data['url']) - - self.stubs.Set(unit_test_utils.FakeStoreAPI, 'get_from_backend', - fake_get_from_backend) - # This time, image1.get_data() returns the data wrapped in a - # LimitingReader|CooperativeReader pipeline, so peeking under - # the hood of those objects to get at the underlying string. - self.assertEqual('ZZZ', image1.get_data().data.fd) - - image1.locations.pop(0) - self.assertEqual(1, len(image1.locations)) - image2.delete() - - def test_image_set_data(self): - context = glance.context.RequestContext(user=USER1) - image_stub = ImageStub(UUID2, status='queued', locations=[]) - image = glance.location.ImageProxy(image_stub, context, - self.store_api, self.store_utils) - image.set_data('YYYY', 4) - self.assertEqual(4, image.size) - # NOTE(markwash): FakeStore returns image_id for location - self.assertEqual(UUID2, image.locations[0]['url']) - self.assertEqual('Z', image.checksum) - self.assertEqual('active', image.status) - - def test_image_set_data_location_metadata(self): - context = glance.context.RequestContext(user=USER1) - image_stub = ImageStub(UUID2, status='queued', locations=[]) - loc_meta = {'key': 'value5032'} - store_api = unit_test_utils.FakeStoreAPI(store_metadata=loc_meta) - store_utils = unit_test_utils.FakeStoreUtils(store_api) - image = glance.location.ImageProxy(image_stub, context, - store_api, store_utils) - image.set_data('YYYY', 4) - self.assertEqual(4, image.size) - location_data = image.locations[0] - self.assertEqual(UUID2, location_data['url']) - self.assertEqual(loc_meta, location_data['metadata']) - self.assertEqual('Z', image.checksum) - self.assertEqual('active', image.status) - image.delete() - 
self.assertEqual(image.status, 'deleted') - self.assertRaises(glance_store.NotFound, - self.store_api.get_from_backend, - image.locations[0]['url'], {}) - - def test_image_set_data_unknown_size(self): - context = glance.context.RequestContext(user=USER1) - image_stub = ImageStub(UUID2, status='queued', locations=[]) - image = glance.location.ImageProxy(image_stub, context, - self.store_api, self.store_utils) - image.set_data('YYYY', None) - self.assertEqual(4, image.size) - # NOTE(markwash): FakeStore returns image_id for location - self.assertEqual(UUID2, image.locations[0]['url']) - self.assertEqual('Z', image.checksum) - self.assertEqual('active', image.status) - image.delete() - self.assertEqual(image.status, 'deleted') - self.assertRaises(glance_store.NotFound, - self.store_api.get_from_backend, - image.locations[0]['url'], context={}) - - @mock.patch('glance.location.LOG') - def test_image_set_data_valid_signature(self, mock_log): - context = glance.context.RequestContext(user=USER1) - extra_properties = { - 'img_signature_certificate_uuid': 'UUID', - 'img_signature_hash_method': 'METHOD', - 'img_signature_key_type': 'TYPE', - 'img_signature': 'VALID' - } - image_stub = ImageStub(UUID2, status='queued', - extra_properties=extra_properties) - self.stubs.Set(signature_utils, 'get_verifier', - unit_test_utils.fake_get_verifier) - image = glance.location.ImageProxy(image_stub, context, - self.store_api, self.store_utils) - image.set_data('YYYY', 4) - self.assertEqual('active', image.status) - mock_log.info.assert_called_once_with( - u'Successfully verified signature for image %s', - UUID2) - - def test_image_set_data_invalid_signature(self): - context = glance.context.RequestContext(user=USER1) - extra_properties = { - 'img_signature_certificate_uuid': 'UUID', - 'img_signature_hash_method': 'METHOD', - 'img_signature_key_type': 'TYPE', - 'img_signature': 'INVALID' - } - image_stub = ImageStub(UUID2, status='queued', - extra_properties=extra_properties) - 
self.stubs.Set(signature_utils, 'get_verifier', - unit_test_utils.fake_get_verifier) - image = glance.location.ImageProxy(image_stub, context, - self.store_api, self.store_utils) - self.assertRaises(cursive_exception.SignatureVerificationError, - image.set_data, - 'YYYY', 4) - - def test_image_set_data_invalid_signature_missing_metadata(self): - context = glance.context.RequestContext(user=USER1) - extra_properties = { - 'img_signature_hash_method': 'METHOD', - 'img_signature_key_type': 'TYPE', - 'img_signature': 'INVALID' - } - image_stub = ImageStub(UUID2, status='queued', - extra_properties=extra_properties) - self.stubs.Set(signature_utils, 'get_verifier', - unit_test_utils.fake_get_verifier) - image = glance.location.ImageProxy(image_stub, context, - self.store_api, self.store_utils) - image.set_data('YYYY', 4) - self.assertEqual(UUID2, image.locations[0]['url']) - self.assertEqual('Z', image.checksum) - # Image is still active, since invalid signature was ignored - self.assertEqual('active', image.status) - - def _add_image(self, context, image_id, data, len): - image_stub = ImageStub(image_id, status='queued', locations=[]) - image = glance.location.ImageProxy(image_stub, context, - self.store_api, self.store_utils) - image.set_data(data, len) - self.assertEqual(len, image.size) - # NOTE(markwash): FakeStore returns image_id for location - location = {'url': image_id, 'metadata': {}, 'status': 'active'} - self.assertEqual([location], image.locations) - self.assertEqual([location], image_stub.locations) - self.assertEqual('active', image.status) - return (image, image_stub) - - def test_image_change_append_invalid_location_uri(self): - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - - location_bad = {'url': 'unknown://location', 'metadata': {}} - self.assertRaises(exception.BadStoreUri, - image1.locations.append, location_bad) - 
- image1.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - - def test_image_change_append_invalid_location_metatdata(self): - UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) - - # Using only one test rule here is enough to make sure - # 'store.check_location_metadata()' can be triggered - # in Location proxy layer. Complete test rule for - # 'store.check_location_metadata()' testing please - # check below cases within 'TestStoreMetaDataChecker'. - location_bad = {'url': UUID3, 'metadata': b"a invalid metadata"} - - self.assertRaises(glance_store.BackendException, - image1.locations.append, location_bad) - - image1.delete() - image2.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - self.assertNotIn(UUID3, self.store_api.data.keys()) - - def test_image_change_append_locations(self): - UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) - - location2 = {'url': UUID2, 'metadata': {}, 'status': 'active'} - location3 = {'url': UUID3, 'metadata': {}, 'status': 'active'} - - image1.locations.append(location3) - - self.assertEqual([location2, location3], image_stub1.locations) - self.assertEqual([location2, location3], image1.locations) - - image1.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - self.assertNotIn(UUID3, self.store_api.data.keys()) - - image2.delete() - - def 
test_image_change_pop_location(self): - UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) - - location2 = {'url': UUID2, 'metadata': {}, 'status': 'active'} - location3 = {'url': UUID3, 'metadata': {}, 'status': 'active'} - - image1.locations.append(location3) - - self.assertEqual([location2, location3], image_stub1.locations) - self.assertEqual([location2, location3], image1.locations) - - image1.locations.pop() - - self.assertEqual([location2], image_stub1.locations) - self.assertEqual([location2], image1.locations) - - image1.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - self.assertNotIn(UUID3, self.store_api.data.keys()) - - image2.delete() - - def test_image_change_extend_invalid_locations_uri(self): - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - - location_bad = {'url': 'unknown://location', 'metadata': {}} - - self.assertRaises(exception.BadStoreUri, - image1.locations.extend, [location_bad]) - - image1.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - - def test_image_change_extend_invalid_locations_metadata(self): - UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) - - location_bad = {'url': UUID3, 'metadata': b"a invalid metadata"} - - self.assertRaises(glance_store.BackendException, - 
image1.locations.extend, [location_bad]) - - image1.delete() - image2.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - self.assertNotIn(UUID3, self.store_api.data.keys()) - - def test_image_change_extend_locations(self): - UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) - - location2 = {'url': UUID2, 'metadata': {}, 'status': 'active'} - location3 = {'url': UUID3, 'metadata': {}, 'status': 'active'} - - image1.locations.extend([location3]) - - self.assertEqual([location2, location3], image_stub1.locations) - self.assertEqual([location2, location3], image1.locations) - - image1.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - self.assertNotIn(UUID3, self.store_api.data.keys()) - - image2.delete() - - def test_image_change_remove_location(self): - UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) - - location2 = {'url': UUID2, 'metadata': {}, 'status': 'active'} - location3 = {'url': UUID3, 'metadata': {}, 'status': 'active'} - location_bad = {'url': 'unknown://location', 'metadata': {}} - - image1.locations.extend([location3]) - image1.locations.remove(location2) - - self.assertEqual([location3], image_stub1.locations) - self.assertEqual([location3], image1.locations) - self.assertRaises(ValueError, - image1.locations.remove, location_bad) - - image1.delete() - image2.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - 
self.assertNotIn(UUID2, self.store_api.data.keys()) - self.assertNotIn(UUID3, self.store_api.data.keys()) - - def test_image_change_delete_location(self): - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - - del image1.locations[0] - - self.assertEqual([], image_stub1.locations) - self.assertEqual(0, len(image1.locations)) - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - - image1.delete() - - def test_image_change_insert_invalid_location_uri(self): - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - - location_bad = {'url': 'unknown://location', 'metadata': {}} - self.assertRaises(exception.BadStoreUri, - image1.locations.insert, 0, location_bad) - - image1.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - - def test_image_change_insert_invalid_location_metadata(self): - UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) - - location_bad = {'url': UUID3, 'metadata': b"a invalid metadata"} - - self.assertRaises(glance_store.BackendException, - image1.locations.insert, 0, location_bad) - - image1.delete() - image2.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - self.assertNotIn(UUID3, self.store_api.data.keys()) - - def test_image_change_insert_location(self): - UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' - self.assertEqual(2, len(self.store_api.data.keys())) - 
- context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) - - location2 = {'url': UUID2, 'metadata': {}, 'status': 'active'} - location3 = {'url': UUID3, 'metadata': {}, 'status': 'active'} - - image1.locations.insert(0, location3) - - self.assertEqual([location3, location2], image_stub1.locations) - self.assertEqual([location3, location2], image1.locations) - - image1.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - self.assertNotIn(UUID3, self.store_api.data.keys()) - - image2.delete() - - def test_image_change_delete_locations(self): - UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) - - location2 = {'url': UUID2, 'metadata': {}} - location3 = {'url': UUID3, 'metadata': {}} - - image1.locations.insert(0, location3) - del image1.locations[0:100] - - self.assertEqual([], image_stub1.locations) - self.assertEqual(0, len(image1.locations)) - self.assertRaises(exception.BadStoreUri, - image1.locations.insert, 0, location2) - self.assertRaises(exception.BadStoreUri, - image2.locations.insert, 0, location3) - - image1.delete() - image2.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - self.assertNotIn(UUID3, self.store_api.data.keys()) - - def test_image_change_adding_invalid_location_uri(self): - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - image_stub1 = ImageStub('fake_image_id', status='queued', locations=[]) - image1 = glance.location.ImageProxy(image_stub1, context, - self.store_api, 
self.store_utils) - - location_bad = {'url': 'unknown://location', 'metadata': {}} - - self.assertRaises(exception.BadStoreUri, - image1.locations.__iadd__, [location_bad]) - self.assertEqual([], image_stub1.locations) - self.assertEqual([], image1.locations) - - image1.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - - def test_image_change_adding_invalid_location_metadata(self): - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - - image_stub2 = ImageStub('fake_image_id', status='queued', locations=[]) - image2 = glance.location.ImageProxy(image_stub2, context, - self.store_api, self.store_utils) - - location_bad = {'url': UUID2, 'metadata': b"a invalid metadata"} - - self.assertRaises(glance_store.BackendException, - image2.locations.__iadd__, [location_bad]) - self.assertEqual([], image_stub2.locations) - self.assertEqual([], image2.locations) - - image1.delete() - image2.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - - def test_image_change_adding_locations(self): - UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) - - image_stub3 = ImageStub('fake_image_id', status='queued', locations=[]) - image3 = glance.location.ImageProxy(image_stub3, context, - self.store_api, self.store_utils) - - location2 = {'url': UUID2, 'metadata': {}} - location3 = {'url': UUID3, 'metadata': {}} - - image3.locations += [location2, location3] - - self.assertEqual([location2, location3], image_stub3.locations) - self.assertEqual([location2, location3], image3.locations) 
- - image3.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - self.assertNotIn(UUID3, self.store_api.data.keys()) - - image1.delete() - image2.delete() - - def test_image_get_location_index(self): - UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) - image_stub3 = ImageStub('fake_image_id', status='queued', locations=[]) - - image3 = glance.location.ImageProxy(image_stub3, context, - self.store_api, self.store_utils) - - location2 = {'url': UUID2, 'metadata': {}} - location3 = {'url': UUID3, 'metadata': {}} - - image3.locations += [location2, location3] - - self.assertEqual(1, image_stub3.locations.index(location3)) - - image3.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - self.assertNotIn(UUID3, self.store_api.data.keys()) - - image1.delete() - image2.delete() - - def test_image_get_location_by_index(self): - UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) - image_stub3 = ImageStub('fake_image_id', status='queued', locations=[]) - image3 = glance.location.ImageProxy(image_stub3, context, - self.store_api, self.store_utils) - - location2 = {'url': UUID2, 'metadata': {}} - location3 = {'url': UUID3, 'metadata': {}} - - image3.locations += [location2, location3] - - self.assertEqual(1, image_stub3.locations.index(location3)) - self.assertEqual(location2, image_stub3.locations[0]) - - image3.delete() - - self.assertEqual(2, 
len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - self.assertNotIn(UUID3, self.store_api.data.keys()) - - image1.delete() - image2.delete() - - def test_image_checking_location_exists(self): - UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) - - image_stub3 = ImageStub('fake_image_id', status='queued', locations=[]) - image3 = glance.location.ImageProxy(image_stub3, context, - self.store_api, self.store_utils) - - location2 = {'url': UUID2, 'metadata': {}} - location3 = {'url': UUID3, 'metadata': {}} - location_bad = {'url': 'unknown://location', 'metadata': {}} - - image3.locations += [location2, location3] - - self.assertIn(location3, image_stub3.locations) - self.assertNotIn(location_bad, image_stub3.locations) - - image3.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - self.assertNotIn(UUID3, self.store_api.data.keys()) - - image1.delete() - image2.delete() - - def test_image_reverse_locations_order(self): - UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' - self.assertEqual(2, len(self.store_api.data.keys())) - - context = glance.context.RequestContext(user=USER1) - (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) - (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) - - location2 = {'url': UUID2, 'metadata': {}} - location3 = {'url': UUID3, 'metadata': {}} - - image_stub3 = ImageStub('fake_image_id', status='queued', locations=[]) - image3 = glance.location.ImageProxy(image_stub3, context, - self.store_api, self.store_utils) - image3.locations += [location2, location3] - - image_stub3.locations.reverse() - - self.assertEqual([location3, location2], image_stub3.locations) - 
self.assertEqual([location3, location2], image3.locations) - - image3.delete() - - self.assertEqual(2, len(self.store_api.data.keys())) - self.assertNotIn(UUID2, self.store_api.data.keys()) - self.assertNotIn(UUID3, self.store_api.data.keys()) - - image1.delete() - image2.delete() - - -class TestStoreImageRepo(utils.BaseTestCase): - def setUp(self): - super(TestStoreImageRepo, self).setUp() - self.store_api = unit_test_utils.FakeStoreAPI() - store_utils = unit_test_utils.FakeStoreUtils(self.store_api) - self.image_stub = ImageStub(UUID1) - self.image = glance.location.ImageProxy(self.image_stub, {}, - self.store_api, store_utils) - self.image_repo_stub = ImageRepoStub() - self.image_repo = glance.location.ImageRepoProxy(self.image_repo_stub, - {}, self.store_api, - store_utils) - patcher = mock.patch("glance.location._get_member_repo_for_store", - self.get_fake_member_repo) - patcher.start() - self.addCleanup(patcher.stop) - self.fake_member_repo = FakeMemberRepo(self.image, [TENANT1, TENANT2]) - self.image_member_repo = glance.location.ImageMemberRepoProxy( - self.fake_member_repo, - self.image, - {}, self.store_api) - - def get_fake_member_repo(self, image, context, db_api, store_api): - return FakeMemberRepo(self.image, [TENANT1, TENANT2]) - - def test_add_updates_acls(self): - self.image_stub.locations = [{'url': 'foo', 'metadata': {}, - 'status': 'active'}, - {'url': 'bar', 'metadata': {}, - 'status': 'active'}] - self.image_stub.visibility = 'public' - self.image_repo.add(self.image) - self.assertTrue(self.store_api.acls['foo']['public']) - self.assertEqual([], self.store_api.acls['foo']['read']) - self.assertEqual([], self.store_api.acls['foo']['write']) - self.assertTrue(self.store_api.acls['bar']['public']) - self.assertEqual([], self.store_api.acls['bar']['read']) - self.assertEqual([], self.store_api.acls['bar']['write']) - - def test_add_ignores_acls_if_no_locations(self): - self.image_stub.locations = [] - self.image_stub.visibility = 'public' - 
self.image_repo.add(self.image) - self.assertEqual(0, len(self.store_api.acls)) - - def test_save_updates_acls(self): - self.image_stub.locations = [{'url': 'foo', 'metadata': {}, - 'status': 'active'}] - self.image_repo.save(self.image) - self.assertIn('foo', self.store_api.acls) - - def test_add_fetches_members_if_private(self): - self.image_stub.locations = [{'url': 'glue', 'metadata': {}, - 'status': 'active'}] - self.image_stub.visibility = 'private' - self.image_repo.add(self.image) - self.assertIn('glue', self.store_api.acls) - acls = self.store_api.acls['glue'] - self.assertFalse(acls['public']) - self.assertEqual([], acls['write']) - self.assertEqual([TENANT1, TENANT2], acls['read']) - - def test_save_fetches_members_if_private(self): - self.image_stub.locations = [{'url': 'glue', 'metadata': {}, - 'status': 'active'}] - self.image_stub.visibility = 'private' - self.image_repo.save(self.image) - self.assertIn('glue', self.store_api.acls) - acls = self.store_api.acls['glue'] - self.assertFalse(acls['public']) - self.assertEqual([], acls['write']) - self.assertEqual([TENANT1, TENANT2], acls['read']) - - def test_member_addition_updates_acls(self): - self.image_stub.locations = [{'url': 'glug', 'metadata': {}, - 'status': 'active'}] - self.image_stub.visibility = 'private' - membership = glance.domain.ImageMembership( - UUID1, TENANT3, None, None, status='accepted') - self.image_member_repo.add(membership) - self.assertIn('glug', self.store_api.acls) - acls = self.store_api.acls['glug'] - self.assertFalse(acls['public']) - self.assertEqual([], acls['write']) - self.assertEqual([TENANT1, TENANT2, TENANT3], acls['read']) - - def test_member_removal_updates_acls(self): - self.image_stub.locations = [{'url': 'glug', 'metadata': {}, - 'status': 'active'}] - self.image_stub.visibility = 'private' - membership = glance.domain.ImageMembership( - UUID1, TENANT1, None, None, status='accepted') - self.image_member_repo.remove(membership) - self.assertIn('glug', 
self.store_api.acls) - acls = self.store_api.acls['glug'] - self.assertFalse(acls['public']) - self.assertEqual([], acls['write']) - self.assertEqual([TENANT2], acls['read']) - - -class TestImageFactory(unit_test_base.StoreClearingUnitTest): - - def setUp(self): - super(TestImageFactory, self).setUp() - store_api = unit_test_utils.FakeStoreAPI() - store_utils = unit_test_utils.FakeStoreUtils(store_api) - self.image_factory = glance.location.ImageFactoryProxy( - ImageFactoryStub(), - glance.context.RequestContext(user=USER1), - store_api, - store_utils) - - def test_new_image(self): - image = self.image_factory.new_image() - self.assertIsNone(image.image_id) - self.assertIsNone(image.status) - self.assertEqual('private', image.visibility) - self.assertEqual([], image.locations) - - def test_new_image_with_location(self): - locations = [{'url': '%s/%s' % (BASE_URI, UUID1), - 'metadata': {}}] - image = self.image_factory.new_image(locations=locations) - self.assertEqual(locations, image.locations) - location_bad = {'url': 'unknown://location', 'metadata': {}} - self.assertRaises(exception.BadStoreUri, - self.image_factory.new_image, - locations=[location_bad]) - - -class TestStoreMetaDataChecker(utils.BaseTestCase): - - def test_empty(self): - glance_store.check_location_metadata({}) - - def test_unicode(self): - m = {'key': u'somevalue'} - glance_store.check_location_metadata(m) - - def test_unicode_list(self): - m = {'key': [u'somevalue', u'2']} - glance_store.check_location_metadata(m) - - def test_unicode_dict(self): - inner = {'key1': u'somevalue', 'key2': u'somevalue'} - m = {'topkey': inner} - glance_store.check_location_metadata(m) - - def test_unicode_dict_list(self): - inner = {'key1': u'somevalue', 'key2': u'somevalue'} - m = {'topkey': inner, 'list': [u'somevalue', u'2'], 'u': u'2'} - glance_store.check_location_metadata(m) - - def test_nested_dict(self): - inner = {'key1': u'somevalue', 'key2': u'somevalue'} - inner = {'newkey': inner} - inner = 
{'anotherkey': inner} - m = {'topkey': inner} - glance_store.check_location_metadata(m) - - def test_simple_bad(self): - m = {'key1': object()} - self.assertRaises(glance_store.BackendException, - glance_store.check_location_metadata, - m) - - def test_list_bad(self): - m = {'key1': [u'somevalue', object()]} - self.assertRaises(glance_store.BackendException, - glance_store.check_location_metadata, - m) - - def test_nested_dict_bad(self): - inner = {'key1': u'somevalue', 'key2': object()} - inner = {'newkey': inner} - inner = {'anotherkey': inner} - m = {'topkey': inner} - - self.assertRaises(glance_store.BackendException, - glance_store.check_location_metadata, - m) diff --git a/glance/tests/unit/test_store_location.py b/glance/tests/unit/test_store_location.py deleted file mode 100644 index 1d9dbdd7..00000000 --- a/glance/tests/unit/test_store_location.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2011-2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import glance_store -import mock - -from glance.common import exception -from glance.common import store_utils -import glance.location -from glance.tests.unit import base - - -CONF = {'default_store': 'file', - 'swift_store_auth_address': 'localhost:8080', - 'swift_store_container': 'glance', - 'swift_store_user': 'user', - 'swift_store_key': 'key', - 'default_swift_reference': 'store_1' - } - - -class TestStoreLocation(base.StoreClearingUnitTest): - - class FakeImageProxy(object): - size = None - context = None - store_api = mock.Mock() - store_utils = store_utils - - def test_add_location_for_image_without_size(self): - - def fake_get_size_from_backend(uri, context=None): - return 1 - - self.stubs.Set(glance_store, 'get_size_from_backend', - fake_get_size_from_backend) - - with mock.patch('glance.location._check_image_location'): - loc1 = {'url': 'file:///fake1.img.tar.gz', 'metadata': {}} - loc2 = {'url': 'file:///fake2.img.tar.gz', 'metadata': {}} - - # Test for insert location - image1 = TestStoreLocation.FakeImageProxy() - locations = glance.location.StoreLocations(image1, []) - locations.insert(0, loc2) - self.assertEqual(1, image1.size) - - # Test for set_attr of _locations_proxy - image2 = TestStoreLocation.FakeImageProxy() - locations = glance.location.StoreLocations(image2, [loc1]) - locations[0] = loc2 - self.assertIn(loc2, locations) - self.assertEqual(1, image2.size) - - def test_add_location_with_restricted_sources(self): - - loc1 = {'url': 'file:///fake1.img.tar.gz', 'metadata': {}} - loc2 = {'url': 'swift+config:///xxx', 'metadata': {}} - loc3 = {'url': 'filesystem:///foo.img.tar.gz', 'metadata': {}} - - # Test for insert location - image1 = TestStoreLocation.FakeImageProxy() - locations = glance.location.StoreLocations(image1, []) - self.assertRaises(exception.BadStoreUri, locations.insert, 0, loc1) - self.assertRaises(exception.BadStoreUri, locations.insert, 0, loc3) - self.assertNotIn(loc1, locations) - self.assertNotIn(loc3, locations) - - # 
Test for set_attr of _locations_proxy - image2 = TestStoreLocation.FakeImageProxy() - locations = glance.location.StoreLocations(image2, [loc1]) - self.assertRaises(exception.BadStoreUri, locations.insert, 0, loc2) - self.assertNotIn(loc2, locations) diff --git a/glance/tests/unit/test_versions.py b/glance/tests/unit/test_versions.py deleted file mode 100644 index aed722a7..00000000 --- a/glance/tests/unit/test_versions.py +++ /dev/null @@ -1,383 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_serialization import jsonutils -from six.moves import http_client as http -import webob - -from glance.api.middleware import version_negotiation -from glance.api import versions -from glance.common.wsgi import Request as WsgiRequest -from glance.tests.unit import base - - -class VersionsTest(base.IsolatedUnitTest): - - """Test the version information returned from the API service.""" - - def test_get_version_list(self): - req = webob.Request.blank('/', base_url='http://127.0.0.1:9292/') - req.accept = 'application/json' - self.config(bind_host='127.0.0.1', bind_port=9292) - res = versions.Controller().index(req) - self.assertEqual(http.MULTIPLE_CHOICES, res.status_int) - self.assertEqual('application/json', res.content_type) - results = jsonutils.loads(res.body)['versions'] - expected = [ - { - 'id': 'v2.5', - 'status': 'CURRENT', - 'links': [{'rel': 'self', - 'href': 'http://127.0.0.1:9292/v2/'}], - }, - { - 'id': 'v2.4', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'http://127.0.0.1:9292/v2/'}], - }, - { - 'id': 'v2.3', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'http://127.0.0.1:9292/v2/'}], - }, - { - 'id': 'v2.2', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'http://127.0.0.1:9292/v2/'}], - }, - { - 'id': 'v2.1', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'http://127.0.0.1:9292/v2/'}], - }, - { - 'id': 'v2.0', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'http://127.0.0.1:9292/v2/'}], - }, - { - 'id': 'v1.1', - 'status': 'DEPRECATED', - 'links': [{'rel': 'self', - 'href': 'http://127.0.0.1:9292/v1/'}], - }, - { - 'id': 'v1.0', - 'status': 'DEPRECATED', - 'links': [{'rel': 'self', - 'href': 'http://127.0.0.1:9292/v1/'}], - }, - ] - self.assertEqual(expected, results) - - def test_get_version_list_public_endpoint(self): - req = webob.Request.blank('/', base_url='http://127.0.0.1:9292/') - req.accept = 'application/json' - self.config(bind_host='127.0.0.1', 
bind_port=9292, - public_endpoint='https://example.com:9292') - res = versions.Controller().index(req) - self.assertEqual(http.MULTIPLE_CHOICES, res.status_int) - self.assertEqual('application/json', res.content_type) - results = jsonutils.loads(res.body)['versions'] - expected = [ - { - 'id': 'v2.5', - 'status': 'CURRENT', - 'links': [{'rel': 'self', - 'href': 'https://example.com:9292/v2/'}], - }, - { - 'id': 'v2.4', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'https://example.com:9292/v2/'}], - }, - { - 'id': 'v2.3', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'https://example.com:9292/v2/'}], - }, - { - 'id': 'v2.2', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'https://example.com:9292/v2/'}], - }, - { - 'id': 'v2.1', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'https://example.com:9292/v2/'}], - }, - { - 'id': 'v2.0', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'https://example.com:9292/v2/'}], - }, - { - 'id': 'v1.1', - 'status': 'DEPRECATED', - 'links': [{'rel': 'self', - 'href': 'https://example.com:9292/v1/'}], - }, - { - 'id': 'v1.0', - 'status': 'DEPRECATED', - 'links': [{'rel': 'self', - 'href': 'https://example.com:9292/v1/'}], - }, - ] - self.assertEqual(expected, results) - - def test_get_version_list_secure_proxy_ssl_header(self): - self.config(secure_proxy_ssl_header='HTTP_X_FORWARDED_PROTO') - environ = webob.request.environ_from_url('http://localhost:9292') - req = WsgiRequest(environ) - res = versions.Controller().index(req) - self.assertEqual(http.MULTIPLE_CHOICES, res.status_int) - self.assertEqual('application/json', res.content_type) - results = jsonutils.loads(res.body)['versions'] - expected = [ - { - 'id': 'v2.5', - 'status': 'CURRENT', - 'links': [{'rel': 'self', - 'href': 'http://localhost:9292/v2/'}], - }, - { - 'id': 'v2.4', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'http://localhost:9292/v2/'}], - }, - { - 'id': 'v2.3', 
- 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'http://localhost:9292/v2/'}], - }, - { - 'id': 'v2.2', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'http://localhost:9292/v2/'}], - }, - { - 'id': 'v2.1', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'http://localhost:9292/v2/'}], - }, - { - 'id': 'v2.0', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'http://localhost:9292/v2/'}], - }, - { - 'id': 'v1.1', - 'status': 'DEPRECATED', - 'links': [{'rel': 'self', - 'href': 'http://localhost:9292/v1/'}], - }, - { - 'id': 'v1.0', - 'status': 'DEPRECATED', - 'links': [{'rel': 'self', - 'href': 'http://localhost:9292/v1/'}], - }, - ] - self.assertEqual(expected, results) - - def test_get_version_list_secure_proxy_ssl_header_https(self): - self.config(secure_proxy_ssl_header='HTTP_X_FORWARDED_PROTO') - environ = webob.request.environ_from_url('http://localhost:9292') - environ['HTTP_X_FORWARDED_PROTO'] = "https" - req = WsgiRequest(environ) - res = versions.Controller().index(req) - self.assertEqual(http.MULTIPLE_CHOICES, res.status_int) - self.assertEqual('application/json', res.content_type) - results = jsonutils.loads(res.body)['versions'] - expected = [ - { - 'id': 'v2.5', - 'status': 'CURRENT', - 'links': [{'rel': 'self', - 'href': 'https://localhost:9292/v2/'}], - }, - { - 'id': 'v2.4', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'https://localhost:9292/v2/'}], - }, - { - 'id': 'v2.3', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'https://localhost:9292/v2/'}], - }, - { - 'id': 'v2.2', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'https://localhost:9292/v2/'}], - }, - { - 'id': 'v2.1', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'https://localhost:9292/v2/'}], - }, - { - 'id': 'v2.0', - 'status': 'SUPPORTED', - 'links': [{'rel': 'self', - 'href': 'https://localhost:9292/v2/'}], - }, - { - 'id': 'v1.1', - 'status': 'DEPRECATED', - 
'links': [{'rel': 'self', - 'href': 'https://localhost:9292/v1/'}], - }, - { - 'id': 'v1.0', - 'status': 'DEPRECATED', - 'links': [{'rel': 'self', - 'href': 'https://localhost:9292/v1/'}], - }, - ] - self.assertEqual(expected, results) - - -class VersionNegotiationTest(base.IsolatedUnitTest): - - def setUp(self): - super(VersionNegotiationTest, self).setUp() - self.middleware = version_negotiation.VersionNegotiationFilter(None) - - def test_request_url_v1(self): - request = webob.Request.blank('/v1/images') - self.middleware.process_request(request) - self.assertEqual('/v1/images', request.path_info) - - def test_request_url_v1_0(self): - request = webob.Request.blank('/v1.0/images') - self.middleware.process_request(request) - self.assertEqual('/v1/images', request.path_info) - - def test_request_url_v1_1(self): - request = webob.Request.blank('/v1.1/images') - self.middleware.process_request(request) - self.assertEqual('/v1/images', request.path_info) - - def test_request_accept_v1(self): - request = webob.Request.blank('/images') - request.headers = {'accept': 'application/vnd.openstack.images-v1'} - self.middleware.process_request(request) - self.assertEqual('/v1/images', request.path_info) - - def test_request_url_v2(self): - request = webob.Request.blank('/v2/images') - self.middleware.process_request(request) - self.assertEqual('/v2/images', request.path_info) - - def test_request_url_v2_0(self): - request = webob.Request.blank('/v2.0/images') - self.middleware.process_request(request) - self.assertEqual('/v2/images', request.path_info) - - def test_request_url_v2_1(self): - request = webob.Request.blank('/v2.1/images') - self.middleware.process_request(request) - self.assertEqual('/v2/images', request.path_info) - - def test_request_url_v2_2(self): - request = webob.Request.blank('/v2.2/images') - self.middleware.process_request(request) - self.assertEqual('/v2/images', request.path_info) - - def test_request_url_v2_3(self): - request = 
webob.Request.blank('/v2.3/images') - self.middleware.process_request(request) - self.assertEqual('/v2/images', request.path_info) - - def test_request_url_v2_4(self): - request = webob.Request.blank('/v2.4/images') - self.middleware.process_request(request) - self.assertEqual('/v2/images', request.path_info) - - def test_request_url_v2_5(self): - request = webob.Request.blank('/v2.5/images') - self.middleware.process_request(request) - self.assertEqual('/v2/images', request.path_info) - - def test_request_url_v2_6_unsupported(self): - request = webob.Request.blank('/v2.6/images') - resp = self.middleware.process_request(request) - self.assertIsInstance(resp, versions.Controller) - - def test_request_url_v4_unsupported(self): - request = webob.Request.blank('/v4/images') - resp = self.middleware.process_request(request) - self.assertIsInstance(resp, versions.Controller) - - -class VersionsAndNegotiationTest(VersionNegotiationTest, VersionsTest): - - """ - Test that versions mentioned in the versions response are correctly - negotiated. - """ - - def _get_list_of_version_ids(self, status): - request = webob.Request.blank('/') - request.accept = 'application/json' - response = versions.Controller().index(request) - v_list = jsonutils.loads(response.body)['versions'] - return [v['id'] for v in v_list if v['status'] == status] - - def _assert_version_is_negotiated(self, version_id): - request = webob.Request.blank("/%s/images" % version_id) - self.middleware.process_request(request) - major = version_id.split('.', 1)[0] - expected = "/%s/images" % major - self.assertEqual(expected, request.path_info) - - def test_current_is_negotiated(self): - # NOTE(rosmaita): Bug 1609571: the versions response was correct, but - # the negotiation had not been updated for the CURRENT version. 
- to_check = self._get_list_of_version_ids('CURRENT') - self.assertTrue(to_check) - for version_id in to_check: - self._assert_version_is_negotiated(version_id) - - def test_supported_is_negotiated(self): - to_check = self._get_list_of_version_ids('SUPPORTED') - for version_id in to_check: - self._assert_version_is_negotiated(version_id) - - def test_deprecated_is_negotiated(self): - to_check = self._get_list_of_version_ids('DEPRECATED') - for version_id in to_check: - self._assert_version_is_negotiated(version_id) diff --git a/glance/tests/unit/utils.py b/glance/tests/unit/utils.py deleted file mode 100644 index 83636a03..00000000 --- a/glance/tests/unit/utils.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from cryptography import exceptions as crypto_exception -import glance_store as store -import mock -from oslo_config import cfg -from six.moves import urllib - -from glance.common import exception -from glance.common import store_utils -from glance.common import wsgi -import glance.context -import glance.db.simple.api as simple_db - - -CONF = cfg.CONF - -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' -UUID2 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' - -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' -TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' - -USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' -USER2 = '0b3b3006-cb76-4517-ae32-51397e22c754' -USER3 = '2hss8dkl-d8jh-88yd-uhs9-879sdjsd8skd' - -BASE_URI = 'http://storeurl.com/container' - - -def sort_url_by_qs_keys(url): - # NOTE(kragniz): this only sorts the keys of the query string of a url. - # For example, an input of '/v2/tasks?sort_key=id&sort_dir=asc&limit=10' - # returns '/v2/tasks?limit=10&sort_dir=asc&sort_key=id'. This is to prevent - # non-deterministic ordering of the query string causing problems with unit - # tests. 
- - parsed = urllib.parse.urlparse(url) - queries = urllib.parse.parse_qsl(parsed.query, True) - sorted_query = sorted(queries, key=lambda x: x[0]) - - encoded_sorted_query = urllib.parse.urlencode(sorted_query, True) - - url_parts = (parsed.scheme, parsed.netloc, parsed.path, - parsed.params, encoded_sorted_query, - parsed.fragment) - - return urllib.parse.urlunparse(url_parts) - - -def get_fake_request(path='', method='POST', is_admin=False, user=USER1, - roles=None, tenant=TENANT1): - if roles is None: - roles = ['member'] - - req = wsgi.Request.blank(path) - req.method = method - - kwargs = { - 'user': user, - 'tenant': tenant, - 'roles': roles, - 'is_admin': is_admin, - } - - req.context = glance.context.RequestContext(**kwargs) - return req - - -def fake_get_size_from_backend(uri, context=None): - return 1 - - -def fake_get_verifier(context, img_signature_certificate_uuid, - img_signature_hash_method, img_signature, - img_signature_key_type): - verifier = mock.Mock() - if (img_signature is not None and img_signature == 'VALID'): - verifier.verify.return_value = None - else: - ex = crypto_exception.InvalidSignature() - verifier.verify.side_effect = ex - return verifier - - -class FakeDB(object): - - def __init__(self, initialize=True): - self.reset() - if initialize: - self.init_db() - - @staticmethod - def init_db(): - images = [ - {'id': UUID1, 'owner': TENANT1, 'status': 'queued', - 'locations': [{'url': '%s/%s' % (BASE_URI, UUID1), - 'metadata': {}, 'status': 'queued'}]}, - {'id': UUID2, 'owner': TENANT1, 'status': 'queued'}, - ] - [simple_db.image_create(None, image) for image in images] - - members = [ - {'image_id': UUID1, 'member': TENANT1, 'can_share': True}, - {'image_id': UUID1, 'member': TENANT2, 'can_share': False}, - ] - [simple_db.image_member_create(None, member) for member in members] - - simple_db.image_tag_set_all(None, UUID1, ['ping', 'pong']) - - @staticmethod - def reset(): - simple_db.reset() - - def __getattr__(self, key): - return 
getattr(simple_db, key) - - -class FakeStoreUtils(object): - def __init__(self, store_api): - self.store_api = store_api - - def safe_delete_from_backend(self, context, id, location): - try: - del self.store_api.data[location['url']] - except KeyError: - pass - - def schedule_delayed_delete_from_backend(self, context, id, location): - pass - - def delete_image_location_from_backend(self, context, - image_id, location): - if CONF.delayed_delete: - self.schedule_delayed_delete_from_backend(context, image_id, - location) - else: - self.safe_delete_from_backend(context, image_id, location) - - def validate_external_location(self, uri): - if uri and urllib.parse.urlparse(uri).scheme: - return store_utils.validate_external_location(uri) - else: - return True - - -class FakeStoreAPI(object): - def __init__(self, store_metadata=None): - self.data = { - '%s/%s' % (BASE_URI, UUID1): ('XXX', 3), - '%s/fake_location' % (BASE_URI): ('YYY', 3) - } - self.acls = {} - if store_metadata is None: - self.store_metadata = {} - else: - self.store_metadata = store_metadata - - def create_stores(self): - pass - - def set_acls(self, uri, public=False, read_tenants=None, - write_tenants=None, context=None): - if read_tenants is None: - read_tenants = [] - if write_tenants is None: - write_tenants = [] - - self.acls[uri] = { - 'public': public, - 'read': read_tenants, - 'write': write_tenants, - } - - def get_from_backend(self, location, offset=0, - chunk_size=None, context=None): - try: - scheme = location[:location.find('/') - 1] - if scheme == 'unknown': - raise store.UnknownScheme(scheme=scheme) - return self.data[location] - except KeyError: - raise store.NotFound(image=location) - - def get_size_from_backend(self, location, context=None): - return self.get_from_backend(location, context=context)[1] - - def add_to_backend(self, conf, image_id, data, size, - scheme=None, context=None, verifier=None): - store_max_size = 7 - current_store_size = 2 - for location in self.data.keys(): - if 
image_id in location: - raise exception.Duplicate() - if not size: - # 'data' is a string wrapped in a LimitingReader|CooperativeReader - # pipeline, so peek under the hood of those objects to get at the - # string itself. - size = len(data.data.fd) - if (current_store_size + size) > store_max_size: - raise exception.StorageFull() - if context.user == USER2: - raise exception.Forbidden() - if context.user == USER3: - raise exception.StorageWriteDenied() - self.data[image_id] = (data, size) - checksum = 'Z' - return (image_id, size, checksum, self.store_metadata) - - def check_location_metadata(self, val, key=''): - store.check_location_metadata(val) - - -class FakePolicyEnforcer(object): - def __init__(self, *_args, **kwargs): - self.rules = {} - - def enforce(self, _ctxt, action, target=None, **kwargs): - """Raise Forbidden if a rule for given action is set to false.""" - if self.rules.get(action) is False: - raise exception.Forbidden() - - def set_rules(self, rules): - self.rules = rules - - -class FakeNotifier(object): - def __init__(self, *_args, **kwargs): - self.log = [] - - def _notify(self, event_type, payload, level): - log = { - 'notification_type': level, - 'event_type': event_type, - 'payload': payload - } - self.log.append(log) - - def warn(self, event_type, payload): - self._notify(event_type, payload, 'WARN') - - def info(self, event_type, payload): - self._notify(event_type, payload, 'INFO') - - def error(self, event_type, payload): - self._notify(event_type, payload, 'ERROR') - - def debug(self, event_type, payload): - self._notify(event_type, payload, 'DEBUG') - - def critical(self, event_type, payload): - self._notify(event_type, payload, 'CRITICAL') - - def get_logs(self): - return self.log - - -class FakeGateway(object): - def __init__(self, image_factory=None, image_member_factory=None, - image_repo=None, task_factory=None, task_repo=None): - self.image_factory = image_factory - self.image_member_factory = image_member_factory - 
self.image_repo = image_repo - self.task_factory = task_factory - self.task_repo = task_repo - - def get_image_factory(self, context): - return self.image_factory - - def get_image_member_factory(self, context): - return self.image_member_factory - - def get_repo(self, context): - return self.image_repo - - def get_task_factory(self, context): - return self.task_factory - - def get_task_repo(self, context): - return self.task_repo - - -class FakeTask(object): - def __init__(self, task_id, type=None, status=None): - self.task_id = task_id - self.type = type - self.message = None - self.input = None - self._status = status - self._executor = None - - def success(self, result): - self.result = result - self._status = 'success' - - def fail(self, message): - self.message = message - self._status = 'failure' diff --git a/glance/tests/unit/v1/__init__.py b/glance/tests/unit/v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/unit/v1/test_api.py b/glance/tests/unit/v1/test_api.py deleted file mode 100644 index 589c48c4..00000000 --- a/glance/tests/unit/v1/test_api.py +++ /dev/null @@ -1,4841 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy -import datetime -import hashlib -import os -import signal -import uuid - -import glance_store as store -import mock -from oslo_config import cfg -from oslo_serialization import jsonutils -import routes -import six -from six.moves import http_client -import webob - -import glance.api -import glance.api.common -from glance.api.v1 import router -from glance.api.v1 import upload_utils -import glance.common.config -from glance.common import exception -from glance.common import timeutils -import glance.context -from glance.db.sqlalchemy import api as db_api -from glance.db.sqlalchemy import models as db_models -import glance.registry.client.v1.api as registry -from glance.tests.unit import base -import glance.tests.unit.utils as unit_test_utils -from glance.tests import utils as test_utils - -CONF = cfg.CONF - -_gen_uuid = lambda: str(uuid.uuid4()) - -UUID1 = _gen_uuid() -UUID2 = _gen_uuid() -UUID3 = _gen_uuid() - - -class TestGlanceAPI(base.IsolatedUnitTest): - def setUp(self): - """Establish a clean test environment""" - super(TestGlanceAPI, self).setUp() - self.mapper = routes.Mapper() - self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper)) - self.FIXTURES = [ - {'id': UUID1, - 'name': 'fake image #1', - 'status': 'active', - 'disk_format': 'ami', - 'container_format': 'ami', - 'is_public': False, - 'created_at': timeutils.utcnow(), - 'updated_at': timeutils.utcnow(), - 'deleted_at': None, - 'deleted': False, - 'checksum': None, - 'size': 13, - 'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID1), - 'metadata': {}, 'status': 'active'}], - 'properties': {'type': 'kernel'}}, - {'id': UUID2, - 'name': 'fake image #2', - 'status': 'active', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'is_public': True, - 'created_at': timeutils.utcnow(), - 'updated_at': timeutils.utcnow(), - 'deleted_at': None, - 'deleted': False, - 'checksum': 'abc123', - 'size': 19, - 'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID2), - 
'metadata': {}, 'status': 'active'}], - 'properties': {}}, - {'id': UUID3, - 'name': 'fake image #3', - 'status': 'deactivated', - 'disk_format': 'ami', - 'container_format': 'ami', - 'is_public': False, - 'created_at': timeutils.utcnow(), - 'updated_at': timeutils.utcnow(), - 'deleted_at': None, - 'deleted': False, - 'checksum': '13', - 'size': 13, - 'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID1), - 'metadata': {}, 'status': 'active'}], - 'properties': {}}] - self.context = glance.context.RequestContext(is_admin=True) - db_api.get_engine() - self.destroy_fixtures() - self.addCleanup(self.destroy_fixtures) - self.create_fixtures() - # Used to store/track image status changes for post-analysis - self.image_status = [] - self.http_server_pid = None - self.addCleanup(self._cleanup_server) - ret = test_utils.start_http_server("foo_image_id", b"foo_image") - self.http_server_pid, self.http_port = ret - - def _cleanup_server(self): - if self.http_server_pid is not None: - os.kill(self.http_server_pid, signal.SIGKILL) - - def create_fixtures(self): - for fixture in self.FIXTURES: - db_api.image_create(self.context, fixture) - # We write a fake image file to the filesystem - with open("%s/%s" % (self.test_dir, fixture['id']), 'wb') as image: - image.write(b"chunk00000remainder") - image.flush() - - def destroy_fixtures(self): - # Easiest to just drop the models and re-create them... 
- db_models.unregister_models(db_api.get_engine()) - db_models.register_models(db_api.get_engine()) - - def _do_test_defaulted_format(self, format_key, format_value): - fixture_headers = {'x-image-meta-name': 'defaulted', - 'x-image-meta-location': 'http://localhost:0/image', - format_key: format_value} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - http = store.get_store_from_scheme('http') - - with mock.patch.object(http, 'get_size') as mocked_size: - mocked_size.return_value = 0 - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual(format_value, res_body['disk_format']) - self.assertEqual(format_value, res_body['container_format']) - - def _http_loc_url(self, path): - return 'http://127.0.0.1:%d%s' % (self.http_port, path) - - def test_defaulted_amazon_format(self): - for key in ('x-image-meta-disk-format', - 'x-image-meta-container-format'): - for value in ('aki', 'ari', 'ami'): - self._do_test_defaulted_format(key, value) - - def test_bad_time_create_minus_int(self): - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-created_at': '-42', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_bad_time_create_string(self): - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-created_at': 'foo', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = 
req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_bad_time_create_low_year(self): - # 'strftime' only allows values after 1900 in glance v1 - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-created_at': '1100', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_bad_time_create_string_in_date(self): - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-created_at': '2012-01-01hey12:32:12', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_bad_min_disk_size_create(self): - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-min-disk': '-42', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(b'Invalid value', res.body) - - def test_updating_imageid_after_creation(self): - # Test incorrect/illegal id update - req = webob.Request.blank("/images/%s" % UUID1) - req.method = 'PUT' - req.headers['x-image-meta-id'] = '000000-000-0000-0000-000' - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - # Test using id of another image - req = 
webob.Request.blank("/images/%s" % UUID1) - req.method = 'PUT' - req.headers['x-image-meta-id'] = UUID2 - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_bad_min_disk_size_update(self): - fixture_headers = {'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual('queued', res_body['status']) - image_id = res_body['id'] - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - req.headers['x-image-meta-min-disk'] = '-42' - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(b'Invalid value', res.body) - - def test_invalid_min_disk_size_update(self): - fixture_headers = {'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual('queued', res_body['status']) - image_id = res_body['id'] - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - req.headers['x-image-meta-min-disk'] = str(2 ** 31 + 1) - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_bad_min_ram_size_create(self): - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-min-ram': '-42', - 'x-image-meta-name': 'fake image #3'} - 
- req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(b'Invalid value', res.body) - - def test_bad_min_ram_size_update(self): - fixture_headers = {'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual('queued', res_body['status']) - image_id = res_body['id'] - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - req.headers['x-image-meta-min-ram'] = '-42' - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(b'Invalid value', res.body) - - def test_invalid_min_ram_size_update(self): - fixture_headers = {'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual('queued', res_body['status']) - image_id = res_body['id'] - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - req.headers['x-image-meta-min-ram'] = str(2 ** 31 + 1) - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_bad_disk_format(self): - fixture_headers = { - 'x-image-meta-store': 'bad', - 'x-image-meta-name': 'bogus', - 'x-image-meta-location': 'http://localhost:0/image.tar.gz', 
- 'x-image-meta-disk-format': 'invalid', - 'x-image-meta-container-format': 'ami', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(b'Invalid disk format', res.body) - - def test_configured_disk_format_good(self): - self.config(disk_formats=['foo'], group="image_format") - fixture_headers = { - 'x-image-meta-name': 'bogus', - 'x-image-meta-location': 'http://localhost:0/image.tar.gz', - 'x-image-meta-disk-format': 'foo', - 'x-image-meta-container-format': 'bare', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - http = store.get_store_from_scheme('http') - with mock.patch.object(http, 'get_size') as mocked_size: - mocked_size.return_value = 0 - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - def test_configured_disk_format_bad(self): - self.config(disk_formats=['foo'], group="image_format") - fixture_headers = { - 'x-image-meta-store': 'bad', - 'x-image-meta-name': 'bogus', - 'x-image-meta-location': 'http://localhost:0/image.tar.gz', - 'x-image-meta-disk-format': 'bar', - 'x-image-meta-container-format': 'bare', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(b'Invalid disk format', res.body) - - def test_configured_container_format_good(self): - self.config(container_formats=['foo'], group="image_format") - fixture_headers = { - 'x-image-meta-name': 'bogus', - 'x-image-meta-location': 'http://localhost:0/image.tar.gz', - 'x-image-meta-disk-format': 'raw', - 'x-image-meta-container-format': 'foo', - } - - req = webob.Request.blank("/images") - 
req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - http = store.get_store_from_scheme('http') - - with mock.patch.object(http, 'get_size') as mocked_size: - mocked_size.return_value = 0 - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - def test_configured_container_format_bad(self): - self.config(container_formats=['foo'], group="image_format") - fixture_headers = { - 'x-image-meta-store': 'bad', - 'x-image-meta-name': 'bogus', - 'x-image-meta-location': 'http://localhost:0/image.tar.gz', - 'x-image-meta-disk-format': 'raw', - 'x-image-meta-container-format': 'bar', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(b'Invalid container format', res.body) - - def test_container_and_disk_amazon_format_differs(self): - fixture_headers = { - 'x-image-meta-store': 'bad', - 'x-image-meta-name': 'bogus', - 'x-image-meta-location': 'http://localhost:0/image.tar.gz', - 'x-image-meta-disk-format': 'aki', - 'x-image-meta-container-format': 'ami'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - expected = (b"Invalid mix of disk and container formats. 
" - b"When setting a disk or container format to one of " - b"'aki', 'ari', or 'ami', " - b"the container and disk formats must match.") - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(expected, res.body) - - def test_create_with_location_no_container_format(self): - fixture_headers = { - 'x-image-meta-name': 'bogus', - 'x-image-meta-location': 'http://localhost:0/image.tar.gz', - 'x-image-meta-disk-format': 'vhd', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - http = store.get_store_from_scheme('http') - - with mock.patch.object(http, 'get_size') as mocked_size: - mocked_size.return_value = 0 - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(b'Container format is not specified', res.body) - - def test_create_with_location_no_disk_format(self): - fixture_headers = { - 'x-image-meta-name': 'bogus', - 'x-image-meta-location': 'http://localhost:0/image.tar.gz', - 'x-image-meta-container-format': 'bare', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - http = store.get_store_from_scheme('http') - - with mock.patch.object(http, 'get_size') as mocked_size: - mocked_size.return_value = 0 - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(b'Disk format is not specified', res.body) - - def test_create_with_empty_location(self): - fixture_headers = { - 'x-image-meta-location': '', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_create_with_empty_copy_from(self): - fixture_headers = { - 'x-glance-api-copy-from': '', - } - - req = webob.Request.blank("/images") 
- req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_create_delayed_image_with_no_disk_and_container_formats(self): - fixture_headers = { - 'x-image-meta-name': 'delayed', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - http = store.get_store_from_scheme('http') - - with mock.patch.object(http, 'get_size') as mocked_size: - mocked_size.return_value = 0 - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - def test_create_with_bad_store_name(self): - fixture_headers = { - 'x-image-meta-store': 'bad', - 'x-image-meta-name': 'bogus', - 'x-image-meta-disk-format': 'qcow2', - 'x-image-meta-container-format': 'bare', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(b'Required store bad is invalid', res.body) - - @mock.patch.object(glance.api.v1.images.Controller, '_external_source') - @mock.patch.object(store, 'get_store_from_location') - def test_create_with_location_get_store_or_400_raises_exception( - self, mock_get_store_from_location, mock_external_source): - location = 'bad+scheme://localhost:0/image.qcow2' - scheme = 'bad+scheme' - fixture_headers = { - 'x-image-meta-name': 'bogus', - 'x-image-meta-location': location, - 'x-image-meta-disk-format': 'qcow2', - 'x-image-meta-container-format': 'bare', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - mock_external_source.return_value = location - mock_get_store_from_location.return_value = scheme - - res = req.get_response(self.api) - 
self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(1, mock_external_source.call_count) - self.assertEqual(1, mock_get_store_from_location.call_count) - self.assertIn('Store for scheme %s not found' % scheme, - res.body.decode('utf-8')) - - def test_create_with_location_unknown_scheme(self): - fixture_headers = { - 'x-image-meta-store': 'bad', - 'x-image-meta-name': 'bogus', - 'x-image-meta-location': 'bad+scheme://localhost:0/image.qcow2', - 'x-image-meta-disk-format': 'qcow2', - 'x-image-meta-container-format': 'bare', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(b'External sources are not supported', res.body) - - def test_create_with_location_bad_store_uri(self): - fixture_headers = { - 'x-image-meta-store': 'file', - 'x-image-meta-name': 'bogus', - 'x-image-meta-location': 'http://', - 'x-image-meta-disk-format': 'qcow2', - 'x-image-meta-container-format': 'bare', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(b'Invalid location', res.body) - - def test_create_image_with_too_many_properties(self): - self.config(image_property_quota=1) - another_request = unit_test_utils.get_fake_request( - path='/images', method='POST') - headers = {'x-auth-token': 'user:tenant:joe_soap', - 'x-image-meta-property-x_all_permitted': '1', - 'x-image-meta-property-x_all_permitted_foo': '2'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, - output.status_int) - - def test_bad_container_format(self): - fixture_headers = { - 
'x-image-meta-store': 'bad', - 'x-image-meta-name': 'bogus', - 'x-image-meta-location': 'http://localhost:0/image.tar.gz', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'invalid', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(b'Invalid container format', res.body) - - def test_bad_image_size(self): - fixture_headers = { - 'x-image-meta-store': 'bad', - 'x-image-meta-name': 'bogus', - 'x-image-meta-location': self._http_loc_url('/image.tar.gz'), - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'bare', - } - - def exec_bad_size_test(bad_size, expected_substr): - fixture_headers['x-image-meta-size'] = bad_size - req = webob.Request.blank("/images", - method='POST', - headers=fixture_headers) - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(expected_substr, res.body) - - expected = b"Cannot convert image size 'invalid' to an integer." - exec_bad_size_test('invalid', expected) - expected = b"Cannot be a negative value." 
- exec_bad_size_test(-10, expected) - - def test_bad_image_name(self): - fixture_headers = { - 'x-image-meta-store': 'bad', - 'x-image-meta-name': 'X' * 256, - 'x-image-meta-location': self._http_loc_url('/image.tar.gz'), - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'bare', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_add_image_no_location_no_image_as_body(self): - """Tests creates a queued image for no body and no loc header""" - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3', - 'x-image-created_at': '2015-11-20', - 'x-image-updated_at': '2015-12-01 12:10:01', - 'x-image-deleted_at': '2000'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual('queued', res_body['status']) - image_id = res_body['id'] - - # Test that we are able to edit the Location field - # per LP Bug #911599 - - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - req.headers['x-image-meta-location'] = 'http://localhost:0/images/123' - - http = store.get_store_from_scheme('http') - - with mock.patch.object(http, 'get_size') as mocked_size: - mocked_size.return_value = 0 - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - # Once the location is set, the image should be activated - # see LP Bug #939484 - self.assertEqual('active', res_body['status']) - self.assertNotIn('location', res_body) # location never shown - - def 
test_add_image_no_location_no_content_type(self): - """Tests creates a queued image for no body and no loc header""" - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - req.body = b"chunk00000remainder" - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_add_image_size_header_too_big(self): - """Tests raises BadRequest for supplied image size that is too big""" - fixture_headers = {'x-image-meta-size': CONF.image_size_cap + 1, - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_add_image_size_chunked_data_too_big(self): - self.config(image_size_cap=512) - fixture_headers = { - 'x-image-meta-name': 'fake image #3', - 'x-image-meta-container_format': 'ami', - 'x-image-meta-disk_format': 'ami', - 'transfer-encoding': 'chunked', - 'content-type': 'application/octet-stream', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - - req.body_file = six.StringIO('X' * (CONF.image_size_cap + 1)) - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, res.status_int) - - def test_add_image_size_data_too_big(self): - self.config(image_size_cap=512) - fixture_headers = { - 'x-image-meta-name': 'fake image #3', - 'x-image-meta-container_format': 'ami', - 'x-image-meta-disk_format': 'ami', - 'content-type': 'application/octet-stream', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - - req.body = b'X' * 
(CONF.image_size_cap + 1) - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_add_image_size_header_exceed_quota(self): - quota = 500 - self.config(user_storage_quota=str(quota)) - fixture_headers = {'x-image-meta-size': quota + 1, - 'x-image-meta-name': 'fake image #3', - 'x-image-meta-container_format': 'bare', - 'x-image-meta-disk_format': 'qcow2', - 'content-type': 'application/octet-stream', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - req.body = b'X' * (quota + 1) - res = req.get_response(self.api) - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, res.status_int) - - def test_add_image_size_data_exceed_quota(self): - quota = 500 - self.config(user_storage_quota=str(quota)) - fixture_headers = { - 'x-image-meta-name': 'fake image #3', - 'x-image-meta-container_format': 'bare', - 'x-image-meta-disk_format': 'qcow2', - 'content-type': 'application/octet-stream', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - - req.body = b'X' * (quota + 1) - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, res.status_int) - - def test_add_image_size_data_exceed_quota_readd(self): - quota = 500 - self.config(user_storage_quota=str(quota)) - fixture_headers = { - 'x-image-meta-name': 'fake image #3', - 'x-image-meta-container_format': 'bare', - 'x-image-meta-disk_format': 'qcow2', - 'content-type': 'application/octet-stream', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - req.body = b'X' * (quota + 1) - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, res.status_int) - - used_size = sum([f['size'] for f 
in self.FIXTURES]) - - req = webob.Request.blank("/images") - req.method = 'POST' - req.body = b'X' * (quota - used_size) - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - def _add_check_no_url_info(self): - - fixture_headers = {'x-image-meta-disk-format': 'ami', - 'x-image-meta-container-format': 'ami', - 'x-image-meta-size': '0', - 'x-image-meta-name': 'empty image'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - res_body = jsonutils.loads(res.body)['image'] - self.assertNotIn('locations', res_body) - self.assertNotIn('direct_url', res_body) - image_id = res_body['id'] - - # HEAD empty image - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertNotIn('x-image-meta-locations', res.headers) - self.assertNotIn('x-image-meta-direct_url', res.headers) - - def test_add_check_no_url_info_ml(self): - self.config(show_multiple_locations=True) - self._add_check_no_url_info() - - def test_add_check_no_url_info_direct_url(self): - self.config(show_image_direct_url=True) - self._add_check_no_url_info() - - def test_add_check_no_url_info_both_on(self): - self.config(show_image_direct_url=True) - self.config(show_multiple_locations=True) - self._add_check_no_url_info() - - def test_add_check_no_url_info_both_off(self): - self._add_check_no_url_info() - - def test_add_image_zero_size(self): - """Tests creating an active image with explicitly zero size""" - fixture_headers = {'x-image-meta-disk-format': 'ami', - 'x-image-meta-container-format': 'ami', - 'x-image-meta-size': '0', - 'x-image-meta-name': 'empty image'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - 
req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual('active', res_body['status']) - image_id = res_body['id'] - - # GET empty image - req = webob.Request.blank("/images/%s" % image_id) - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(0, len(res.body)) - - def _do_test_add_image_attribute_mismatch(self, attributes): - fixture_headers = { - 'x-image-meta-name': 'fake image #3', - } - fixture_headers.update(attributes) - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - req.headers['Content-Type'] = 'application/octet-stream' - req.body = b"XXXX" - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_add_image_checksum_mismatch(self): - attributes = { - 'x-image-meta-checksum': 'asdf', - } - self._do_test_add_image_attribute_mismatch(attributes) - - def test_add_image_size_mismatch(self): - attributes = { - 'x-image-meta-size': str(len("XXXX") + 1), - } - self._do_test_add_image_attribute_mismatch(attributes) - - def test_add_image_checksum_and_size_mismatch(self): - attributes = { - 'x-image-meta-checksum': 'asdf', - 'x-image-meta-size': str(len("XXXX") + 1), - } - self._do_test_add_image_attribute_mismatch(attributes) - - def test_add_image_bad_store(self): - """Tests raises BadRequest for invalid store header""" - fixture_headers = {'x-image-meta-store': 'bad', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - req.headers['Content-Type'] = 'application/octet-stream' - req.body = b"chunk00000remainder" - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def 
test_add_image_basic_file_store(self): - """Tests to add a basic image in the file store""" - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - req.headers['Content-Type'] = 'application/octet-stream' - req.body = b"chunk00000remainder" - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - # Test that the Location: header is set to the URI to - # edit the newly-created image, as required by APP. - # See LP Bug #719825 - self.assertIn('location', res.headers, - "'location' not in response headers.\n" - "res.headerlist = %r" % res.headerlist) - res_body = jsonutils.loads(res.body)['image'] - self.assertIn('/images/%s' % res_body['id'], res.headers['location']) - self.assertEqual('active', res_body['status']) - image_id = res_body['id'] - - # Test that we are NOT able to edit the Location field - # per LP Bug #911599 - - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - url = self._http_loc_url('/images/123') - req.headers['x-image-meta-location'] = url - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_add_image_unauthorized(self): - rules = {"add_image": '!'} - self.set_policy_rules(rules) - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - req.headers['Content-Type'] = 'application/octet-stream' - req.body = b"chunk00000remainder" - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def 
test_add_publicize_image_unauthorized(self): - rules = {"add_image": '@', "modify_image": '@', - "publicize_image": '!'} - self.set_policy_rules(rules) - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-is-public': 'true', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - req.headers['Content-Type'] = 'application/octet-stream' - req.body = b"chunk00000remainder" - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_add_publicize_image_authorized(self): - rules = {"add_image": '@', "modify_image": '@', - "publicize_image": '@', "upload_image": '@'} - self.set_policy_rules(rules) - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-is-public': 'true', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - req.headers['Content-Type'] = 'application/octet-stream' - req.body = b"chunk00000remainder" - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - def test_add_copy_from_image_unauthorized(self): - rules = {"add_image": '@', "copy_from": '!'} - self.set_policy_rules(rules) - url = self._http_loc_url('/i.ovf') - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-glance-api-copy-from': url, - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #F'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - req.headers['Content-Type'] = 'application/octet-stream' - req.body = b"chunk00000remainder" - res = 
req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_add_copy_from_upload_image_unauthorized(self): - rules = {"add_image": '@', "copy_from": '@', "upload_image": '!'} - self.set_policy_rules(rules) - url = self._http_loc_url('/i.ovf') - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-glance-api-copy-from': url, - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #F'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - req.headers['Content-Type'] = 'application/octet-stream' - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_add_copy_from_image_authorized_upload_image_authorized(self): - rules = {"add_image": '@', "copy_from": '@', "upload_image": '@'} - self.set_policy_rules(rules) - url = self._http_loc_url('/i.ovf') - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-glance-api-copy-from': url, - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #F'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - req.headers['Content-Type'] = 'application/octet-stream' - http = store.get_store_from_scheme('http') - - with mock.patch.object(http, 'get_size') as mock_size: - mock_size.return_value = 0 - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - def test_upload_image_http_nonexistent_location_url(self): - # Ensure HTTP 404 response returned when try to upload - # image from non-existent http location URL. 
- rules = {"add_image": '@', "copy_from": '@', "upload_image": '@'} - self.set_policy_rules(rules) - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-glance-api-copy-from': - self._http_loc_url('/non_existing_image_path'), - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #F'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - req.headers['Content-Type'] = 'application/octet-stream' - res = req.get_response(self.api) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - def test_add_copy_from_with_nonempty_body(self): - """Tests creates an image from copy-from and nonempty body""" - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-glance-api-copy-from': 'http://0.0.0.0:1/c.ovf', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #F'} - - req = webob.Request.blank("/images") - req.headers['Content-Type'] = 'application/octet-stream' - req.method = 'POST' - req.body = b"chunk00000remainder" - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_add_location_with_nonempty_body(self): - """Tests creates an image from location and nonempty body""" - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-location': 'http://0.0.0.0:1/c.tgz', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #F'} - - req = webob.Request.blank("/images") - req.headers['Content-Type'] = 'application/octet-stream' - req.method = 'POST' - req.body = b"chunk00000remainder" - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def 
test_add_location_with_conflict_image_size(self): - """Tests creates an image from location and conflict image size""" - - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-location': 'http://a/b/c.tar.gz', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #F', - 'x-image-meta-size': '1'} - - req = webob.Request.blank("/images") - req.headers['Content-Type'] = 'application/octet-stream' - req.method = 'POST' - - http = store.get_store_from_scheme('http') - - with mock.patch.object(http, 'get_size') as size: - size.return_value = 2 - - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.CONFLICT, res.status_int) - - def test_add_location_with_invalid_location_on_conflict_image_size(self): - """Tests creates an image from location and conflict image size""" - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-location': 'http://0.0.0.0:1/c.tgz', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #F', - 'x-image-meta-size': '1'} - - req = webob.Request.blank("/images") - req.headers['Content-Type'] = 'application/octet-stream' - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_add_location_with_invalid_location_on_restricted_sources(self): - """Tests creates an image from location and restricted sources""" - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-location': 'file:///etc/passwd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #F'} - - req = webob.Request.blank("/images") - req.headers['Content-Type'] = 'application/octet-stream' - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - 
req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-location': 'swift+config://xxx', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #F'} - - req = webob.Request.blank("/images") - req.headers['Content-Type'] = 'application/octet-stream' - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_create_image_with_nonexistent_location_url(self): - # Ensure HTTP 404 response returned when try to create - # image with non-existent http location URL. - - fixture_headers = { - 'x-image-meta-name': 'bogus', - 'x-image-meta-location': - self._http_loc_url('/non_existing_image_path'), - 'x-image-meta-disk-format': 'qcow2', - 'x-image-meta-container-format': 'bare', - } - req = webob.Request.blank("/images") - req.method = 'POST' - - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - def test_add_copy_from_with_location(self): - """Tests creates an image from copy-from and location""" - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-glance-api-copy-from': 'http://0.0.0.0:1/c.ovf', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #F', - 'x-image-meta-location': 'http://0.0.0.0:1/c.tgz'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_add_copy_from_with_restricted_sources(self): - """Tests creates an image from copy-from with restricted sources""" - header_template = 
{'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #F'} - - schemas = ["file:///etc/passwd", - "swift+config:///xxx", - "filesystem:///etc/passwd"] - - for schema in schemas: - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(header_template): - req.headers[k] = v - req.headers['x-glance-api-copy-from'] = schema - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_add_copy_from_upload_image_unauthorized_with_body(self): - rules = {"upload_image": '!', "modify_image": '@', - "add_image": '@'} - self.set_policy_rules(rules) - self.config(image_size_cap=512) - fixture_headers = { - 'x-image-meta-name': 'fake image #3', - 'x-image-meta-container_format': 'ami', - 'x-image-meta-disk_format': 'ami', - 'transfer-encoding': 'chunked', - 'content-type': 'application/octet-stream', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - - req.body_file = six.StringIO('X' * (CONF.image_size_cap)) - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_update_data_upload_bad_store_uri(self): - fixture_headers = {'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual('queued', res_body['status']) - image_id = res_body['id'] - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/octet-stream' - req.headers['x-image-disk-format'] = 'vhd' - req.headers['x-image-container-format'] = 'ovf' - req.headers['x-image-meta-location'] = 'http://' 
- res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIn(b'Invalid location', res.body) - - def test_update_data_upload_image_unauthorized(self): - rules = {"upload_image": '!', "modify_image": '@', - "add_image": '@'} - self.set_policy_rules(rules) - """Tests creates a queued image for no body and no loc header""" - self.config(image_size_cap=512) - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual('queued', res_body['status']) - image_id = res_body['id'] - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/octet-stream' - req.headers['transfer-encoding'] = 'chunked' - req.headers['x-image-disk-format'] = 'vhd' - req.headers['x-image-container-format'] = 'ovf' - req.body_file = six.StringIO('X' * (CONF.image_size_cap)) - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_update_copy_from_upload_image_unauthorized(self): - rules = {"upload_image": '!', "modify_image": '@', - "add_image": '@', "copy_from": '@'} - self.set_policy_rules(rules) - - fixture_headers = {'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual('queued', res_body['status']) - image_id = res_body['id'] - req = webob.Request.blank("/images/%s" % image_id) - 
req.method = 'PUT' - req.headers['Content-Type'] = 'application/octet-stream' - req.headers['x-glance-api-copy-from'] = self._http_loc_url('/i.ovf') - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_update_copy_from_unauthorized(self): - rules = {"upload_image": '@', "modify_image": '@', - "add_image": '@', "copy_from": '!'} - self.set_policy_rules(rules) - - fixture_headers = {'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual('queued', res_body['status']) - image_id = res_body['id'] - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/octet-stream' - req.headers['x-glance-api-copy-from'] = self._http_loc_url('/i.ovf') - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def _do_test_post_image_content_missing_format(self, missing): - """Tests creation of an image with missing format""" - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - header = 'x-image-meta-' + missing.replace('_', '-') - - del fixture_headers[header] - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - req.headers['Content-Type'] = 'application/octet-stream' - req.body = b"chunk00000remainder" - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_post_image_content_missing_disk_format(self): - """Tests creation of an image with 
missing disk format""" - self._do_test_post_image_content_missing_format('disk_format') - - def test_post_image_content_missing_container_type(self): - """Tests creation of an image with missing container format""" - self._do_test_post_image_content_missing_format('container_format') - - def _do_test_put_image_content_missing_format(self, missing): - """Tests delayed activation of an image with missing format""" - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - header = 'x-image-meta-' + missing.replace('_', '-') - - del fixture_headers[header] - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual('queued', res_body['status']) - image_id = res_body['id'] - - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - req.headers['Content-Type'] = 'application/octet-stream' - req.body = b"chunk00000remainder" - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_put_image_content_missing_disk_format(self): - """Tests delayed activation of image with missing disk format""" - self._do_test_put_image_content_missing_format('disk_format') - - def test_put_image_content_missing_container_type(self): - """Tests delayed activation of image with missing container format""" - self._do_test_put_image_content_missing_format('container_format') - - def test_download_deactivated_images(self): - """Tests exception raised trying to download a deactivated image""" - req = webob.Request.blank("/images/%s" % UUID3) - req.method = 'GET' - res = req.get_response(self.api) - 
self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_update_deleted_image(self): - """Tests that exception raised trying to update a deleted image""" - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - fixture = {'name': 'test_del_img'} - req = webob.Request.blank('/images/%s' % UUID2) - req.method = 'PUT' - req.content_type = 'application/json' - req.body = jsonutils.dump_as_bytes(dict(image=fixture)) - - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - self.assertIn(b'Forbidden to update deleted image', res.body) - - def test_delete_deleted_image(self): - """Tests that exception raised trying to delete a deleted image""" - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - # Verify the status is 'deleted' - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual("deleted", res.headers['x-image-meta-status']) - - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - msg = "Image %s not found." % UUID2 - self.assertIn(msg, res.body.decode()) - - # Verify the status is still 'deleted' - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual("deleted", res.headers['x-image-meta-status']) - - def test_image_status_when_delete_fails(self): - """ - Tests that the image status set to active if deletion of image fails. 
- """ - - fs = store.get_store_from_scheme('file') - - with mock.patch.object(fs, 'delete') as mock_fsstore_delete: - mock_fsstore_delete.side_effect = exception.Forbidden() - - # trigger the v1 delete api - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - self.assertIn(b'Forbidden to delete image', res.body) - - # check image metadata is still there with active state - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual("active", res.headers['x-image-meta-status']) - - def test_delete_pending_delete_image(self): - """ - Tests that correct response returned when deleting - a pending_delete image - """ - # First deletion - self.config(delayed_delete=True) - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - # Verify the status is 'pending_delete' - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual("pending_delete", res.headers['x-image-meta-status']) - - # Second deletion - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - self.assertIn(b'Forbidden to delete a pending_delete image', res.body) - - # Verify the status is still 'pending_delete' - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual("pending_delete", res.headers['x-image-meta-status']) - - def test_upload_to_image_status_saving(self): - """Test image upload conflict. 
- - If an image is uploaded before an existing upload to the same image - completes, the original upload should succeed and the conflicting - one should fail and any data be deleted. - """ - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'some-foo-image'} - - # create an image but don't upload yet. - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - res_body = jsonutils.loads(res.body)['image'] - - image_id = res_body['id'] - self.assertIn('/images/%s' % image_id, res.headers['location']) - - # verify the status is 'queued' - self.assertEqual('queued', res_body['status']) - - orig_get_image_metadata = registry.get_image_metadata - orig_image_get = db_api._image_get - orig_image_update = db_api._image_update - orig_initiate_deletion = upload_utils.initiate_deletion - - # this will be used to track what is called and their order. - call_sequence = [] - # use this to determine if we are within a db session i.e. atomic - # operation, that is setting our active state. - # We want first status check to be 'queued' so we get past the - # first guard. - test_status = { - 'activate_session_started': False, - 'queued_guard_passed': False - } - - state_changes = [] - - def mock_image_update(context, values, image_id, purge_props=False, - from_state=None): - - status = values.get('status') - if status: - state_changes.append(status) - if status == 'active': - # We only expect this state to be entered once. 
- if test_status['activate_session_started']: - raise Exception("target session already started") - - test_status['activate_session_started'] = True - call_sequence.append('update_active') - - else: - call_sequence.append('update') - - return orig_image_update(context, values, image_id, - purge_props=purge_props, - from_state=from_state) - - def mock_image_get(*args, **kwargs): - """Force status to 'saving' if not within activate db session. - - If we are in the activate db session we return 'active' which we - then expect to cause exception.Conflict to be raised since this - indicates that another upload has succeeded. - """ - image = orig_image_get(*args, **kwargs) - if test_status['activate_session_started']: - call_sequence.append('image_get_active') - setattr(image, 'status', 'active') - else: - setattr(image, 'status', 'saving') - - return image - - def mock_get_image_metadata(*args, **kwargs): - """Force image status sequence. - """ - call_sequence.append('get_image_meta') - meta = orig_get_image_metadata(*args, **kwargs) - if not test_status['queued_guard_passed']: - meta['status'] = 'queued' - test_status['queued_guard_passed'] = True - - return meta - - def mock_initiate_deletion(*args, **kwargs): - call_sequence.append('init_del') - orig_initiate_deletion(*args, **kwargs) - - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/octet-stream' - req.body = b"chunk00000remainder" - - with mock.patch.object( - upload_utils, 'initiate_deletion') as mock_init_del: - mock_init_del.side_effect = mock_initiate_deletion - with mock.patch.object( - registry, 'get_image_metadata') as mock_get_meta: - mock_get_meta.side_effect = mock_get_image_metadata - with mock.patch.object(db_api, '_image_get') as mock_db_get: - mock_db_get.side_effect = mock_image_get - with mock.patch.object( - db_api, '_image_update') as mock_db_update: - mock_db_update.side_effect = mock_image_update - - # Expect a 409 Conflict. 
- res = req.get_response(self.api) - self.assertEqual(http_client.CONFLICT, res.status_int) - - # Check expected call sequence - self.assertEqual(['get_image_meta', 'get_image_meta', - 'update', 'update_active', - 'image_get_active', - 'init_del'], - call_sequence) - - self.assertTrue(mock_get_meta.called) - self.assertTrue(mock_db_get.called) - self.assertTrue(mock_db_update.called) - - # Ensure cleanup occurred. - self.assertEqual(1, mock_init_del.call_count) - - self.assertEqual(['saving', 'active'], state_changes) - - def test_register_and_upload(self): - """ - Test that the process of registering an image with - some metadata, then uploading an image file with some - more metadata doesn't mark the original metadata deleted - :see LP Bug#901534 - """ - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3', - 'x-image-meta-property-key1': 'value1'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - res_body = jsonutils.loads(res.body)['image'] - - self.assertIn('id', res_body) - - image_id = res_body['id'] - self.assertIn('/images/%s' % image_id, res.headers['location']) - - # Verify the status is queued - self.assertIn('status', res_body) - self.assertEqual('queued', res_body['status']) - - # Check properties are not deleted - self.assertIn('properties', res_body) - self.assertIn('key1', res_body['properties']) - self.assertEqual('value1', res_body['properties']['key1']) - - # Now upload the image file along with some more - # metadata and verify original metadata properties - # are not marked deleted - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/octet-stream' - req.headers['x-image-meta-property-key2'] = 
'value2' - req.body = b"chunk00000remainder" - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - # Verify the status is 'queued' - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'HEAD' - - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertIn('x-image-meta-property-key1', res.headers, - "Did not find required property in headers. " - "Got headers: %r" % res.headers) - self.assertEqual("active", res.headers['x-image-meta-status']) - - def test_upload_image_raises_store_disabled(self): - """Test that uploading an image file returns HTTTP 410 response""" - # create image - fs = store.get_store_from_scheme('file') - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3', - 'x-image-meta-property-key1': 'value1'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - res_body = jsonutils.loads(res.body)['image'] - - self.assertIn('id', res_body) - - image_id = res_body['id'] - self.assertIn('/images/%s' % image_id, res.headers['location']) - - # Verify the status is queued - self.assertIn('status', res_body) - self.assertEqual('queued', res_body['status']) - - # Now upload the image file - with mock.patch.object(fs, 'add') as mock_fsstore_add: - mock_fsstore_add.side_effect = store.StoreAddDisabled - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/octet-stream' - req.body = b"chunk00000remainder" - res = req.get_response(self.api) - self.assertEqual(http_client.GONE, res.status_int) - self._verify_image_status(image_id, 'killed') - - def _get_image_status(self, image_id): - req = webob.Request.blank("/images/%s" % image_id) 
- req.method = 'HEAD' - return req.get_response(self.api) - - def _verify_image_status(self, image_id, status, check_deleted=False, - use_cached=False): - if not use_cached: - res = self._get_image_status(image_id) - else: - res = self.image_status.pop(0) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(status, res.headers['x-image-meta-status']) - self.assertEqual(str(check_deleted), - res.headers['x-image-meta-deleted']) - - def _upload_safe_kill_common(self, mocks): - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3', - 'x-image-meta-property-key1': 'value1'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - res_body = jsonutils.loads(res.body)['image'] - - self.assertIn('id', res_body) - - self.image_id = res_body['id'] - self.assertIn('/images/%s' % - self.image_id, res.headers['location']) - - # Verify the status is 'queued' - self.assertEqual('queued', res_body['status']) - - for m in mocks: - m['mock'].side_effect = m['side_effect'] - - # Now upload the image file along with some more metadata and - # verify original metadata properties are not marked deleted - req = webob.Request.blank("/images/%s" % self.image_id) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/octet-stream' - req.headers['x-image-meta-property-key2'] = 'value2' - req.body = b"chunk00000remainder" - res = req.get_response(self.api) - # We expect 500 since an exception occurred during upload. 
- self.assertEqual(http_client.INTERNAL_SERVER_ERROR, res.status_int) - - @mock.patch('glance_store.store_add_to_backend') - def test_upload_safe_kill(self, mock_store_add_to_backend): - - def mock_store_add_to_backend_w_exception(*args, **kwargs): - """Trigger mid-upload failure by raising an exception.""" - self.image_status.append(self._get_image_status(self.image_id)) - # Raise an exception to emulate failed upload. - raise Exception("== UNIT TEST UPLOAD EXCEPTION ==") - - mocks = [{'mock': mock_store_add_to_backend, - 'side_effect': mock_store_add_to_backend_w_exception}] - - self._upload_safe_kill_common(mocks) - - # Check we went from 'saving' -> 'killed' - self._verify_image_status(self.image_id, 'saving', use_cached=True) - self._verify_image_status(self.image_id, 'killed') - - self.assertEqual(1, mock_store_add_to_backend.call_count) - - @mock.patch('glance_store.store_add_to_backend') - def test_upload_safe_kill_deleted(self, mock_store_add_to_backend): - test_router_api = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware(test_router_api, - is_admin=True) - - def mock_store_add_to_backend_w_exception(*args, **kwargs): - """We now delete the image, assert status is 'deleted' then - raise an exception to emulate a failed upload. This will be caught - by upload_data_to_store() which will then try to set status to - 'killed' which will be ignored since the image has been deleted. - """ - # expect 'saving' - self.image_status.append(self._get_image_status(self.image_id)) - - req = webob.Request.blank("/images/%s" % self.image_id) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - # expect 'deleted' - self.image_status.append(self._get_image_status(self.image_id)) - - # Raise an exception to make the upload fail. 
- raise Exception("== UNIT TEST UPLOAD EXCEPTION ==") - - mocks = [{'mock': mock_store_add_to_backend, - 'side_effect': mock_store_add_to_backend_w_exception}] - - self._upload_safe_kill_common(mocks) - - # Check we went from 'saving' -> 'deleted' -> 'deleted' - self._verify_image_status(self.image_id, 'saving', check_deleted=False, - use_cached=True) - - self._verify_image_status(self.image_id, 'deleted', check_deleted=True, - use_cached=True) - - self._verify_image_status(self.image_id, 'deleted', check_deleted=True) - - self.assertEqual(1, mock_store_add_to_backend.call_count) - - def _check_delete_during_image_upload(self, is_admin=False): - - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3', - 'x-image-meta-property-key1': 'value1'} - - req = unit_test_utils.get_fake_request(path="/images", - is_admin=is_admin) - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - res_body = jsonutils.loads(res.body)['image'] - - self.assertIn('id', res_body) - - image_id = res_body['id'] - self.assertIn('/images/%s' % image_id, res.headers['location']) - - # Verify the status is 'queued' - self.assertEqual('queued', res_body['status']) - - called = {'initiate_deletion': False} - - def mock_initiate_deletion(*args, **kwargs): - called['initiate_deletion'] = True - - self.stubs.Set(glance.api.v1.upload_utils, 'initiate_deletion', - mock_initiate_deletion) - - orig_update_image_metadata = registry.update_image_metadata - - data = b"somedata" - - def mock_update_image_metadata(*args, **kwargs): - - if args[2].get('size') == len(data): - path = "/images/%s" % image_id - req = unit_test_utils.get_fake_request(path=path, - method='DELETE', - is_admin=is_admin) - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - 
self.stubs.Set(registry, 'update_image_metadata', - orig_update_image_metadata) - - return orig_update_image_metadata(*args, **kwargs) - - self.stubs.Set(registry, 'update_image_metadata', - mock_update_image_metadata) - - req = unit_test_utils.get_fake_request(path="/images/%s" % image_id, - method='PUT') - req.headers['Content-Type'] = 'application/octet-stream' - req.body = data - res = req.get_response(self.api) - self.assertEqual(http_client.PRECONDITION_FAILED, res.status_int) - self.assertFalse(res.location) - - self.assertTrue(called['initiate_deletion']) - - req = unit_test_utils.get_fake_request(path="/images/%s" % image_id, - method='HEAD', - is_admin=True) - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual('True', res.headers['x-image-meta-deleted']) - self.assertEqual('deleted', res.headers['x-image-meta-status']) - - def test_delete_during_image_upload_by_normal_user(self): - self._check_delete_during_image_upload(is_admin=False) - - def test_delete_during_image_upload_by_admin(self): - self._check_delete_during_image_upload(is_admin=True) - - def test_disable_purge_props(self): - """ - Test the special x-glance-registry-purge-props header controls - the purge property behaviour of the registry. 
- :see LP Bug#901534 - """ - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3', - 'x-image-meta-property-key1': 'value1'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - req.headers['Content-Type'] = 'application/octet-stream' - req.body = b"chunk00000remainder" - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - res_body = jsonutils.loads(res.body)['image'] - - self.assertIn('id', res_body) - - image_id = res_body['id'] - self.assertIn('/images/%s' % image_id, res.headers['location']) - - # Verify the status is queued - self.assertIn('status', res_body) - self.assertEqual('active', res_body['status']) - - # Check properties are not deleted - self.assertIn('properties', res_body) - self.assertIn('key1', res_body['properties']) - self.assertEqual('value1', res_body['properties']['key1']) - - # Now update the image, setting new properties without - # passing the x-glance-registry-purge-props header and - # verify that original properties are marked deleted. - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - req.headers['x-image-meta-property-key2'] = 'value2' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - # Verify the original property no longer in headers - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'HEAD' - - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertIn('x-image-meta-property-key2', res.headers, - "Did not find required property in headers. " - "Got headers: %r" % res.headers) - self.assertNotIn('x-image-meta-property-key1', res.headers, - "Found property in headers that was not expected. 
" - "Got headers: %r" % res.headers) - - # Now update the image, setting new properties and - # passing the x-glance-registry-purge-props header with - # a value of "false" and verify that second property - # still appears in headers. - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - req.headers['x-image-meta-property-key3'] = 'value3' - req.headers['x-glance-registry-purge-props'] = 'false' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - # Verify the second and third property in headers - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'HEAD' - - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertIn('x-image-meta-property-key2', res.headers, - "Did not find required property in headers. " - "Got headers: %r" % res.headers) - self.assertIn('x-image-meta-property-key3', res.headers, - "Did not find required property in headers. " - "Got headers: %r" % res.headers) - - def test_publicize_image_unauthorized(self): - """Create a non-public image then fail to make public""" - rules = {"add_image": '@', "publicize_image": '!'} - self.set_policy_rules(rules) - - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-is-public': 'false', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - req = webob.Request.blank("/images/%s" % res_body['id']) - req.method = 'PUT' - req.headers['x-image-meta-is-public'] = 'true' - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_update_image_size_header_too_big(self): - """Tests raises BadRequest for supplied 
image size that is too big""" - fixture_headers = {'x-image-meta-size': CONF.image_size_cap + 1} - - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'PUT' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_update_image_size_data_too_big(self): - self.config(image_size_cap=512) - - fixture_headers = {'content-type': 'application/octet-stream'} - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'PUT' - - req.body = b'X' * (CONF.image_size_cap + 1) - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_update_image_size_chunked_data_too_big(self): - self.config(image_size_cap=512) - - # Create new image that has no data - req = webob.Request.blank("/images") - req.method = 'POST' - req.headers['x-image-meta-name'] = 'something' - req.headers['x-image-meta-container_format'] = 'ami' - req.headers['x-image-meta-disk_format'] = 'ami' - res = req.get_response(self.api) - image_id = jsonutils.loads(res.body)['image']['id'] - - fixture_headers = { - 'content-type': 'application/octet-stream', - 'transfer-encoding': 'chunked', - } - req = webob.Request.blank("/images/%s" % image_id) - req.method = 'PUT' - - req.body_file = six.StringIO('X' * (CONF.image_size_cap + 1)) - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, res.status_int) - - def test_update_non_existing_image(self): - self.config(image_size_cap=100) - - req = webob.Request.blank("images/%s" % _gen_uuid()) - req.method = 'PUT' - req.body = b'test' - req.headers['x-image-meta-name'] = 'test' - req.headers['x-image-meta-container_format'] = 'ami' - req.headers['x-image-meta-disk_format'] = 'ami' - req.headers['x-image-meta-is_public'] 
= 'False' - res = req.get_response(self.api) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - def test_update_public_image(self): - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-is-public': 'true', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - req = webob.Request.blank("/images/%s" % res_body['id']) - req.method = 'PUT' - req.headers['x-image-meta-name'] = 'updated public image' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - @mock.patch.object(registry, 'update_image_metadata') - def test_update_without_public_attribute(self, mock_update_image_metadata): - req = webob.Request.blank("/images/%s" % UUID1) - req.context = self.context - image_meta = {'properties': {}} - image_controller = glance.api.v1.images.Controller() - - with mock.patch.object( - image_controller, 'update_store_acls' - ) as mock_update_store_acls: - mock_update_store_acls.return_value = None - mock_update_image_metadata.return_value = {} - image_controller.update( - req, UUID1, image_meta, None) - self.assertEqual(0, mock_update_store_acls.call_count) - - def test_add_image_wrong_content_type(self): - fixture_headers = { - 'x-image-meta-name': 'fake image #3', - 'x-image-meta-container_format': 'ami', - 'x-image-meta-disk_format': 'ami', - 'transfer-encoding': 'chunked', - 'content-type': 'application/octet-st', - } - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_get_index_sort_name_asc(self): - 
""" - Tests that the /images API returns list of - public images sorted alphabetically by name in - ascending order. - """ - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'is_public': True, - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'asdf', - 'size': 19, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = {'id': UUID4, - 'status': 'active', - 'is_public': True, - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'xyz', - 'size': 20, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/images?sort_key=name&sort_dir=asc') - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(3, len(images)) - self.assertEqual(UUID3, images[0]['id']) - self.assertEqual(UUID2, images[1]['id']) - self.assertEqual(UUID4, images[2]['id']) - - def test_get_details_filter_changes_since(self): - """ - Tests that the /images/detail API returns list of - images that changed since the time defined by changes-since - """ - dt1 = timeutils.utcnow() - datetime.timedelta(1) - iso1 = timeutils.isotime(dt1) - - date_only1 = dt1.strftime('%Y-%m-%d') - date_only2 = dt1.strftime('%Y%m%d') - date_only3 = dt1.strftime('%Y-%m%d') - - dt2 = timeutils.utcnow() + datetime.timedelta(1) - iso2 = timeutils.isotime(dt2) - - image_ts = timeutils.utcnow() + datetime.timedelta(2) - hour_before = image_ts.strftime('%Y-%m-%dT%H:%M:%S%%2B01:00') - hour_after = image_ts.strftime('%Y-%m-%dT%H:%M:%S-01:00') - - dt4 = timeutils.utcnow() + datetime.timedelta(3) - iso4 = timeutils.isotime(dt4) - - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'is_public': True, - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'fake image #3', - 'size': 18, - 'checksum': None} - - 
db_api.image_create(self.context, extra_fixture) - db_api.image_destroy(self.context, UUID3) - - UUID4 = _gen_uuid() - extra_fixture = {'id': UUID4, - 'status': 'active', - 'is_public': True, - 'disk_format': 'ami', - 'container_format': 'ami', - 'name': 'fake image #4', - 'size': 20, - 'checksum': None, - 'created_at': image_ts, - 'updated_at': image_ts} - - db_api.image_create(self.context, extra_fixture) - - # Check a standard list, 4 images in db (2 deleted) - req = webob.Request.blank('/images/detail') - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - res_dict = jsonutils.loads(res.body) - images = res_dict['images'] - self.assertEqual(2, len(images)) - self.assertEqual(UUID4, images[0]['id']) - self.assertEqual(UUID2, images[1]['id']) - - # Expect 3 images (1 deleted) - req = webob.Request.blank('/images/detail?changes-since=%s' % iso1) - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - res_dict = jsonutils.loads(res.body) - images = res_dict['images'] - self.assertEqual(3, len(images)) - self.assertEqual(UUID4, images[0]['id']) - self.assertEqual(UUID3, images[1]['id']) # deleted - self.assertEqual(UUID2, images[2]['id']) - - # Expect 1 images (0 deleted) - req = webob.Request.blank('/images/detail?changes-since=%s' % iso2) - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - res_dict = jsonutils.loads(res.body) - images = res_dict['images'] - self.assertEqual(1, len(images)) - self.assertEqual(UUID4, images[0]['id']) - - # Expect 1 images (0 deleted) - req = webob.Request.blank('/images/detail?changes-since=%s' % - hour_before) - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - res_dict = jsonutils.loads(res.body) - images = res_dict['images'] - self.assertEqual(1, len(images)) - self.assertEqual(UUID4, images[0]['id']) - - # Expect 0 images (0 deleted) - req = 
webob.Request.blank('/images/detail?changes-since=%s' % - hour_after) - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - res_dict = jsonutils.loads(res.body) - images = res_dict['images'] - self.assertEqual(0, len(images)) - - # Expect 0 images (0 deleted) - req = webob.Request.blank('/images/detail?changes-since=%s' % iso4) - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - res_dict = jsonutils.loads(res.body) - images = res_dict['images'] - self.assertEqual(0, len(images)) - - for param in [date_only1, date_only2, date_only3]: - # Expect 3 images (1 deleted) - req = webob.Request.blank('/images/detail?changes-since=%s' % - param) - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - res_dict = jsonutils.loads(res.body) - images = res_dict['images'] - self.assertEqual(3, len(images)) - self.assertEqual(UUID4, images[0]['id']) - self.assertEqual(UUID3, images[1]['id']) # deleted - self.assertEqual(UUID2, images[2]['id']) - - # Bad request (empty changes-since param) - req = webob.Request.blank('/images/detail?changes-since=') - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_get_images_bad_urls(self): - """Check that routes collections are not on (LP bug 1185828)""" - req = webob.Request.blank('/images/detail.xxx') - res = req.get_response(self.api) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - req = webob.Request.blank('/images.xxx') - res = req.get_response(self.api) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - req = webob.Request.blank('/images/new') - res = req.get_response(self.api) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - req = webob.Request.blank("/images/%s/members" % UUID1) - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - req = webob.Request.blank("/images/%s/members.xxx" % UUID1) - res = 
req.get_response(self.api) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - def test_get_index_filter_on_user_defined_properties(self): - """Check that image filtering works on user-defined properties""" - - image1_id = _gen_uuid() - properties = {'distro': 'ubuntu', 'arch': 'i386'} - extra_fixture = {'id': image1_id, - 'status': 'active', - 'is_public': True, - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'image-extra-1', - 'size': 18, 'properties': properties, - 'checksum': None} - db_api.image_create(self.context, extra_fixture) - - image2_id = _gen_uuid() - properties = {'distro': 'ubuntu', 'arch': 'x86_64', 'foo': 'bar'} - extra_fixture = {'id': image2_id, - 'status': 'active', - 'is_public': True, - 'disk_format': 'ami', - 'container_format': 'ami', - 'name': 'image-extra-2', - 'size': 20, 'properties': properties, - 'checksum': None} - db_api.image_create(self.context, extra_fixture) - - # Test index with filter containing one user-defined property. - # Filter is 'property-distro=ubuntu'. - # Verify both image1 and image2 are returned - req = webob.Request.blank('/images?property-distro=ubuntu') - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - images = jsonutils.loads(res.body)['images'] - self.assertEqual(2, len(images)) - self.assertEqual(image2_id, images[0]['id']) - self.assertEqual(image1_id, images[1]['id']) - - # Test index with filter containing one user-defined property but - # non-existent value. Filter is 'property-distro=fedora'. - # Verify neither images are returned - req = webob.Request.blank('/images?property-distro=fedora') - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - images = jsonutils.loads(res.body)['images'] - self.assertEqual(0, len(images)) - - # Test index with filter containing one user-defined property but - # unique value. Filter is 'property-arch=i386'. - # Verify only image1 is returned. 
- req = webob.Request.blank('/images?property-arch=i386') - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - images = jsonutils.loads(res.body)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image1_id, images[0]['id']) - - # Test index with filter containing one user-defined property but - # unique value. Filter is 'property-arch=x86_64'. - # Verify only image1 is returned. - req = webob.Request.blank('/images?property-arch=x86_64') - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - images = jsonutils.loads(res.body)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image2_id, images[0]['id']) - - # Test index with filter containing unique user-defined property. - # Filter is 'property-foo=bar'. - # Verify only image2 is returned. - req = webob.Request.blank('/images?property-foo=bar') - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - images = jsonutils.loads(res.body)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image2_id, images[0]['id']) - - # Test index with filter containing unique user-defined property but - # .value is non-existent. Filter is 'property-foo=baz'. - # Verify neither images are returned. - req = webob.Request.blank('/images?property-foo=baz') - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - images = jsonutils.loads(res.body)['images'] - self.assertEqual(0, len(images)) - - # Test index with filter containing multiple user-defined properties - # Filter is 'property-arch=x86_64&property-distro=ubuntu'. - # Verify only image2 is returned. 
- req = webob.Request.blank('/images?property-arch=x86_64&' - 'property-distro=ubuntu') - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - images = jsonutils.loads(res.body)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image2_id, images[0]['id']) - - # Test index with filter containing multiple user-defined properties - # Filter is 'property-arch=i386&property-distro=ubuntu'. - # Verify only image1 is returned. - req = webob.Request.blank('/images?property-arch=i386&' - 'property-distro=ubuntu') - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - images = jsonutils.loads(res.body)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image1_id, images[0]['id']) - - # Test index with filter containing multiple user-defined properties. - # Filter is 'property-arch=random&property-distro=ubuntu'. - # Verify neither images are returned. - req = webob.Request.blank('/images?property-arch=random&' - 'property-distro=ubuntu') - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - images = jsonutils.loads(res.body)['images'] - self.assertEqual(0, len(images)) - - # Test index with filter containing multiple user-defined properties. - # Filter is 'property-arch=random&property-distro=random'. - # Verify neither images are returned. - req = webob.Request.blank('/images?property-arch=random&' - 'property-distro=random') - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - images = jsonutils.loads(res.body)['images'] - self.assertEqual(0, len(images)) - - # Test index with filter containing multiple user-defined properties. - # Filter is 'property-boo=far&property-poo=far'. - # Verify neither images are returned. 
- req = webob.Request.blank('/images?property-boo=far&' - 'property-poo=far') - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - images = jsonutils.loads(res.body)['images'] - self.assertEqual(0, len(images)) - - # Test index with filter containing multiple user-defined properties. - # Filter is 'property-foo=bar&property-poo=far'. - # Verify neither images are returned. - req = webob.Request.blank('/images?property-foo=bar&' - 'property-poo=far') - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - images = jsonutils.loads(res.body)['images'] - self.assertEqual(0, len(images)) - - def test_get_images_detailed_unauthorized(self): - rules = {"get_images": '!'} - self.set_policy_rules(rules) - req = webob.Request.blank('/images/detail') - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_get_images_unauthorized(self): - rules = {"get_images": '!'} - self.set_policy_rules(rules) - req = webob.Request.blank('/images') - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_store_location_not_revealed(self): - """ - Test that the internal store location is NOT revealed - through the API server - """ - # Check index and details... 
- for url in ('/images', '/images/detail'): - req = webob.Request.blank(url) - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - num_locations = sum([1 for record in images - if 'location' in record.keys()]) - self.assertEqual(0, num_locations, images) - - # Check GET - req = webob.Request.blank("/images/%s" % UUID2) - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertNotIn('X-Image-Meta-Location', res.headers) - - # Check HEAD - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertNotIn('X-Image-Meta-Location', res.headers) - - # Check PUT - req = webob.Request.blank("/images/%s" % UUID2) - req.body = res.body - req.method = 'PUT' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - res_body = jsonutils.loads(res.body) - self.assertNotIn('location', res_body['image']) - - # Check POST - req = webob.Request.blank("/images") - headers = {'x-image-meta-location': 'http://localhost', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - for k, v in six.iteritems(headers): - req.headers[k] = v - req.method = 'POST' - - http = store.get_store_from_scheme('http') - - with mock.patch.object(http, 'get_size') as size: - size.return_value = 0 - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - res_body = jsonutils.loads(res.body) - self.assertNotIn('location', res_body['image']) - - def test_image_is_checksummed(self): - """Test that the image contents are checksummed properly""" - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - image_contents = 
b"chunk00000remainder" - image_checksum = hashlib.md5(image_contents).hexdigest() - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - req.headers['Content-Type'] = 'application/octet-stream' - req.body = image_contents - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual(image_checksum, res_body['checksum'], - "Mismatched checksum. Expected %s, got %s" % - (image_checksum, res_body['checksum'])) - - def test_etag_equals_checksum_header(self): - """Test that the ETag header matches the x-image-meta-checksum""" - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - image_contents = b"chunk00000remainder" - image_checksum = hashlib.md5(image_contents).hexdigest() - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - req.headers['Content-Type'] = 'application/octet-stream' - req.body = image_contents - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - image = jsonutils.loads(res.body)['image'] - - # HEAD the image and check the ETag equals the checksum header... 
- expected_headers = {'x-image-meta-checksum': image_checksum, - 'etag': image_checksum} - req = webob.Request.blank("/images/%s" % image['id']) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - for key in expected_headers.keys(): - self.assertIn(key, res.headers, - "required header '%s' missing from " - "returned headers" % key) - for key, value in six.iteritems(expected_headers): - self.assertEqual(value, res.headers[key]) - - def test_bad_checksum_prevents_image_creation(self): - """Test that the image contents are checksummed properly""" - image_contents = b"chunk00000remainder" - bad_checksum = hashlib.md5(b"invalid").hexdigest() - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3', - 'x-image-meta-checksum': bad_checksum, - 'x-image-meta-is-public': 'true'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - - req.headers['Content-Type'] = 'application/octet-stream' - req.body = image_contents - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - # Test that only one image was returned (that already exists) - req = webob.Request.blank("/images") - req.method = 'GET' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - images = jsonutils.loads(res.body)['images'] - self.assertEqual(1, len(images)) - - def test_image_meta(self): - """Test for HEAD /images/""" - expected_headers = {'x-image-meta-id': UUID2, - 'x-image-meta-name': 'fake image #2'} - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertFalse(res.location) - - for key, value in six.iteritems(expected_headers): - self.assertEqual(value, res.headers[key]) 
- - def test_image_meta_unauthorized(self): - rules = {"get_image": '!'} - self.set_policy_rules(rules) - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_show_image_basic(self): - req = webob.Request.blank("/images/%s" % UUID2) - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertFalse(res.location) - self.assertEqual('application/octet-stream', res.content_type) - self.assertEqual(b'chunk00000remainder', res.body) - - def test_show_non_exists_image(self): - req = webob.Request.blank("/images/%s" % _gen_uuid()) - res = req.get_response(self.api) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - def test_show_image_unauthorized(self): - rules = {"get_image": '!'} - self.set_policy_rules(rules) - req = webob.Request.blank("/images/%s" % UUID2) - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_show_image_unauthorized_download(self): - rules = {"download_image": '!'} - self.set_policy_rules(rules) - req = webob.Request.blank("/images/%s" % UUID2) - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_show_image_restricted_download_for_core_property(self): - rules = { - "restricted": - "not ('1024M':%(min_ram)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - } - self.set_policy_rules(rules) - req = webob.Request.blank("/images/%s" % UUID2) - req.headers['X-Auth-Token'] = 'user:tenant:_member_' - req.headers['min_ram'] = '1024M' - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_show_image_restricted_download_for_custom_property(self): - rules = { - "restricted": - "not ('test_1234'==%(x_test_key)s and role:_member_)", - "download_image": "role:admin or rule:restricted" - } - 
self.set_policy_rules(rules) - req = webob.Request.blank("/images/%s" % UUID2) - req.headers['X-Auth-Token'] = 'user:tenant:_member_' - req.headers['x_test_key'] = 'test_1234' - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_download_service_unavailable(self): - """Test image download returns HTTPServiceUnavailable.""" - image_fixture = self.FIXTURES[1] - image_fixture.update({'location': 'http://0.0.0.0:1/file.tar.gz'}) - request = webob.Request.blank("/images/%s" % UUID2) - request.context = self.context - - image_controller = glance.api.v1.images.Controller() - with mock.patch.object(image_controller, - 'get_active_image_meta_or_error' - ) as mocked_get_image: - mocked_get_image.return_value = image_fixture - self.assertRaises(webob.exc.HTTPServiceUnavailable, - image_controller.show, - request, mocked_get_image) - - @mock.patch('glance_store._drivers.filesystem.Store.get') - def test_show_image_store_get_not_support(self, m_get): - m_get.side_effect = store.StoreGetNotSupported() - req = webob.Request.blank("/images/%s" % UUID2) - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - @mock.patch('glance_store._drivers.filesystem.Store.get') - def test_show_image_store_random_get_not_support(self, m_get): - m_get.side_effect = store.StoreRandomGetNotSupported(chunk_size=0, - offset=0) - req = webob.Request.blank("/images/%s" % UUID2) - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_delete_image(self): - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertFalse(res.location) - self.assertEqual(b'', res.body) - - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'GET' - res = req.get_response(self.api) - self.assertEqual(http_client.NOT_FOUND, res.status_int, res.body) - - req = 
webob.Request.blank("/images/%s" % UUID2) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual('True', res.headers['x-image-meta-deleted']) - self.assertEqual('deleted', res.headers['x-image-meta-status']) - - def test_delete_non_exists_image(self): - req = webob.Request.blank("/images/%s" % _gen_uuid()) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - def test_delete_not_allowed(self): - # Verify we can get the image data - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'GET' - req.headers['X-Auth-Token'] = 'user:tenant:' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(19, len(res.body)) - - # Verify we cannot delete the image - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - # Verify the image data is still there - req.method = 'GET' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(19, len(res.body)) - - def test_delete_queued_image(self): - """Delete an image in a queued state - - Bug #747799 demonstrated that trying to DELETE an image - that had had its save process killed manually results in failure - because the location attribute is None. - - Bug #1048851 demonstrated that the status was not properly - being updated to 'deleted' from 'queued'. 
- """ - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual('queued', res_body['status']) - - # Now try to delete the image... - req = webob.Request.blank("/images/%s" % res_body['id']) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - req = webob.Request.blank('/images/%s' % res_body['id']) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual('True', res.headers['x-image-meta-deleted']) - self.assertEqual('deleted', res.headers['x-image-meta-status']) - - def test_delete_queued_image_delayed_delete(self): - """Delete an image in a queued state when delayed_delete is on - - Bug #1048851 demonstrated that the status was not properly - being updated to 'deleted' from 'queued'. - """ - self.config(delayed_delete=True) - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-name': 'fake image #3'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual('queued', res_body['status']) - - # Now try to delete the image... 
- req = webob.Request.blank("/images/%s" % res_body['id']) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - req = webob.Request.blank('/images/%s' % res_body['id']) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual('True', res.headers['x-image-meta-deleted']) - self.assertEqual('deleted', res.headers['x-image-meta-status']) - - def test_delete_protected_image(self): - fixture_headers = {'x-image-meta-store': 'file', - 'x-image-meta-name': 'fake image #3', - 'x-image-meta-disk-format': 'vhd', - 'x-image-meta-container-format': 'ovf', - 'x-image-meta-protected': 'True'} - - req = webob.Request.blank("/images") - req.method = 'POST' - for k, v in six.iteritems(fixture_headers): - req.headers[k] = v - res = req.get_response(self.api) - self.assertEqual(http_client.CREATED, res.status_int) - - res_body = jsonutils.loads(res.body)['image'] - self.assertEqual('queued', res_body['status']) - - # Now try to delete the image... 
- req = webob.Request.blank("/images/%s" % res_body['id']) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_delete_image_unauthorized(self): - rules = {"delete_image": '!'} - self.set_policy_rules(rules) - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - def test_head_details(self): - req = webob.Request.blank('/images/detail') - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.METHOD_NOT_ALLOWED, res.status_int) - self.assertEqual('GET', res.headers.get('Allow')) - self.assertEqual(('GET',), res.allow) - req.method = 'GET' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - def test_get_details_invalid_marker(self): - """ - Tests that the /images/detail API returns a 400 - when an invalid marker is provided - """ - req = webob.Request.blank('/images/detail?marker=%s' % _gen_uuid()) - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_get_image_members(self): - """ - Tests members listing for existing images - """ - req = webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'GET' - - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - memb_list = jsonutils.loads(res.body) - num_members = len(memb_list['members']) - self.assertEqual(0, num_members) - - def test_get_image_members_allowed_by_policy(self): - rules = {"get_members": '@'} - self.set_policy_rules(rules) - - req = webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'GET' - - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - memb_list = jsonutils.loads(res.body) - num_members = len(memb_list['members']) - self.assertEqual(0, num_members) - - def 
test_get_image_members_forbidden_by_policy(self): - rules = {"get_members": '!'} - self.set_policy_rules(rules) - - req = webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'GET' - - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int) - - def test_get_image_members_not_existing(self): - """ - Tests proper exception is raised if attempt to get members of - non-existing image - """ - req = webob.Request.blank('/images/%s/members' % _gen_uuid()) - req.method = 'GET' - - res = req.get_response(self.api) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - def test_add_member_positive(self): - """ - Tests adding image members - """ - test_router_api = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_router_api, is_admin=True) - req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) - req.method = 'PUT' - - res = req.get_response(self.api) - self.assertEqual(http_client.NO_CONTENT, res.status_int) - - def test_get_member_images(self): - """ - Tests image listing for members - """ - req = webob.Request.blank('/shared-images/pattieblack') - req.method = 'GET' - - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - memb_list = jsonutils.loads(res.body) - num_members = len(memb_list['shared_images']) - self.assertEqual(0, num_members) - - def test_replace_members(self): - """ - Tests replacing image members raises right exception - """ - test_router_api = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_router_api, is_admin=False) - fixture = dict(member_id='pattieblack') - - req = webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'PUT' - req.content_type = 'application/json' - req.body = jsonutils.dump_as_bytes(dict(image_memberships=fixture)) - - res = req.get_response(self.api) - self.assertEqual(http_client.UNAUTHORIZED, res.status_int) - - def 
test_active_image_immutable_props_for_user(self): - """ - Tests user cannot update immutable props of active image - """ - test_router_api = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_router_api, is_admin=False) - fixture_header_list = [{'x-image-meta-checksum': '1234'}, - {'x-image-meta-size': '12345'}] - for fixture_header in fixture_header_list: - req = webob.Request.blank('/images/%s' % UUID2) - req.method = 'PUT' - for k, v in six.iteritems(fixture_header): - req = webob.Request.blank('/images/%s' % UUID2) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - orig_value = res.headers[k] - - req = webob.Request.blank('/images/%s' % UUID2) - req.headers[k] = v - req.method = 'PUT' - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - prop = k[len('x-image-meta-'):] - body = res.body.decode('utf-8') - self.assertNotEqual(-1, body.find( - "Forbidden to modify '%s' of active image" % prop)) - - req = webob.Request.blank('/images/%s' % UUID2) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(orig_value, res.headers[k]) - - def test_deactivated_image_immutable_props_for_user(self): - """ - Tests user cannot update immutable props of deactivated image - """ - test_router_api = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_router_api, is_admin=False) - fixture_header_list = [{'x-image-meta-checksum': '1234'}, - {'x-image-meta-size': '12345'}] - for fixture_header in fixture_header_list: - req = webob.Request.blank('/images/%s' % UUID3) - req.method = 'PUT' - for k, v in six.iteritems(fixture_header): - req = webob.Request.blank('/images/%s' % UUID3) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - orig_value = res.headers[k] - - req = webob.Request.blank('/images/%s' % UUID3) - 
req.headers[k] = v - req.method = 'PUT' - res = req.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - prop = k[len('x-image-meta-'):] - body = res.body.decode('utf-8') - self.assertNotEqual(-1, body.find( - "Forbidden to modify '%s' of deactivated image" % prop)) - - req = webob.Request.blank('/images/%s' % UUID3) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(orig_value, res.headers[k]) - - def test_props_of_active_image_mutable_for_admin(self): - """ - Tests admin can update 'immutable' props of active image - """ - test_router_api = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_router_api, is_admin=True) - fixture_header_list = [{'x-image-meta-checksum': '1234'}, - {'x-image-meta-size': '12345'}] - for fixture_header in fixture_header_list: - req = webob.Request.blank('/images/%s' % UUID2) - req.method = 'PUT' - for k, v in six.iteritems(fixture_header): - req = webob.Request.blank('/images/%s' % UUID2) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - req = webob.Request.blank('/images/%s' % UUID2) - req.headers[k] = v - req.method = 'PUT' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - req = webob.Request.blank('/images/%s' % UUID2) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(v, res.headers[k]) - - def test_props_of_deactivated_image_mutable_for_admin(self): - """ - Tests admin can update 'immutable' props of deactivated image - """ - test_router_api = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_router_api, is_admin=True) - fixture_header_list = [{'x-image-meta-checksum': '1234'}, - {'x-image-meta-size': '12345'}] - for fixture_header in fixture_header_list: - req = webob.Request.blank('/images/%s' % UUID3) 
- req.method = 'PUT' - for k, v in six.iteritems(fixture_header): - req = webob.Request.blank('/images/%s' % UUID3) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - req = webob.Request.blank('/images/%s' % UUID3) - req.headers[k] = v - req.method = 'PUT' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - req = webob.Request.blank('/images/%s' % UUID3) - req.method = 'HEAD' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(v, res.headers[k]) - - def test_replace_members_non_existing_image(self): - """ - Tests replacing image members raises right exception - """ - test_router_api = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_router_api, is_admin=True) - fixture = dict(member_id='pattieblack') - req = webob.Request.blank('/images/%s/members' % _gen_uuid()) - req.method = 'PUT' - req.content_type = 'application/json' - req.body = jsonutils.dump_as_bytes(dict(image_memberships=fixture)) - - res = req.get_response(self.api) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - def test_replace_members_bad_request(self): - """ - Tests replacing image members raises bad request if body is wrong - """ - test_router_api = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_router_api, is_admin=True) - fixture = dict(member_id='pattieblack') - - req = webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'PUT' - req.content_type = 'application/json' - req.body = jsonutils.dump_as_bytes(dict(image_memberships=fixture)) - - res = req.get_response(self.api) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_replace_members_positive(self): - """ - Tests replacing image members - """ - test_router = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_router, is_admin=True) - - fixture = 
[dict(member_id='pattieblack', can_share=False)] - # Replace - req = webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'PUT' - req.content_type = 'application/json' - req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) - res = req.get_response(self.api) - self.assertEqual(http_client.NO_CONTENT, res.status_int) - - def test_replace_members_forbidden_by_policy(self): - rules = {"modify_member": '!'} - self.set_policy_rules(rules) - self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper), - is_admin=True) - fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}] - - req = webob.Request.blank('/images/%s/members' % UUID1) - req.method = 'PUT' - req.content_type = 'application/json' - req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) - - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int) - - def test_replace_members_allowed_by_policy(self): - rules = {"modify_member": '@'} - self.set_policy_rules(rules) - self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper), - is_admin=True) - fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}] - - req = webob.Request.blank('/images/%s/members' % UUID1) - req.method = 'PUT' - req.content_type = 'application/json' - req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) - - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) - - def test_add_member_unauthorized(self): - """ - Tests adding image members raises right exception - """ - test_router = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_router, is_admin=False) - req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) - req.method = 'PUT' - - res = req.get_response(self.api) - self.assertEqual(http_client.UNAUTHORIZED, res.status_int) - - def test_add_member_non_existing_image(self): - """ - Tests adding image members raises right exception - """ - test_router = 
router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_router, is_admin=True) - test_uri = '/images/%s/members/pattieblack' - req = webob.Request.blank(test_uri % _gen_uuid()) - req.method = 'PUT' - - res = req.get_response(self.api) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - def test_add_member_with_body(self): - """ - Tests adding image members - """ - fixture = dict(can_share=True) - test_router = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_router, is_admin=True) - req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) - req.method = 'PUT' - req.body = jsonutils.dump_as_bytes(dict(member=fixture)) - res = req.get_response(self.api) - self.assertEqual(http_client.NO_CONTENT, res.status_int) - - def test_add_member_overlimit(self): - self.config(image_member_quota=0) - test_router_api = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_router_api, is_admin=True) - req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) - req.method = 'PUT' - - res = req.get_response(self.api) - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, res.status_int) - - def test_add_member_unlimited(self): - self.config(image_member_quota=-1) - test_router_api = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_router_api, is_admin=True) - req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) - req.method = 'PUT' - - res = req.get_response(self.api) - self.assertEqual(http_client.NO_CONTENT, res.status_int) - - def test_add_member_forbidden_by_policy(self): - rules = {"modify_member": '!'} - self.set_policy_rules(rules) - self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper), - is_admin=True) - req = webob.Request.blank('/images/%s/members/pattieblack' % UUID1) - req.method = 'PUT' - - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int) - - def 
test_add_member_allowed_by_policy(self): - rules = {"modify_member": '@'} - self.set_policy_rules(rules) - self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper), - is_admin=True) - req = webob.Request.blank('/images/%s/members/pattieblack' % UUID1) - req.method = 'PUT' - - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) - - def test_get_members_of_deleted_image_raises_404(self): - """ - Tests members listing for deleted image raises 404. - """ - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - req = webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'GET' - - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) - self.assertIn('Image with identifier %s has been deleted.' % UUID2, - res.body.decode()) - - def test_delete_member_of_deleted_image_raises_404(self): - """ - Tests deleting members of deleted image raises 404. - """ - test_router = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True) - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) - req.method = 'DELETE' - - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) - self.assertIn('Image with identifier %s has been deleted.' % UUID2, - res.body.decode()) - - def test_update_members_of_deleted_image_raises_404(self): - """ - Tests update members of deleted image raises 404. 
- """ - test_router = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True) - - req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) - req.method = 'PUT' - res = req.get_response(self.api) - self.assertEqual(http_client.NO_CONTENT, res.status_int) - - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}] - req = webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'PUT' - req.content_type = 'application/json' - req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) - body = res.body.decode('utf-8') - self.assertIn( - 'Image with identifier %s has been deleted.' % UUID2, body) - - def test_replace_members_of_image(self): - test_router = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True) - - fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}] - req = webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'PUT' - req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) - res = req.get_response(self.api) - self.assertEqual(http_client.NO_CONTENT, res.status_int) - - req = webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'GET' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - memb_list = jsonutils.loads(res.body) - self.assertEqual(1, len(memb_list)) - - def test_replace_members_of_image_overlimit(self): - # Set image_member_quota to 1 - self.config(image_member_quota=1) - test_router = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True) - - # PUT an original member entry - fixture = [{'member_id': 'baz', 'can_share': False}] - req = 
webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'PUT' - req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) - res = req.get_response(self.api) - self.assertEqual(http_client.NO_CONTENT, res.status_int) - - # GET original image member list - req = webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'GET' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - original_members = jsonutils.loads(res.body)['members'] - self.assertEqual(1, len(original_members)) - - # PUT 2 image members to replace existing (overlimit) - fixture = [{'member_id': 'foo1', 'can_share': False}, - {'member_id': 'foo2', 'can_share': False}] - req = webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'PUT' - req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) - res = req.get_response(self.api) - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, res.status_int) - - # GET member list - req = webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'GET' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - # Assert the member list was not changed - memb_list = jsonutils.loads(res.body)['members'] - self.assertEqual(original_members, memb_list) - - def test_replace_members_of_image_unlimited(self): - self.config(image_member_quota=-1) - test_router = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True) - - fixture = [{'member_id': 'foo1', 'can_share': False}, - {'member_id': 'foo2', 'can_share': False}] - req = webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'PUT' - req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) - res = req.get_response(self.api) - self.assertEqual(http_client.NO_CONTENT, res.status_int) - - req = webob.Request.blank('/images/%s/members' % UUID2) - req.method = 'GET' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - memb_list 
= jsonutils.loads(res.body)['members'] - self.assertEqual(fixture, memb_list) - - def test_create_member_to_deleted_image_raises_404(self): - """ - Tests adding members to deleted image raises 404. - """ - test_router = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True) - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(http_client.OK, res.status_int) - - req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) - req.method = 'PUT' - - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) - self.assertIn('Image with identifier %s has been deleted.' % UUID2, - res.body.decode()) - - def test_delete_member(self): - """ - Tests deleting image members raises right exception - """ - test_router = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_router, is_admin=False) - req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) - req.method = 'DELETE' - - res = req.get_response(self.api) - self.assertEqual(http_client.UNAUTHORIZED, res.status_int) - - def test_delete_member_on_non_existing_image(self): - """ - Tests deleting image members raises right exception - """ - test_router = router.API(self.mapper) - api = test_utils.FakeAuthMiddleware(test_router, is_admin=True) - test_uri = '/images/%s/members/pattieblack' - req = webob.Request.blank(test_uri % _gen_uuid()) - req.method = 'DELETE' - - res = req.get_response(api) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - def test_delete_non_exist_member(self): - """ - Test deleting image members raises right exception - """ - test_router = router.API(self.mapper) - api = test_utils.FakeAuthMiddleware( - test_router, is_admin=True) - req = webob.Request.blank('/images/%s/members/test_user' % UUID2) - req.method = 'DELETE' - res = req.get_response(api) - self.assertEqual(http_client.NOT_FOUND, 
res.status_int) - - def test_delete_image_member(self): - test_rserver = router.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_rserver, is_admin=True) - - # Add member to image: - fixture = dict(can_share=True) - test_uri = '/images/%s/members/test_add_member_positive' - req = webob.Request.blank(test_uri % UUID2) - req.method = 'PUT' - req.content_type = 'application/json' - req.body = jsonutils.dump_as_bytes(dict(member=fixture)) - res = req.get_response(self.api) - self.assertEqual(http_client.NO_CONTENT, res.status_int) - - # Delete member - test_uri = '/images/%s/members/test_add_member_positive' - req = webob.Request.blank(test_uri % UUID2) - req.headers['X-Auth-Token'] = 'test1:test1:' - req.method = 'DELETE' - req.content_type = 'application/json' - res = req.get_response(self.api) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertIn(b'Forbidden', res.body) - - def test_delete_member_allowed_by_policy(self): - rules = {"delete_member": '@', "modify_member": '@'} - self.set_policy_rules(rules) - self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper), - is_admin=True) - req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) - req.method = 'PUT' - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) - - def test_delete_member_forbidden_by_policy(self): - rules = {"delete_member": '!', "modify_member": '@'} - self.set_policy_rules(rules) - self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper), - is_admin=True) - req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) - req.method = 'PUT' - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) - req.method = 'DELETE' - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int) - 
- -class TestImageSerializer(base.IsolatedUnitTest): - def setUp(self): - """Establish a clean test environment""" - super(TestImageSerializer, self).setUp() - self.receiving_user = 'fake_user' - self.receiving_tenant = 2 - self.context = glance.context.RequestContext( - is_admin=True, - user=self.receiving_user, - tenant=self.receiving_tenant) - self.serializer = glance.api.v1.images.ImageSerializer() - - def image_iter(): - for x in [b'chunk', b'678911234', b'56789']: - yield x - - self.FIXTURE = { - 'image_iterator': image_iter(), - 'image_meta': { - 'id': UUID2, - 'name': 'fake image #2', - 'status': 'active', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'is_public': True, - 'created_at': timeutils.utcnow(), - 'updated_at': timeutils.utcnow(), - 'deleted_at': None, - 'deleted': False, - 'checksum': '06ff575a2856444fbe93100157ed74ab92eb7eff', - 'size': 19, - 'owner': _gen_uuid(), - 'location': "file:///tmp/glance-tests/2", - 'properties': {}, - } - } - - def test_meta(self): - exp_headers = {'x-image-meta-id': UUID2, - 'x-image-meta-location': 'file:///tmp/glance-tests/2', - 'ETag': self.FIXTURE['image_meta']['checksum'], - 'x-image-meta-name': 'fake image #2'} - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'HEAD' - req.remote_addr = "1.2.3.4" - req.context = self.context - response = webob.Response(request=req) - self.serializer.meta(response, self.FIXTURE) - for key, value in six.iteritems(exp_headers): - self.assertEqual(value, response.headers[key]) - - def test_meta_utf8(self): - # We get unicode strings from JSON, and therefore all strings in the - # metadata will actually be unicode when handled internally. But we - # want to output utf-8. 
- FIXTURE = { - 'image_meta': { - 'id': six.text_type(UUID2), - 'name': u'fake image #2 with utf-8 éàè', - 'status': u'active', - 'disk_format': u'vhd', - 'container_format': u'ovf', - 'is_public': True, - 'created_at': timeutils.utcnow(), - 'updated_at': timeutils.utcnow(), - 'deleted_at': None, - 'deleted': False, - 'checksum': u'06ff575a2856444fbe93100157ed74ab92eb7eff', - 'size': 19, - 'owner': six.text_type(_gen_uuid()), - 'location': u"file:///tmp/glance-tests/2", - 'properties': { - u'prop_éé': u'ça marche', - u'prop_çé': u'çé', - } - } - } - exp_headers = {'x-image-meta-id': UUID2, - 'x-image-meta-location': 'file:///tmp/glance-tests/2', - 'ETag': '06ff575a2856444fbe93100157ed74ab92eb7eff', - 'x-image-meta-size': '19', # str, not int - 'x-image-meta-name': 'fake image #2 with utf-8 éàè', - 'x-image-meta-property-prop_éé': 'ça marche', - 'x-image-meta-property-prop_çé': 'çé'} - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'HEAD' - req.remote_addr = "1.2.3.4" - req.context = self.context - response = webob.Response(request=req) - self.serializer.meta(response, FIXTURE) - if six.PY2: - self.assertNotEqual(type(FIXTURE['image_meta']['name']), - type(response.headers['x-image-meta-name'])) - if six.PY3: - self.assertEqual(FIXTURE['image_meta']['name'], - response.headers['x-image-meta-name']) - else: - self.assertEqual( - FIXTURE['image_meta']['name'], - response.headers['x-image-meta-name'].decode('utf-8')) - - for key, value in six.iteritems(exp_headers): - self.assertEqual(value, response.headers[key]) - - if six.PY2: - FIXTURE['image_meta']['properties'][u'prop_bad'] = 'çé' - self.assertRaises(UnicodeDecodeError, - self.serializer.meta, response, FIXTURE) - - def test_show(self): - exp_headers = {'x-image-meta-id': UUID2, - 'x-image-meta-location': 'file:///tmp/glance-tests/2', - 'ETag': self.FIXTURE['image_meta']['checksum'], - 'x-image-meta-name': 'fake image #2'} - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'GET' - 
req.context = self.context - response = webob.Response(request=req) - self.serializer.show(response, self.FIXTURE) - for key, value in six.iteritems(exp_headers): - self.assertEqual(value, response.headers[key]) - - self.assertEqual(b'chunk67891123456789', response.body) - - def test_show_notify(self): - """Make sure an eventlet posthook for notify_image_sent is added.""" - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'GET' - req.context = self.context - response = webob.Response(request=req) - response.request.environ['eventlet.posthooks'] = [] - - self.serializer.show(response, self.FIXTURE) - - # just make sure the app_iter is called - for chunk in response.app_iter: - pass - - self.assertNotEqual([], response.request.environ['eventlet.posthooks']) - - def test_image_send_notification(self): - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'GET' - req.remote_addr = '1.2.3.4' - req.context = self.context - - image_meta = self.FIXTURE['image_meta'] - called = {"notified": False} - expected_payload = { - 'bytes_sent': 19, - 'image_id': UUID2, - 'owner_id': image_meta['owner'], - 'receiver_tenant_id': self.receiving_tenant, - 'receiver_user_id': self.receiving_user, - 'destination_ip': '1.2.3.4', - } - - def fake_info(_event_type, _payload): - self.assertEqual(expected_payload, _payload) - called['notified'] = True - - self.stubs.Set(self.serializer.notifier, 'info', fake_info) - - glance.api.common.image_send_notification(19, 19, image_meta, req, - self.serializer.notifier) - - self.assertTrue(called['notified']) - - def test_image_send_notification_error(self): - """Ensure image.send notification is sent on error.""" - req = webob.Request.blank("/images/%s" % UUID2) - req.method = 'GET' - req.remote_addr = '1.2.3.4' - req.context = self.context - - image_meta = self.FIXTURE['image_meta'] - called = {"notified": False} - expected_payload = { - 'bytes_sent': 17, - 'image_id': UUID2, - 'owner_id': image_meta['owner'], - 
'receiver_tenant_id': self.receiving_tenant, - 'receiver_user_id': self.receiving_user, - 'destination_ip': '1.2.3.4', - } - - def fake_error(_event_type, _payload): - self.assertEqual(expected_payload, _payload) - called['notified'] = True - - self.stubs.Set(self.serializer.notifier, 'error', fake_error) - - # expected and actually sent bytes differ - glance.api.common.image_send_notification(17, 19, image_meta, req, - self.serializer.notifier) - - self.assertTrue(called['notified']) - - def test_redact_location(self): - """Ensure location redaction does not change original metadata""" - image_meta = {'size': 3, 'id': '123', 'location': 'http://localhost'} - redacted_image_meta = {'size': 3, 'id': '123'} - copy_image_meta = copy.deepcopy(image_meta) - tmp_image_meta = glance.api.v1.images.redact_loc(image_meta) - - self.assertEqual(image_meta, copy_image_meta) - self.assertEqual(redacted_image_meta, tmp_image_meta) - - def test_noop_redact_location(self): - """Check no-op location redaction does not change original metadata""" - image_meta = {'size': 3, 'id': '123'} - redacted_image_meta = {'size': 3, 'id': '123'} - copy_image_meta = copy.deepcopy(image_meta) - tmp_image_meta = glance.api.v1.images.redact_loc(image_meta) - - self.assertEqual(image_meta, copy_image_meta) - self.assertEqual(redacted_image_meta, tmp_image_meta) - self.assertEqual(redacted_image_meta, image_meta) - - -class TestFilterValidator(base.IsolatedUnitTest): - def test_filter_validator(self): - self.assertFalse(glance.api.v1.filters.validate('size_max', -1)) - self.assertTrue(glance.api.v1.filters.validate('size_max', 1)) - self.assertTrue(glance.api.v1.filters.validate('protected', 'True')) - self.assertTrue(glance.api.v1.filters.validate('protected', 'FALSE')) - self.assertFalse(glance.api.v1.filters.validate('protected', '-1')) - - -class TestAPIProtectedProps(base.IsolatedUnitTest): - def setUp(self): - """Establish a clean test environment""" - super(TestAPIProtectedProps, self).setUp() 
- self.mapper = routes.Mapper() - # turn on property protections - self.set_property_protections() - self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper)) - db_api.get_engine() - db_models.unregister_models(db_api.get_engine()) - db_models.register_models(db_api.get_engine()) - - def tearDown(self): - """Clear the test environment""" - super(TestAPIProtectedProps, self).tearDown() - self.destroy_fixtures() - - def destroy_fixtures(self): - # Easiest to just drop the models and re-create them... - db_models.unregister_models(db_api.get_engine()) - db_models.register_models(db_api.get_engine()) - - def _create_admin_image(self, props=None): - if props is None: - props = {} - request = unit_test_utils.get_fake_request(path='/images') - headers = {'x-image-meta-disk-format': 'ami', - 'x-image-meta-container-format': 'ami', - 'x-image-meta-name': 'foo', - 'x-image-meta-size': '0', - 'x-auth-token': 'user:tenant:admin'} - headers.update(props) - for k, v in six.iteritems(headers): - request.headers[k] = v - created_image = request.get_response(self.api) - res_body = jsonutils.loads(created_image.body)['image'] - image_id = res_body['id'] - return image_id - - def test_prop_protection_with_create_and_permitted_role(self): - """ - As admin role, create an image and verify permitted role 'member' can - create a protected property - """ - image_id = self._create_admin_image() - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:member', - 'x-image-meta-property-x_owner_foo': 'bar'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual('bar', res_body['properties']['x_owner_foo']) - - def test_prop_protection_with_permitted_policy_config(self): - """ - As admin role, create an image and verify permitted role 'member' can - create a protected 
property - """ - self.set_property_protections(use_policies=True) - image_id = self._create_admin_image() - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:admin', - 'x-image-meta-property-spl_create_prop_policy': 'bar'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.OK, output.status_int) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual('bar', - res_body['properties']['spl_create_prop_policy']) - - def test_prop_protection_with_create_and_unpermitted_role(self): - """ - As admin role, create an image and verify unpermitted role - 'fake_member' can *not* create a protected property - """ - image_id = self._create_admin_image() - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:fake_member', - 'x-image-meta-property-x_owner_foo': 'bar'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - another_request.get_response(self.api) - output = another_request.get_response(self.api) - self.assertEqual(webob.exc.HTTPForbidden.code, output.status_int) - self.assertIn("Property '%s' is protected" % - "x_owner_foo", output.body.decode()) - - def test_prop_protection_with_show_and_permitted_role(self): - """ - As admin role, create an image with a protected property, and verify - permitted role 'member' can read that protected property via HEAD - """ - image_id = self._create_admin_image( - {'x-image-meta-property-x_owner_foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - method='HEAD', path='/images/%s' % image_id) - headers = {'x-auth-token': 'user:tenant:member'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - res2 = another_request.get_response(self.api) - self.assertEqual('bar', - 
res2.headers['x-image-meta-property-x_owner_foo']) - - def test_prop_protection_with_show_and_unpermitted_role(self): - """ - As admin role, create an image with a protected property, and verify - permitted role 'fake_role' can *not* read that protected property via - HEAD - """ - image_id = self._create_admin_image( - {'x-image-meta-property-x_owner_foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - method='HEAD', path='/images/%s' % image_id) - headers = {'x-auth-token': 'user:tenant:fake_role'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.OK, output.status_int) - self.assertEqual(b'', output.body) - self.assertNotIn('x-image-meta-property-x_owner_foo', output.headers) - - def test_prop_protection_with_get_and_permitted_role(self): - """ - As admin role, create an image with a protected property, and verify - permitted role 'member' can read that protected property via GET - """ - image_id = self._create_admin_image( - {'x-image-meta-property-x_owner_foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - method='GET', path='/images/%s' % image_id) - headers = {'x-auth-token': 'user:tenant:member'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - res2 = another_request.get_response(self.api) - self.assertEqual('bar', - res2.headers['x-image-meta-property-x_owner_foo']) - - def test_prop_protection_with_get_and_unpermitted_role(self): - """ - As admin role, create an image with a protected property, and verify - permitted role 'fake_role' can *not* read that protected property via - GET - """ - image_id = self._create_admin_image( - {'x-image-meta-property-x_owner_foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - method='GET', path='/images/%s' % image_id) - headers = {'x-auth-token': 'user:tenant:fake_role'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - 
output = another_request.get_response(self.api) - self.assertEqual(http_client.OK, output.status_int) - self.assertEqual(b'', output.body) - self.assertNotIn('x-image-meta-property-x_owner_foo', output.headers) - - def test_prop_protection_with_detail_and_permitted_role(self): - """ - As admin role, create an image with a protected property, and verify - permitted role 'member' can read that protected property via - /images/detail - """ - self._create_admin_image({'x-image-meta-property-x_owner_foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - method='GET', path='/images/detail') - headers = {'x-auth-token': 'user:tenant:member'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.OK, output.status_int) - res_body = jsonutils.loads(output.body)['images'][0] - self.assertEqual('bar', res_body['properties']['x_owner_foo']) - - def test_prop_protection_with_detail_and_permitted_policy(self): - """ - As admin role, create an image with a protected property, and verify - permitted role 'member' can read that protected property via - /images/detail - """ - self.set_property_protections(use_policies=True) - self._create_admin_image({'x-image-meta-property-x_owner_foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - method='GET', path='/images/detail') - headers = {'x-auth-token': 'user:tenant:member'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.OK, output.status_int) - res_body = jsonutils.loads(output.body)['images'][0] - self.assertEqual('bar', res_body['properties']['x_owner_foo']) - - def test_prop_protection_with_detail_and_unpermitted_role(self): - """ - As admin role, create an image with a protected property, and verify - permitted role 'fake_role' can *not* read that protected property via - /images/detail - """ - 
self._create_admin_image({'x-image-meta-property-x_owner_foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - method='GET', path='/images/detail') - headers = {'x-auth-token': 'user:tenant:fake_role'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.OK, output.status_int) - res_body = jsonutils.loads(output.body)['images'][0] - self.assertNotIn('x-image-meta-property-x_owner_foo', - res_body['properties']) - - def test_prop_protection_with_detail_and_unpermitted_policy(self): - """ - As admin role, create an image with a protected property, and verify - permitted role 'fake_role' can *not* read that protected property via - /images/detail - """ - self.set_property_protections(use_policies=True) - self._create_admin_image({'x-image-meta-property-x_owner_foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - method='GET', path='/images/detail') - headers = {'x-auth-token': 'user:tenant:fake_role'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.OK, output.status_int) - res_body = jsonutils.loads(output.body)['images'][0] - self.assertNotIn('x-image-meta-property-x_owner_foo', - res_body['properties']) - - def test_prop_protection_with_update_and_permitted_role(self): - """ - As admin role, create an image with protected property, and verify - permitted role 'member' can update that protected property - """ - image_id = self._create_admin_image( - {'x-image-meta-property-x_owner_foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:member', - 'x-image-meta-property-x_owner_foo': 'baz'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = 
jsonutils.loads(output.body)['image'] - self.assertEqual('baz', res_body['properties']['x_owner_foo']) - - def test_prop_protection_with_update_and_permitted_policy(self): - """ - As admin role, create an image with protected property, and verify - permitted role 'admin' can update that protected property - """ - self.set_property_protections(use_policies=True) - image_id = self._create_admin_image( - {'x-image-meta-property-spl_default_policy': 'bar'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:admin', - 'x-image-meta-property-spl_default_policy': 'baz'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual('baz', res_body['properties']['spl_default_policy']) - - def test_prop_protection_with_update_and_unpermitted_role(self): - """ - As admin role, create an image with protected property, and verify - unpermitted role 'fake_role' can *not* update that protected property - """ - image_id = self._create_admin_image( - {'x-image-meta-property-x_owner_foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:fake_role', - 'x-image-meta-property-x_owner_foo': 'baz'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(webob.exc.HTTPForbidden.code, output.status_int) - self.assertIn("Property '%s' is protected" % - "x_owner_foo", output.body.decode()) - - def test_prop_protection_with_update_and_unpermitted_policy(self): - """ - As admin role, create an image with protected property, and verify - unpermitted role 'fake_role' can *not* update that protected property - """ - self.set_property_protections(use_policies=True) - image_id = 
self._create_admin_image( - {'x-image-meta-property-x_owner_foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:fake_role', - 'x-image-meta-property-x_owner_foo': 'baz'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(webob.exc.HTTPForbidden.code, output.status_int) - self.assertIn("Property '%s' is protected" % - "x_owner_foo", output.body.decode()) - - def test_prop_protection_update_without_read(self): - """ - Test protected property cannot be updated without read permission - """ - image_id = self._create_admin_image( - {'x-image-meta-property-spl_update_only_prop': 'foo'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:spl_role', - 'x-image-meta-property-spl_update_only_prop': 'bar'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(webob.exc.HTTPForbidden.code, output.status_int) - self.assertIn("Property '%s' is protected" % - "spl_update_only_prop", output.body.decode()) - - def test_prop_protection_update_noop(self): - """ - Test protected property update is allowed as long as the user has read - access and the value is unchanged - """ - image_id = self._create_admin_image( - {'x-image-meta-property-spl_read_prop': 'foo'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:spl_role', - 'x-image-meta-property-spl_read_prop': 'foo'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual('foo', res_body['properties']['spl_read_prop']) - 
self.assertEqual(http_client.OK, output.status_int) - - def test_prop_protection_with_delete_and_permitted_role(self): - """ - As admin role, create an image with protected property, and verify - permitted role 'member' can can delete that protected property - """ - image_id = self._create_admin_image( - {'x-image-meta-property-x_owner_foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:member', - 'X-Glance-Registry-Purge-Props': 'True'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual({}, res_body['properties']) - - def test_prop_protection_with_delete_and_permitted_policy(self): - """ - As admin role, create an image with protected property, and verify - permitted role 'member' can can delete that protected property - """ - self.set_property_protections(use_policies=True) - image_id = self._create_admin_image( - {'x-image-meta-property-x_owner_foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:member', - 'X-Glance-Registry-Purge-Props': 'True'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual({}, res_body['properties']) - - def test_prop_protection_with_delete_and_unpermitted_read(self): - """ - Test protected property cannot be deleted without read permission - """ - image_id = self._create_admin_image( - {'x-image-meta-property-x_owner_foo': 'bar'}) - - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:fake_role', - 'X-Glance-Registry-Purge-Props': 'True'} - for k, v in 
six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.OK, output.status_int) - self.assertNotIn('x-image-meta-property-x_owner_foo', output.headers) - - another_request = unit_test_utils.get_fake_request( - method='HEAD', path='/images/%s' % image_id) - headers = {'x-auth-token': 'user:tenant:admin'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.OK, output.status_int) - self.assertEqual(b'', output.body) - self.assertEqual('bar', - output.headers['x-image-meta-property-x_owner_foo']) - - def test_prop_protection_with_delete_and_unpermitted_delete(self): - """ - Test protected property cannot be deleted without delete permission - """ - image_id = self._create_admin_image( - {'x-image-meta-property-spl_update_prop': 'foo'}) - - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:spl_role', - 'X-Glance-Registry-Purge-Props': 'True'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, output.status_int) - self.assertIn("Property '%s' is protected" % - "spl_update_prop", output.body.decode()) - - another_request = unit_test_utils.get_fake_request( - method='HEAD', path='/images/%s' % image_id) - headers = {'x-auth-token': 'user:tenant:admin'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.OK, output.status_int) - self.assertEqual(b'', output.body) - self.assertEqual( - 'foo', output.headers['x-image-meta-property-spl_update_prop']) - - def test_read_protected_props_leak_with_update(self): - """ - Verify when updating props that ones we don't have read permission for - are not 
disclosed - """ - image_id = self._create_admin_image( - {'x-image-meta-property-spl_update_prop': '0', - 'x-image-meta-property-foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:spl_role', - 'x-image-meta-property-spl_update_prop': '1', - 'X-Glance-Registry-Purge-Props': 'False'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual('1', res_body['properties']['spl_update_prop']) - self.assertNotIn('foo', res_body['properties']) - - def test_update_protected_props_mix_no_read(self): - """ - Create an image with two props - one only readable by admin, and one - readable/updatable by member. Verify member can successfully update - their property while the admin owned one is ignored transparently - """ - image_id = self._create_admin_image( - {'x-image-meta-property-admin_foo': 'bar', - 'x-image-meta-property-x_owner_foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:member', - 'x-image-meta-property-x_owner_foo': 'baz'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual('baz', res_body['properties']['x_owner_foo']) - self.assertNotIn('admin_foo', res_body['properties']) - - def test_update_protected_props_mix_read(self): - """ - Create an image with two props - one readable/updatable by admin, but - also readable by spl_role. The other is readable/updatable by - spl_role. 
Verify spl_role can successfully update their property but - not the admin owned one - """ - custom_props = { - 'x-image-meta-property-spl_read_only_prop': '1', - 'x-image-meta-property-spl_update_prop': '2' - } - image_id = self._create_admin_image(custom_props) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - - # verify spl_role can update it's prop - headers = {'x-auth-token': 'user:tenant:spl_role', - 'x-image-meta-property-spl_read_only_prop': '1', - 'x-image-meta-property-spl_update_prop': '1'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual(http_client.OK, output.status_int) - self.assertEqual('1', res_body['properties']['spl_read_only_prop']) - self.assertEqual('1', res_body['properties']['spl_update_prop']) - - # verify spl_role can not update admin controlled prop - headers = {'x-auth-token': 'user:tenant:spl_role', - 'x-image-meta-property-spl_read_only_prop': '2', - 'x-image-meta-property-spl_update_prop': '1'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, output.status_int) - - def test_delete_protected_props_mix_no_read(self): - """ - Create an image with two props - one only readable by admin, and one - readable/deletable by member. 
Verify member can successfully delete - their property while the admin owned one is ignored transparently - """ - image_id = self._create_admin_image( - {'x-image-meta-property-admin_foo': 'bar', - 'x-image-meta-property-x_owner_foo': 'bar'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:member', - 'X-Glance-Registry-Purge-Props': 'True'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = jsonutils.loads(output.body)['image'] - self.assertNotIn('x_owner_foo', res_body['properties']) - self.assertNotIn('admin_foo', res_body['properties']) - - def test_delete_protected_props_mix_read(self): - """ - Create an image with two props - one readable/deletable by admin, but - also readable by spl_role. The other is readable/deletable by - spl_role. Verify spl_role is forbidden to purge_props in this scenario - without retaining the readable prop. - """ - custom_props = { - 'x-image-meta-property-spl_read_only_prop': '1', - 'x-image-meta-property-spl_delete_prop': '2' - } - image_id = self._create_admin_image(custom_props) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:spl_role', - 'X-Glance-Registry-Purge-Props': 'True'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, output.status_int) - - def test_create_protected_prop_check_case_insensitive(self): - """ - Verify that role check is case-insensitive i.e. 
the property - marked with role Member is creatable by the member role - """ - image_id = self._create_admin_image() - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:member', - 'x-image-meta-property-x_case_insensitive': '1'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual('1', res_body['properties']['x_case_insensitive']) - - def test_read_protected_prop_check_case_insensitive(self): - """ - Verify that role check is case-insensitive i.e. the property - marked with role Member is readable by the member role - """ - custom_props = { - 'x-image-meta-property-x_case_insensitive': '1' - } - image_id = self._create_admin_image(custom_props) - another_request = unit_test_utils.get_fake_request( - method='HEAD', path='/images/%s' % image_id) - headers = {'x-auth-token': 'user:tenant:member'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.OK, output.status_int) - self.assertEqual(b'', output.body) - self.assertEqual( - '1', output.headers['x-image-meta-property-x_case_insensitive']) - - def test_update_protected_props_check_case_insensitive(self): - """ - Verify that role check is case-insensitive i.e. 
the property - marked with role Member is updatable by the member role - """ - image_id = self._create_admin_image( - {'x-image-meta-property-x_case_insensitive': '1'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:member', - 'x-image-meta-property-x_case_insensitive': '2'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual('2', res_body['properties']['x_case_insensitive']) - - def test_delete_protected_props_check_case_insensitive(self): - """ - Verify that role check is case-insensitive i.e. the property - marked with role Member is deletable by the member role - """ - image_id = self._create_admin_image( - {'x-image-meta-property-x_case_insensitive': '1'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:member', - 'X-Glance-Registry-Purge-Props': 'True'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual({}, res_body['properties']) - - def test_create_non_protected_prop(self): - """ - Verify property marked with special char '@' is creatable by an unknown - role - """ - image_id = self._create_admin_image() - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:joe_soap', - 'x-image-meta-property-x_all_permitted': '1'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual('1', res_body['properties']['x_all_permitted']) - - def test_read_non_protected_prop(self): - """ - Verify 
property marked with special char '@' is readable by an unknown - role - """ - custom_props = { - 'x-image-meta-property-x_all_permitted': '1' - } - image_id = self._create_admin_image(custom_props) - another_request = unit_test_utils.get_fake_request( - method='HEAD', path='/images/%s' % image_id) - headers = {'x-auth-token': 'user:tenant:joe_soap'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.OK, output.status_int) - self.assertEqual(b'', output.body) - self.assertEqual( - '1', output.headers['x-image-meta-property-x_all_permitted']) - - def test_update_non_protected_prop(self): - """ - Verify property marked with special char '@' is updatable by an unknown - role - """ - image_id = self._create_admin_image( - {'x-image-meta-property-x_all_permitted': '1'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:joe_soap', - 'x-image-meta-property-x_all_permitted': '2'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual('2', res_body['properties']['x_all_permitted']) - - def test_delete_non_protected_prop(self): - """ - Verify property marked with special char '@' is deletable by an unknown - role - """ - image_id = self._create_admin_image( - {'x-image-meta-property-x_all_permitted': '1'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:joe_soap', - 'X-Glance-Registry-Purge-Props': 'True'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual({}, res_body['properties']) - - def 
test_create_locked_down_protected_prop(self): - """ - Verify a property protected by special char '!' is creatable by no one - """ - image_id = self._create_admin_image() - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:member', - 'x-image-meta-property-x_none_permitted': '1'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, output.status_int) - # also check admin can not create - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:admin', - 'x-image-meta-property-x_none_permitted_admin': '1'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, output.status_int) - - def test_read_locked_down_protected_prop(self): - """ - Verify a property protected by special char '!' 
is readable by no one - """ - custom_props = { - 'x-image-meta-property-x_none_read': '1' - } - image_id = self._create_admin_image(custom_props) - another_request = unit_test_utils.get_fake_request( - method='HEAD', path='/images/%s' % image_id) - headers = {'x-auth-token': 'user:tenant:member'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.OK, output.status_int) - self.assertNotIn('x_none_read', output.headers) - # also check admin can not read - another_request = unit_test_utils.get_fake_request( - method='HEAD', path='/images/%s' % image_id) - headers = {'x-auth-token': 'user:tenant:admin'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.OK, output.status_int) - self.assertNotIn('x_none_read', output.headers) - - def test_update_locked_down_protected_prop(self): - """ - Verify a property protected by special char '!' 
is updatable by no one - """ - image_id = self._create_admin_image( - {'x-image-meta-property-x_none_update': '1'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:member', - 'x-image-meta-property-x_none_update': '2'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, output.status_int) - # also check admin can't update property - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:admin', - 'x-image-meta-property-x_none_update': '2'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, output.status_int) - - def test_delete_locked_down_protected_prop(self): - """ - Verify a property protected by special char '!' 
is deletable by no one - """ - image_id = self._create_admin_image( - {'x-image-meta-property-x_none_delete': '1'}) - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:member', - 'X-Glance-Registry-Purge-Props': 'True'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, output.status_int) - # also check admin can't delete - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:admin', - 'X-Glance-Registry-Purge-Props': 'True'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - output = another_request.get_response(self.api) - self.assertEqual(http_client.FORBIDDEN, output.status_int) - - -class TestAPIPropertyQuotas(base.IsolatedUnitTest): - def setUp(self): - """Establish a clean test environment""" - super(TestAPIPropertyQuotas, self).setUp() - self.mapper = routes.Mapper() - self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper)) - db_api.get_engine() - db_models.unregister_models(db_api.get_engine()) - db_models.register_models(db_api.get_engine()) - - def _create_admin_image(self, props=None): - if props is None: - props = {} - request = unit_test_utils.get_fake_request(path='/images') - headers = {'x-image-meta-disk-format': 'ami', - 'x-image-meta-container-format': 'ami', - 'x-image-meta-name': 'foo', - 'x-image-meta-size': '0', - 'x-auth-token': 'user:tenant:admin'} - headers.update(props) - for k, v in six.iteritems(headers): - request.headers[k] = v - created_image = request.get_response(self.api) - res_body = jsonutils.loads(created_image.body)['image'] - image_id = res_body['id'] - return image_id - - def test_update_image_with_too_many_properties(self): - """ - Ensure that updating image properties enforces the quota. 
- """ - self.config(image_property_quota=1) - image_id = self._create_admin_image() - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:joe_soap', - 'x-image-meta-property-x_all_permitted': '1', - 'x-image-meta-property-x_all_permitted_foo': '2'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - - output = another_request.get_response(self.api) - - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, - output.status_int) - self.assertIn("Attempted: 2, Maximum: 1", output.text) - - def test_update_image_with_too_many_properties_without_purge_props(self): - """ - Ensure that updating image properties counts existing image properties - when enforcing property quota. - """ - self.config(image_property_quota=1) - request = unit_test_utils.get_fake_request(path='/images') - headers = {'x-image-meta-disk-format': 'ami', - 'x-image-meta-container-format': 'ami', - 'x-image-meta-name': 'foo', - 'x-image-meta-size': '0', - 'x-image-meta-property-x_all_permitted_create': '1', - 'x-auth-token': 'user:tenant:admin'} - for k, v in six.iteritems(headers): - request.headers[k] = v - created_image = request.get_response(self.api) - res_body = jsonutils.loads(created_image.body)['image'] - image_id = res_body['id'] - - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:joe_soap', - 'x-glance-registry-purge-props': 'False', - 'x-image-meta-property-x_all_permitted': '1'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - - output = another_request.get_response(self.api) - - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, - output.status_int) - self.assertIn("Attempted: 2, Maximum: 1", output.text) - - def test_update_properties_without_purge_props_overwrite_value(self): - """ - Ensure that updating image properties does not count against image - property 
quota. - """ - self.config(image_property_quota=2) - request = unit_test_utils.get_fake_request(path='/images') - headers = {'x-image-meta-disk-format': 'ami', - 'x-image-meta-container-format': 'ami', - 'x-image-meta-name': 'foo', - 'x-image-meta-size': '0', - 'x-image-meta-property-x_all_permitted_create': '1', - 'x-auth-token': 'user:tenant:admin'} - for k, v in six.iteritems(headers): - request.headers[k] = v - created_image = request.get_response(self.api) - res_body = jsonutils.loads(created_image.body)['image'] - image_id = res_body['id'] - - another_request = unit_test_utils.get_fake_request( - path='/images/%s' % image_id, method='PUT') - headers = {'x-auth-token': 'user:tenant:joe_soap', - 'x-glance-registry-purge-props': 'False', - 'x-image-meta-property-x_all_permitted_create': '3', - 'x-image-meta-property-x_all_permitted': '1'} - for k, v in six.iteritems(headers): - another_request.headers[k] = v - - output = another_request.get_response(self.api) - - self.assertEqual(http_client.OK, output.status_int) - res_body = jsonutils.loads(output.body)['image'] - self.assertEqual('1', res_body['properties']['x_all_permitted']) - self.assertEqual('3', res_body['properties']['x_all_permitted_create']) diff --git a/glance/tests/unit/v1/test_registry_api.py b/glance/tests/unit/v1/test_registry_api.py deleted file mode 100644 index 3461617d..00000000 --- a/glance/tests/unit/v1/test_registry_api.py +++ /dev/null @@ -1,2162 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import uuid - -import mock -from oslo_serialization import jsonutils -import routes -import six -from six.moves import http_client as http -import webob - -import glance.api.common -import glance.common.config -from glance.common import crypt -from glance.common import timeutils -from glance import context -from glance.db.sqlalchemy import api as db_api -from glance.db.sqlalchemy import models as db_models -from glance.registry.api import v1 as rserver -from glance.tests.unit import base -from glance.tests import utils as test_utils - -_gen_uuid = lambda: str(uuid.uuid4()) - -UUID1 = _gen_uuid() -UUID2 = _gen_uuid() - - -class TestRegistryAPI(base.IsolatedUnitTest, test_utils.RegistryAPIMixIn): - def setUp(self): - """Establish a clean test environment""" - super(TestRegistryAPI, self).setUp() - self.mapper = routes.Mapper() - self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), - is_admin=True) - - def _get_extra_fixture(id, name, **kwargs): - return self.get_extra_fixture( - id, name, - locations=[{'url': "file:///%s/%s" % (self.test_dir, id), - 'metadata': {}, 'status': 'active'}], **kwargs) - - self.FIXTURES = [ - _get_extra_fixture(UUID1, 'fake image #1', is_public=False, - disk_format='ami', container_format='ami', - min_disk=0, min_ram=0, owner=123, - size=13, properties={'type': 'kernel'}), - _get_extra_fixture(UUID2, 'fake image #2', - min_disk=5, min_ram=256, - size=19, properties={})] - self.context = context.RequestContext(is_admin=True) - db_api.get_engine() - self.destroy_fixtures() - self.create_fixtures() - - def tearDown(self): - """Clear the test environment""" - super(TestRegistryAPI, self).tearDown() - self.destroy_fixtures() - - def test_show(self): - """ - Tests that the /images/ registry API endpoint - returns the expected image - """ - fixture = {'id': UUID2, - 'name': 'fake image #2', - 'size': 19, - 
'min_ram': 256, - 'min_disk': 5, - 'checksum': None} - res = self.get_api_response_ext(http.OK, '/images/%s' % UUID2) - res_dict = jsonutils.loads(res.body) - image = res_dict['image'] - for k, v in six.iteritems(fixture): - self.assertEqual(v, image[k]) - - def test_show_unknown(self): - """ - Tests that the /images/ registry API endpoint - returns a 404 for an unknown image id - """ - self.get_api_response_ext(http.NOT_FOUND, '/images/%s' % _gen_uuid()) - - def test_show_invalid(self): - """ - Tests that the /images/ registry API endpoint - returns a 404 for an invalid (therefore unknown) image id - """ - self.get_api_response_ext(http.NOT_FOUND, '/images/%s' % _gen_uuid()) - - def test_show_deleted_image_as_admin(self): - """ - Tests that the /images/ registry API endpoint - returns a 200 for deleted image to admin user. - """ - # Delete image #2 - self.get_api_response_ext(http.OK, '/images/%s' % UUID2, - method='DELETE') - - self.get_api_response_ext(http.OK, '/images/%s' % UUID2) - - def test_show_deleted_image_as_nonadmin(self): - """ - Tests that the /images/ registry API endpoint - returns a 404 for deleted image to non-admin user. 
- """ - # Delete image #2 - self.get_api_response_ext(http.OK, '/images/%s' % UUID2, - method='DELETE') - - api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), - is_admin=False) - self.get_api_response_ext(http.NOT_FOUND, '/images/%s' % UUID2, - api=api) - - def test_show_private_image_with_no_admin_user(self): - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, size=18, owner='test user', - is_public=False) - db_api.image_create(self.context, extra_fixture) - test_rserv = rserver.API(self.mapper) - api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) - self.get_api_response_ext(http.NOT_FOUND, '/images/%s' % UUID4, - api=api) - - def test_get_root(self): - """ - Tests that the root registry API returns "index", - which is a list of public images - """ - fixture = {'id': UUID2, 'size': 19, 'checksum': None} - res = self.get_api_response_ext(http.OK, url='/') - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(1, len(images)) - - for k, v in six.iteritems(fixture): - self.assertEqual(v, images[0][k]) - - def test_get_index(self): - """ - Tests that the /images registry API returns list of - public images - """ - fixture = {'id': UUID2, 'size': 19, 'checksum': None} - res = self.get_api_response_ext(http.OK) - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(1, len(images)) - - for k, v in six.iteritems(fixture): - self.assertEqual(v, images[0][k]) - - def test_get_index_marker(self): - """ - Tests that the /images registry API returns list of - public images that conforms to a marker query param - """ - time1 = timeutils.utcnow() + datetime.timedelta(seconds=5) - time2 = timeutils.utcnow() + datetime.timedelta(seconds=4) - time3 = timeutils.utcnow() - - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, size=19, created_at=time1) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = 
self.get_fixture(id=UUID4, created_at=time2) - - db_api.image_create(self.context, extra_fixture) - - UUID5 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID5, created_at=time3) - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, - url='/images?marker=%s' % UUID4) - self.assertEqualImages(res, (UUID5, UUID2)) - - def test_get_index_unknown_marker(self): - """ - Tests that the /images registry API returns a 400 - when an unknown marker is provided - """ - self.get_api_response_ext(http.BAD_REQUEST, - url='/images?marker=%s' % _gen_uuid()) - - def test_get_index_malformed_marker(self): - """ - Tests that the /images registry API returns a 400 - when a malformed marker is provided - """ - res = self.get_api_response_ext(http.BAD_REQUEST, - url='/images?marker=4') - self.assertIn(b'marker', res.body) - - def test_get_index_forbidden_marker(self): - """ - Tests that the /images registry API returns a 400 - when a forbidden marker is provided - """ - test_rserv = rserver.API(self.mapper) - api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) - self.get_api_response_ext(http.BAD_REQUEST, - url='/images?marker=%s' % UUID1, api=api) - - def test_get_index_limit(self): - """ - Tests that the /images registry API returns list of - public images that conforms to a limit query param - """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, size=19) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4) - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, url='/images?limit=1') - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(1, len(images)) - - # expect list to be sorted by created_at desc - self.assertEqual(UUID4, images[0]['id']) - - def test_get_index_limit_negative(self): - """ - Tests that the /images registry API returns list of - public images 
that conforms to a limit query param - """ - self.get_api_response_ext(http.BAD_REQUEST, url='/images?limit=-1') - - def test_get_index_limit_non_int(self): - """ - Tests that the /images registry API returns list of - public images that conforms to a limit query param - """ - self.get_api_response_ext(http.BAD_REQUEST, url='/images?limit=a') - - def test_get_index_limit_marker(self): - """ - Tests that the /images registry API returns list of - public images that conforms to limit and marker query params - """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, size=19) - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid()) - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext( - http.OK, url='/images?marker=%s&limit=1' % UUID3) - self.assertEqualImages(res, (UUID2,)) - - def test_get_index_filter_on_user_defined_properties(self): - """ - Tests that /images registry API returns list of public images based - a filter on user-defined properties. - """ - image1_id = _gen_uuid() - properties = {'distro': 'ubuntu', 'arch': 'i386'} - extra_fixture = self.get_fixture(id=image1_id, name='image-extra-1', - properties=properties) - db_api.image_create(self.context, extra_fixture) - - image2_id = _gen_uuid() - properties = {'distro': 'ubuntu', 'arch': 'x86_64', 'foo': 'bar'} - extra_fixture = self.get_fixture(id=image2_id, name='image-extra-2', - properties=properties) - db_api.image_create(self.context, extra_fixture) - - # Test index with filter containing one user-defined property. - # Filter is 'property-distro=ubuntu'. - # Verify both image1 and image2 are returned - res = self.get_api_response_ext(http.OK, url='/images?' 
- 'property-distro=ubuntu') - images = jsonutils.loads(res.body)['images'] - self.assertEqual(2, len(images)) - self.assertEqual(image2_id, images[0]['id']) - self.assertEqual(image1_id, images[1]['id']) - - # Test index with filter containing one user-defined property but - # non-existent value. Filter is 'property-distro=fedora'. - # Verify neither images are returned - res = self.get_api_response_ext(http.OK, url='/images?' - 'property-distro=fedora') - images = jsonutils.loads(res.body)['images'] - self.assertEqual(0, len(images)) - - # Test index with filter containing one user-defined property but - # unique value. Filter is 'property-arch=i386'. - # Verify only image1 is returned. - res = self.get_api_response_ext(http.OK, url='/images?' - 'property-arch=i386') - images = jsonutils.loads(res.body)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image1_id, images[0]['id']) - - # Test index with filter containing one user-defined property but - # unique value. Filter is 'property-arch=x86_64'. - # Verify only image1 is returned. - res = self.get_api_response_ext(http.OK, url='/images?' - 'property-arch=x86_64') - images = jsonutils.loads(res.body)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image2_id, images[0]['id']) - - # Test index with filter containing unique user-defined property. - # Filter is 'property-foo=bar'. - # Verify only image2 is returned. - res = self.get_api_response_ext(http.OK, - url='/images?property-foo=bar') - images = jsonutils.loads(res.body)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image2_id, images[0]['id']) - - # Test index with filter containing unique user-defined property but - # .value is non-existent. Filter is 'property-foo=baz'. - # Verify neither images are returned. 
- res = self.get_api_response_ext(http.OK, - url='/images?property-foo=baz') - images = jsonutils.loads(res.body)['images'] - self.assertEqual(0, len(images)) - - # Test index with filter containing multiple user-defined properties - # Filter is 'property-arch=x86_64&property-distro=ubuntu'. - # Verify only image2 is returned. - res = self.get_api_response_ext(http.OK, url='/images?' - 'property-arch=x86_64&' - 'property-distro=ubuntu') - images = jsonutils.loads(res.body)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image2_id, images[0]['id']) - - # Test index with filter containing multiple user-defined properties - # Filter is 'property-arch=i386&property-distro=ubuntu'. - # Verify only image1 is returned. - res = self.get_api_response_ext(http.OK, - url='/images?property-arch=i386&' - 'property-distro=ubuntu') - images = jsonutils.loads(res.body)['images'] - self.assertEqual(1, len(images)) - self.assertEqual(image1_id, images[0]['id']) - - # Test index with filter containing multiple user-defined properties. - # Filter is 'property-arch=random&property-distro=ubuntu'. - # Verify neither images are returned. - res = self.get_api_response_ext(http.OK, url='/images?' - 'property-arch=random&' - 'property-distro=ubuntu') - images = jsonutils.loads(res.body)['images'] - self.assertEqual(0, len(images)) - - # Test index with filter containing multiple user-defined properties. - # Filter is 'property-arch=random&property-distro=random'. - # Verify neither images are returned. - res = self.get_api_response_ext(http.OK, url='/images?' - 'property-arch=random&' - 'property-distro=random') - images = jsonutils.loads(res.body)['images'] - self.assertEqual(0, len(images)) - - # Test index with filter containing multiple user-defined properties. - # Filter is 'property-boo=far&property-poo=far'. - # Verify neither images are returned. 
- res = self.get_api_response_ext(http.OK, - url='/images?property-boo=far&' - 'property-poo=far') - images = jsonutils.loads(res.body)['images'] - self.assertEqual(0, len(images)) - - # Test index with filter containing multiple user-defined properties. - # Filter is 'property-foo=bar&property-poo=far'. - # Verify neither images are returned. - res = self.get_api_response_ext(http.OK, - url='/images?property-foo=bar&' - 'property-poo=far') - images = jsonutils.loads(res.body)['images'] - self.assertEqual(0, len(images)) - - def test_get_index_filter_name(self): - """ - Tests that the /images registry API returns list of - public images that have a specific name. This is really a sanity - check, filtering is tested more in-depth using /images/detail - """ - - extra_fixture = self.get_fixture(id=_gen_uuid(), - name='new name! #123', size=19) - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #123') - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, - url='/images?name=new name! #123') - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(2, len(images)) - - for image in images: - self.assertEqual('new name! 
#123', image['name']) - - def test_get_index_sort_default_created_at_desc(self): - """ - Tests that the /images registry API returns list of - public images that conforms to a default sort key/dir - """ - time1 = timeutils.utcnow() + datetime.timedelta(seconds=5) - time2 = timeutils.utcnow() + datetime.timedelta(seconds=4) - time3 = timeutils.utcnow() - - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, size=19, created_at=time1) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, created_at=time2) - - db_api.image_create(self.context, extra_fixture) - - UUID5 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID5, created_at=time3) - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, url='/images') - self.assertEqualImages(res, (UUID3, UUID4, UUID5, UUID2)) - - def test_get_index_bad_sort_key(self): - """Ensure a 400 is returned when a bad sort_key is provided.""" - self.get_api_response_ext(http.BAD_REQUEST, - url='/images?sort_key=asdf') - - def test_get_index_bad_sort_dir(self): - """Ensure a 400 is returned when a bad sort_dir is provided.""" - self.get_api_response_ext(http.BAD_REQUEST, - url='/images?sort_dir=asdf') - - def test_get_index_null_name(self): - """Check 200 is returned when sort_key is null name - - Check 200 is returned when sort_key is name and name is null - for specified marker - """ - UUID6 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID6, name=None) - - db_api.image_create(self.context, extra_fixture) - self.get_api_response_ext( - http.OK, url='/images?sort_key=name&marker=%s' % UUID6) - - def test_get_index_null_disk_format(self): - """Check 200 is returned when sort_key is null disk_format - - Check 200 is returned when sort_key is disk_format and - disk_format is null for specified marker - """ - UUID6 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID6, disk_format=None, size=19) - - 
db_api.image_create(self.context, extra_fixture) - self.get_api_response_ext( - http.OK, url='/images?sort_key=disk_format&marker=%s' % UUID6) - - def test_get_index_null_container_format(self): - """Check 200 is returned when sort_key is null container_format - - Check 200 is returned when sort_key is container_format and - container_format is null for specified marker - """ - UUID6 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID6, container_format=None) - - db_api.image_create(self.context, extra_fixture) - self.get_api_response_ext( - http.OK, url='/images?sort_key=container_format&marker=%s' % UUID6) - - def test_get_index_sort_name_asc(self): - """ - Tests that the /images registry API returns list of - public images sorted alphabetically by name in - ascending order. - """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf', size=19) - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='xyz') - - db_api.image_create(self.context, extra_fixture) - - url = '/images?sort_key=name&sort_dir=asc' - res = self.get_api_response_ext(http.OK, url=url) - self.assertEqualImages(res, (UUID3, UUID2, UUID4)) - - def test_get_index_sort_status_desc(self): - """ - Tests that the /images registry API returns list of - public images sorted alphabetically by status in - descending order. 
- """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, status='queued', size=19) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4) - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, url=( - '/images?sort_key=status&sort_dir=desc')) - self.assertEqualImages(res, (UUID3, UUID4, UUID2)) - - def test_get_index_sort_disk_format_asc(self): - """ - Tests that the /images registry API returns list of - public images sorted alphabetically by disk_format in - ascending order. - """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, disk_format='ami', - container_format='ami', size=19) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, disk_format='vdi') - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, url=( - '/images?sort_key=disk_format&sort_dir=asc')) - self.assertEqualImages(res, (UUID3, UUID4, UUID2)) - - def test_get_index_sort_container_format_desc(self): - """ - Tests that the /images registry API returns list of - public images sorted alphabetically by container_format in - descending order. - """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, size=19, disk_format='ami', - container_format='ami') - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, disk_format='iso', - container_format='bare') - - db_api.image_create(self.context, extra_fixture) - - url = '/images?sort_key=container_format&sort_dir=desc' - res = self.get_api_response_ext(http.OK, url=url) - self.assertEqualImages(res, (UUID2, UUID4, UUID3)) - - def test_get_index_sort_size_asc(self): - """ - Tests that the /images registry API returns list of - public images sorted by size in ascending order. 
- """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, disk_format='ami', - container_format='ami', size=100) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, disk_format='iso', - container_format='bare', size=2) - - db_api.image_create(self.context, extra_fixture) - - url = '/images?sort_key=size&sort_dir=asc' - res = self.get_api_response_ext(http.OK, url=url) - self.assertEqualImages(res, (UUID4, UUID2, UUID3)) - - def test_get_index_sort_created_at_asc(self): - """ - Tests that the /images registry API returns list of - public images sorted by created_at in ascending order. - """ - now = timeutils.utcnow() - time1 = now + datetime.timedelta(seconds=5) - time2 = now - - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, created_at=time1, size=19) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, created_at=time2) - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, url=( - '/images?sort_key=created_at&sort_dir=asc')) - self.assertEqualImages(res, (UUID2, UUID4, UUID3)) - - def test_get_index_sort_updated_at_desc(self): - """ - Tests that the /images registry API returns list of - public images sorted by updated_at in descending order. 
- """ - now = timeutils.utcnow() - time1 = now + datetime.timedelta(seconds=5) - time2 = now - - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, size=19, created_at=None, - updated_at=time1) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, created_at=None, - updated_at=time2) - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, url=( - '/images?sort_key=updated_at&sort_dir=desc')) - self.assertEqualImages(res, (UUID3, UUID4, UUID2)) - - def test_get_details(self): - """ - Tests that the /images/detail registry API returns - a mapping containing a list of detailed image information - """ - fixture = {'id': UUID2, - 'name': 'fake image #2', - 'is_public': True, - 'size': 19, - 'min_disk': 5, - 'min_ram': 256, - 'checksum': None, - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'status': 'active'} - - res = self.get_api_response_ext(http.OK, url='/images/detail') - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(1, len(images)) - - for k, v in six.iteritems(fixture): - self.assertEqual(v, images[0][k]) - - def test_get_details_limit_marker(self): - """ - Tests that the /images/details registry API returns list of - public images that conforms to limit and marker query params. 
- This functionality is tested more thoroughly on /images, this is - just a sanity check - """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, size=20) - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid()) - - db_api.image_create(self.context, extra_fixture) - - url = '/images/detail?marker=%s&limit=1' % UUID3 - res = self.get_api_response_ext(http.OK, url=url) - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(1, len(images)) - - # expect list to be sorted by created_at desc - self.assertEqual(UUID2, images[0]['id']) - - def test_get_details_invalid_marker(self): - """ - Tests that the /images/detail registry API returns a 400 - when an invalid marker is provided - """ - url = '/images/detail?marker=%s' % _gen_uuid() - self.get_api_response_ext(http.BAD_REQUEST, url=url) - - def test_get_details_malformed_marker(self): - """ - Tests that the /images/detail registry API returns a 400 - when a malformed marker is provided - """ - res = self.get_api_response_ext(http.BAD_REQUEST, - url='/images/detail?marker=4') - self.assertIn(b'marker', res.body) - - def test_get_details_forbidden_marker(self): - """ - Tests that the /images/detail registry API returns a 400 - when a forbidden marker is provided - """ - test_rserv = rserver.API(self.mapper) - api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) - self.get_api_response_ext(http.BAD_REQUEST, api=api, - url='/images/detail?marker=%s' % UUID1) - - def test_get_details_filter_name(self): - """ - Tests that the /images/detail registry API returns list of - public images that have a specific name - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), - name='new name! #123', size=20) - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), - name='new name! 
#123') - - db_api.image_create(self.context, extra_fixture) - - url = '/images/detail?name=new name! #123' - res = self.get_api_response_ext(http.OK, url=url) - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(2, len(images)) - - for image in images: - self.assertEqual('new name! #123', image['name']) - - def test_get_details_filter_status(self): - """ - Tests that the /images/detail registry API returns list of - public images that have a specific status - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), size=19, - status='active') - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, - url='/images/detail?status=saving') - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(1, len(images)) - - for image in images: - self.assertEqual('saving', image['status']) - - def test_get_details_filter_container_format(self): - """ - Tests that the /images/detail registry API returns list of - public images that have a specific container_format - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='vdi', - size=19) - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='ami', - container_format='ami', size=19) - - db_api.image_create(self.context, extra_fixture) - - url = '/images/detail?container_format=ovf' - res = self.get_api_response_ext(http.OK, url=url) - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(2, len(images)) - - for image in images: - self.assertEqual('ovf', image['container_format']) - - def test_get_details_filter_min_disk(self): - """ - Tests that the /images/detail registry API returns list of - public images that have a specific min_disk - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), 
min_disk=7, size=19) - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='ami', - container_format='ami', size=19) - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, - url='/images/detail?min_disk=7') - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(1, len(images)) - - for image in images: - self.assertEqual(7, image['min_disk']) - - def test_get_details_filter_min_ram(self): - """ - Tests that the /images/detail registry API returns list of - public images that have a specific min_ram - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), min_ram=514, size=19) - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='ami', - container_format='ami', size=19) - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, - url='/images/detail?min_ram=514') - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(1, len(images)) - - for image in images: - self.assertEqual(514, image['min_ram']) - - def test_get_details_filter_disk_format(self): - """ - Tests that the /images/detail registry API returns list of - public images that have a specific disk_format - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), size=19) - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='ami', - container_format='ami', size=19) - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, - url='/images/detail?disk_format=vhd') - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(2, len(images)) - - for image in images: - self.assertEqual('vhd', image['disk_format']) - - def test_get_details_filter_size_min(self): - """ - Tests that the /images/detail registry API 
returns list of - public images that have a size greater than or equal to size_min - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), size=18) - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='ami', - container_format='ami') - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, - url='/images/detail?size_min=19') - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(2, len(images)) - - for image in images: - self.assertGreaterEqual(image['size'], 19) - - def test_get_details_filter_size_max(self): - """ - Tests that the /images/detail registry API returns list of - public images that have a size less than or equal to size_max - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), size=18) - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='ami', - container_format='ami') - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, - url='/images/detail?size_max=19') - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(2, len(images)) - - for image in images: - self.assertLessEqual(image['size'], 19) - - def test_get_details_filter_size_min_max(self): - """ - Tests that the /images/detail registry API returns list of - public images that have a size less than or equal to size_max - and greater than or equal to size_min - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), size=18) - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='ami', - container_format='ami') - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), size=6) - - db_api.image_create(self.context, extra_fixture) - - url = '/images/detail?size_min=18&size_max=19' - res = 
self.get_api_response_ext(http.OK, url=url) - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(2, len(images)) - - for image in images: - self.assertTrue(18 <= image['size'] <= 19) - - def test_get_details_filter_changes_since(self): - """ - Tests that the /images/detail registry API returns list of - images that changed since the time defined by changes-since - """ - dt1 = timeutils.utcnow() - datetime.timedelta(1) - iso1 = timeutils.isotime(dt1) - - date_only1 = dt1.strftime('%Y-%m-%d') - date_only2 = dt1.strftime('%Y%m%d') - date_only3 = dt1.strftime('%Y-%m%d') - - dt2 = timeutils.utcnow() + datetime.timedelta(1) - iso2 = timeutils.isotime(dt2) - - image_ts = timeutils.utcnow() + datetime.timedelta(2) - hour_before = image_ts.strftime('%Y-%m-%dT%H:%M:%S%%2B01:00') - hour_after = image_ts.strftime('%Y-%m-%dT%H:%M:%S-01:00') - - dt4 = timeutils.utcnow() + datetime.timedelta(3) - iso4 = timeutils.isotime(dt4) - - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, size=18) - - db_api.image_create(self.context, extra_fixture) - db_api.image_destroy(self.context, UUID3) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, - disk_format='ami', - container_format='ami', - created_at=image_ts, - updated_at=image_ts) - - db_api.image_create(self.context, extra_fixture) - - # Check a standard list, 4 images in db (2 deleted) - res = self.get_api_response_ext(http.OK, url='/images/detail') - self.assertEqualImages(res, (UUID4, UUID2)) - - # Expect 3 images (1 deleted) - res = self.get_api_response_ext(http.OK, url=( - '/images/detail?changes-since=%s' % iso1)) - self.assertEqualImages(res, (UUID4, UUID3, UUID2)) - - # Expect 1 images (0 deleted) - res = self.get_api_response_ext(http.OK, url=( - '/images/detail?changes-since=%s' % iso2)) - self.assertEqualImages(res, (UUID4,)) - - # Expect 1 images (0 deleted) - res = self.get_api_response_ext(http.OK, url=( - '/images/detail?changes-since=%s' % 
hour_before)) - self.assertEqualImages(res, (UUID4,)) - - # Expect 0 images (0 deleted) - res = self.get_api_response_ext(http.OK, url=( - '/images/detail?changes-since=%s' % hour_after)) - self.assertEqualImages(res, ()) - - # Expect 0 images (0 deleted) - res = self.get_api_response_ext(http.OK, url=( - '/images/detail?changes-since=%s' % iso4)) - self.assertEqualImages(res, ()) - - for param in [date_only1, date_only2, date_only3]: - # Expect 3 images (1 deleted) - res = self.get_api_response_ext(http.OK, url=( - '/images/detail?changes-since=%s' % param)) - self.assertEqualImages(res, (UUID4, UUID3, UUID2)) - - # Bad request (empty changes-since param) - self.get_api_response_ext(http.BAD_REQUEST, - url='/images/detail?changes-since=') - - def test_get_details_filter_property(self): - """ - Tests that the /images/detail registry API returns list of - public images that have a specific custom property - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), size=19, - properties={'prop_123': 'v a'}) - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), size=19, - disk_format='ami', - container_format='ami', - properties={'prop_123': 'v b'}) - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, url=( - '/images/detail?property-prop_123=v%20a')) - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(1, len(images)) - - for image in images: - self.assertEqual('v a', image['properties']['prop_123']) - - def test_get_details_filter_public_none(self): - """ - Tests that the /images/detail registry API returns list of - all images if is_public none is passed - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), - is_public=False, size=18) - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, - url='/images/detail?is_public=None') - res_dict = jsonutils.loads(res.body) - - images = 
res_dict['images'] - self.assertEqual(3, len(images)) - - def test_get_details_filter_public_false(self): - """ - Tests that the /images/detail registry API returns list of - private images if is_public false is passed - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), - is_public=False, size=18) - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, - url='/images/detail?is_public=False') - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(2, len(images)) - - for image in images: - self.assertEqual(False, image['is_public']) - - def test_get_details_filter_public_true(self): - """ - Tests that the /images/detail registry API returns list of - public images if is_public true is passed (same as default) - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), - is_public=False, size=18) - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, - url='/images/detail?is_public=True') - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(1, len(images)) - - for image in images: - self.assertTrue(image['is_public']) - - def test_get_details_filter_public_string_format(self): - """ - Tests that the /images/detail registry - API returns 400 Bad error for filter is_public with wrong format - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), - is_public='true', size=18) - - db_api.image_create(self.context, extra_fixture) - - self.get_api_response_ext(http.BAD_REQUEST, - url='/images/detail?is_public=public') - - def test_get_details_filter_deleted_false(self): - """ - Test that the /images/detail registry - API return list of images with deleted filter = false - - """ - extra_fixture = {'id': _gen_uuid(), - 'status': 'active', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'test deleted filter 1', - 'size': 18, - 'deleted': False, - 'checksum': None} - - db_api.image_create(self.context, 
extra_fixture) - - res = self.get_api_response_ext(http.OK, - url='/images/detail?deleted=False') - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - - for image in images: - self.assertFalse(image['deleted']) - - def test_get_filter_no_public_with_no_admin(self): - """ - Tests that the /images/detail registry API returns list of - public images if is_public true is passed (same as default) - """ - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, - is_public=False, size=18) - - db_api.image_create(self.context, extra_fixture) - test_rserv = rserver.API(self.mapper) - api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) - res = self.get_api_response_ext(http.OK, api=api, - url='/images/detail?is_public=False') - res_dict = jsonutils.loads(res.body) - - images = res_dict['images'] - self.assertEqual(1, len(images)) - # Check that for non admin user only is_public = True images returns - for image in images: - self.assertTrue(image['is_public']) - - def test_get_filter_protected_with_None_value(self): - """ - Tests that the /images/detail registry API returns 400 error - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), size=18, - protected="False") - - db_api.image_create(self.context, extra_fixture) - self.get_api_response_ext(http.BAD_REQUEST, - url='/images/detail?protected=') - - def test_get_filter_protected_with_True_value(self): - """ - Tests that the /images/detail registry API returns 400 error - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), - size=18, protected="True") - - db_api.image_create(self.context, extra_fixture) - self.get_api_response_ext(http.OK, url='/images/detail?protected=True') - - def test_get_details_sort_name_asc(self): - """ - Tests that the /images/details registry API returns list of - public images sorted alphabetically by name in - ascending order. 
- """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf', size=19) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='xyz') - - db_api.image_create(self.context, extra_fixture) - - res = self.get_api_response_ext(http.OK, url=( - '/images/detail?sort_key=name&sort_dir=asc')) - self.assertEqualImages(res, (UUID3, UUID2, UUID4)) - - def test_create_image(self): - """Tests that the /images POST registry API creates the image""" - - fixture = self.get_minimal_fixture(is_public=True) - body = jsonutils.dump_as_bytes(dict(image=fixture)) - - res = self.get_api_response_ext(http.OK, body=body, - method='POST', content_type='json') - res_dict = jsonutils.loads(res.body) - - for k, v in six.iteritems(fixture): - self.assertEqual(v, res_dict['image'][k]) - - # Test status was updated properly - self.assertEqual('active', res_dict['image']['status']) - - def test_create_image_with_min_disk(self): - """Tests that the /images POST registry API creates the image""" - fixture = self.get_minimal_fixture(min_disk=5) - body = jsonutils.dump_as_bytes(dict(image=fixture)) - - res = self.get_api_response_ext(http.OK, body=body, - method='POST', content_type='json') - res_dict = jsonutils.loads(res.body) - - self.assertEqual(5, res_dict['image']['min_disk']) - - def test_create_image_with_min_ram(self): - """Tests that the /images POST registry API creates the image""" - fixture = self.get_minimal_fixture(min_ram=256) - body = jsonutils.dump_as_bytes(dict(image=fixture)) - - res = self.get_api_response_ext(http.OK, body=body, - method='POST', content_type='json') - res_dict = jsonutils.loads(res.body) - - self.assertEqual(256, res_dict['image']['min_ram']) - - def test_create_image_with_min_ram_default(self): - """Tests that the /images POST registry API creates the image""" - fixture = self.get_minimal_fixture() - body = jsonutils.dump_as_bytes(dict(image=fixture)) - - res = 
self.get_api_response_ext(http.OK, body=body, - method='POST', content_type='json') - res_dict = jsonutils.loads(res.body) - - self.assertEqual(0, res_dict['image']['min_ram']) - - def test_create_image_with_min_disk_default(self): - """Tests that the /images POST registry API creates the image""" - fixture = self.get_minimal_fixture() - body = jsonutils.dump_as_bytes(dict(image=fixture)) - - res = self.get_api_response_ext(http.OK, body=body, - method='POST', content_type='json') - res_dict = jsonutils.loads(res.body) - - self.assertEqual(0, res_dict['image']['min_disk']) - - def test_create_image_with_bad_status(self): - """Tests proper exception is raised if a bad status is set""" - fixture = self.get_minimal_fixture(id=_gen_uuid(), status='bad status') - body = jsonutils.dump_as_bytes(dict(image=fixture)) - - res = self.get_api_response_ext(http.BAD_REQUEST, body=body, - method='POST', content_type='json') - self.assertIn(b'Invalid image status', res.body) - - def test_create_image_with_bad_id(self): - """Tests proper exception is raised if a bad disk_format is set""" - fixture = self.get_minimal_fixture(id='asdf') - - body = jsonutils.dump_as_bytes(dict(image=fixture)) - self.get_api_response_ext(http.BAD_REQUEST, content_type='json', - method='POST', body=body) - - def test_create_image_with_image_id_in_log(self): - """Tests correct image id in log message when creating image""" - fixture = self.get_minimal_fixture( - id='0564c64c-3545-4e34-abfb-9d18e5f2f2f9') - self.log_image_id = False - - def fake_log_info(msg, image_data): - if ('0564c64c-3545-4e34-abfb-9d18e5f2f2f9' == image_data['id'] and - 'Successfully created image' in msg): - self.log_image_id = True - - self.stubs.Set(rserver.images.LOG, 'info', fake_log_info) - - body = jsonutils.dump_as_bytes(dict(image=fixture)) - self.get_api_response_ext(http.OK, content_type='json', method='POST', - body=body) - self.assertTrue(self.log_image_id) - - def test_update_image(self): - """Tests that the /images 
PUT registry API updates the image""" - fixture = {'name': 'fake public image #2', - 'min_disk': 5, - 'min_ram': 256, - 'disk_format': 'raw'} - body = jsonutils.dump_as_bytes(dict(image=fixture)) - - res = self.get_api_response_ext(http.OK, url='/images/%s' % UUID2, - body=body, method='PUT', - content_type='json') - - res_dict = jsonutils.loads(res.body) - - self.assertNotEqual(res_dict['image']['created_at'], - res_dict['image']['updated_at']) - - for k, v in six.iteritems(fixture): - self.assertEqual(v, res_dict['image'][k]) - - @mock.patch.object(rserver.images.LOG, 'debug') - def test_update_image_not_log_sensitive_info(self, log_debug): - """ - Tests that there is no any sensitive info of image location - was logged in glance during the image update operation. - """ - - def fake_log_debug(fmt_str, image_meta): - self.assertNotIn("'locations'", fmt_str % image_meta) - - fixture = {'name': 'fake public image #2', - 'min_disk': 5, - 'min_ram': 256, - 'disk_format': 'raw', - 'location': 'fake://image'} - body = jsonutils.dump_as_bytes(dict(image=fixture)) - - log_debug.side_effect = fake_log_debug - - res = self.get_api_response_ext(http.OK, url='/images/%s' % UUID2, - body=body, method='PUT', - content_type='json') - - res_dict = jsonutils.loads(res.body) - - self.assertNotEqual(res_dict['image']['created_at'], - res_dict['image']['updated_at']) - - for k, v in six.iteritems(fixture): - self.assertEqual(v, res_dict['image'][k]) - - def test_update_image_not_existing(self): - """ - Tests proper exception is raised if attempt to update - non-existing image - """ - fixture = {'status': 'killed'} - body = jsonutils.dump_as_bytes(dict(image=fixture)) - - self.get_api_response_ext(http.NOT_FOUND, - url='/images/%s' % _gen_uuid(), - method='PUT', body=body, content_type='json') - - def test_update_image_with_bad_status(self): - """Tests that exception raised trying to set a bad status""" - fixture = {'status': 'invalid'} - body = 
jsonutils.dump_as_bytes(dict(image=fixture)) - - res = self.get_api_response_ext(http.BAD_REQUEST, method='PUT', - body=body, - url='/images/%s' % UUID2, - content_type='json') - self.assertIn(b'Invalid image status', res.body) - - def test_update_private_image_no_admin(self): - """ - Tests proper exception is raised if attempt to update - private image with non admin user, that not belongs to it - """ - UUID8 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID8, size=19, is_public=False, - protected=True, owner='test user') - - db_api.image_create(self.context, extra_fixture) - test_rserv = rserver.API(self.mapper) - api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) - body = jsonutils.dump_as_bytes(dict(image=extra_fixture)) - self.get_api_response_ext(http.NOT_FOUND, body=body, api=api, - url='/images/%s' % UUID8, method='PUT', - content_type='json') - - def test_delete_image(self): - """Tests that the /images DELETE registry API deletes the image""" - - # Grab the original number of images - res = self.get_api_response_ext(http.OK) - res_dict = jsonutils.loads(res.body) - - orig_num_images = len(res_dict['images']) - - # Delete image #2 - self.get_api_response_ext(http.OK, url='/images/%s' % UUID2, - method='DELETE') - - # Verify one less image - res = self.get_api_response_ext(http.OK) - res_dict = jsonutils.loads(res.body) - - new_num_images = len(res_dict['images']) - self.assertEqual(orig_num_images - 1, new_num_images) - - def test_delete_image_response(self): - """Tests that the registry API delete returns the image metadata""" - - image = self.FIXTURES[0] - res = self.get_api_response_ext(http.OK, - url='/images/%s' % image['id'], - method='DELETE') - deleted_image = jsonutils.loads(res.body)['image'] - - self.assertEqual(image['id'], deleted_image['id']) - self.assertTrue(deleted_image['deleted']) - self.assertTrue(deleted_image['deleted_at']) - - def test_delete_image_not_existing(self): - """ - Tests proper exception is raised if 
attempt to delete - non-existing image - """ - self.get_api_response_ext(http.NOT_FOUND, - url='/images/%s' % _gen_uuid(), - method='DELETE') - - def test_delete_public_image_no_admin(self): - """ - Tests proper exception is raised if attempt to delete - public image with non admin user - """ - UUID8 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID8, size=19, protected=True, - owner='test user') - - db_api.image_create(self.context, extra_fixture) - test_rserv = rserver.API(self.mapper) - api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) - self.get_api_response_ext(http.FORBIDDEN, url='/images/%s' % UUID8, - method='DELETE', api=api) - - def test_delete_private_image_no_admin(self): - """ - Tests proper exception is raised if attempt to delete - private image with non admin user, that not belongs to it - """ - UUID8 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID8, is_public=False, size=19, - protected=True, owner='test user') - - db_api.image_create(self.context, extra_fixture) - test_rserv = rserver.API(self.mapper) - api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) - self.get_api_response_ext(http.NOT_FOUND, url='/images/%s' % UUID8, - method='DELETE', api=api) - - def test_get_image_members(self): - """ - Tests members listing for existing images - """ - res = self.get_api_response_ext(http.OK, - url='/images/%s/members' % UUID2, - method='GET') - - memb_list = jsonutils.loads(res.body) - num_members = len(memb_list['members']) - self.assertEqual(0, num_members) - - def test_get_image_members_not_existing(self): - """ - Tests proper exception is raised if attempt to get members of - non-existing image - """ - self.get_api_response_ext(http.NOT_FOUND, method='GET', - url='/images/%s/members' % _gen_uuid()) - - def test_get_image_members_forbidden(self): - """ - Tests proper exception is raised if attempt to get members of - non-existing image - - """ - UUID8 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID8, 
is_public=False, size=19, - protected=True, owner='test user') - - db_api.image_create(self.context, extra_fixture) - test_rserv = rserver.API(self.mapper) - api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) - self.get_api_response_ext(http.NOT_FOUND, - url='/images/%s/members' % UUID8, - method='GET', api=api) - - def test_get_member_images(self): - """ - Tests image listing for members - """ - res = self.get_api_response_ext(http.OK, - url='/shared-images/pattieblack', - method='GET') - - memb_list = jsonutils.loads(res.body) - num_members = len(memb_list['shared_images']) - self.assertEqual(0, num_members) - - def test_replace_members(self): - """ - Tests replacing image members raises right exception - """ - self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), - is_admin=False) - fixture = dict(member_id='pattieblack') - body = jsonutils.dump_as_bytes(dict(image_memberships=fixture)) - - self.get_api_response_ext(http.UNAUTHORIZED, method='PUT', body=body, - url='/images/%s/members' % UUID2, - content_type='json') - - def test_update_all_image_members_non_existing_image_id(self): - """ - Test update image members raises right exception - """ - # Update all image members - fixture = dict(member_id='test1') - req = webob.Request.blank('/images/%s/members' % _gen_uuid()) - req.method = 'PUT' - self.context.tenant = 'test2' - req.content_type = 'application/json' - req.body = jsonutils.dump_as_bytes(dict(image_memberships=fixture)) - res = req.get_response(self.api) - self.assertEqual(http.NOT_FOUND, res.status_int) - - def test_update_all_image_members_invalid_membership_association(self): - """ - Test update image members raises right exception - """ - UUID8 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, - owner='test user') - - db_api.image_create(self.context, extra_fixture) - - # Add several members to image - req = webob.Request.blank('/images/%s/members/test1' % UUID8) - req.method = 'PUT' - 
res = req.get_response(self.api) - # Get all image members: - res = self.get_api_response_ext(http.OK, - url='/images/%s/members' % UUID8, - method='GET') - - memb_list = jsonutils.loads(res.body) - num_members = len(memb_list['members']) - self.assertEqual(1, num_members) - - fixture = dict(member_id='test1') - body = jsonutils.dump_as_bytes(dict(image_memberships=fixture)) - self.get_api_response_ext(http.BAD_REQUEST, - url='/images/%s/members' % UUID8, - method='PUT', body=body, - content_type='json') - - def test_update_all_image_members_non_shared_image_forbidden(self): - """ - Test update image members raises right exception - """ - test_rserv = rserver.API(self.mapper) - api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) - UUID9 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID9, size=19, protected=False) - - db_api.image_create(self.context, extra_fixture) - fixture = dict(member_id='test1') - req = webob.Request.blank('/images/%s/members' % UUID9) - req.headers['X-Auth-Token'] = 'test1:test1:' - req.method = 'PUT' - req.content_type = 'application/json' - req.body = jsonutils.dump_as_bytes(dict(image_memberships=fixture)) - - res = req.get_response(api) - self.assertEqual(http.FORBIDDEN, res.status_int) - - def test_update_all_image_members(self): - """ - Test update non existing image members - """ - UUID8 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, - owner='test user') - - db_api.image_create(self.context, extra_fixture) - - # Add several members to image - req = webob.Request.blank('/images/%s/members/test1' % UUID8) - req.method = 'PUT' - req.get_response(self.api) - - fixture = [dict(member_id='test2', can_share=True)] - body = jsonutils.dump_as_bytes(dict(memberships=fixture)) - self.get_api_response_ext(http.NO_CONTENT, - url='/images/%s/members' % UUID8, - method='PUT', body=body, - content_type='json') - - def test_update_all_image_members_bad_request(self): - """ - Test that right 
exception is raises - in case if wrong memberships association is supplied - """ - UUID8 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, - owner='test user') - - db_api.image_create(self.context, extra_fixture) - - # Add several members to image - req = webob.Request.blank('/images/%s/members/test1' % UUID8) - req.method = 'PUT' - req.get_response(self.api) - fixture = dict(member_id='test3') - body = jsonutils.dump_as_bytes(dict(memberships=fixture)) - self.get_api_response_ext(http.BAD_REQUEST, - url='/images/%s/members' % UUID8, - method='PUT', body=body, - content_type='json') - - def test_update_all_image_existing_members(self): - """ - Test update existing image members - """ - UUID8 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, - owner='test user') - - db_api.image_create(self.context, extra_fixture) - - # Add several members to image - req = webob.Request.blank('/images/%s/members/test1' % UUID8) - req.method = 'PUT' - req.get_response(self.api) - - fixture = [dict(member_id='test1', can_share=False)] - body = jsonutils.dump_as_bytes(dict(memberships=fixture)) - self.get_api_response_ext(http.NO_CONTENT, - url='/images/%s/members' % UUID8, - method='PUT', body=body, - content_type='json') - - def test_update_all_image_existing_deleted_members(self): - """ - Test update existing image members - """ - UUID8 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, - owner='test user') - - db_api.image_create(self.context, extra_fixture) - - # Add a new member to an image - req = webob.Request.blank('/images/%s/members/test1' % UUID8) - req.method = 'PUT' - req.get_response(self.api) - - # Delete the existing member - self.get_api_response_ext(http.NO_CONTENT, method='DELETE', - url='/images/%s/members/test1' % UUID8) - - # Re-add the deleted member by replacing membership list - fixture = [dict(member_id='test1', can_share=False)] - body = 
jsonutils.dump_as_bytes(dict(memberships=fixture)) - self.get_api_response_ext(http.NO_CONTENT, - url='/images/%s/members' % UUID8, - method='PUT', body=body, - content_type='json') - memb_list = db_api.image_member_find(self.context, image_id=UUID8) - self.assertEqual(1, len(memb_list)) - - def test_add_member(self): - """ - Tests adding image members raises right exception - """ - self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), - is_admin=False) - self.get_api_response_ext(http.UNAUTHORIZED, method='PUT', - url=('/images/%s/members/pattieblack' % - UUID2)) - - def test_add_member_to_image_positive(self): - """ - Test check that member can be successfully added - """ - UUID8 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, - owner='test user') - - db_api.image_create(self.context, extra_fixture) - fixture = dict(can_share=True) - test_uri = '/images/%s/members/test_add_member_positive' - body = jsonutils.dump_as_bytes(dict(member=fixture)) - self.get_api_response_ext(http.NO_CONTENT, url=test_uri % UUID8, - method='PUT', body=body, - content_type='json') - - def test_add_member_to_non_exist_image(self): - """ - Test check that member can't be added for - non exist image - """ - fixture = dict(can_share=True) - test_uri = '/images/%s/members/test_add_member_positive' - body = jsonutils.dump_as_bytes(dict(member=fixture)) - self.get_api_response_ext(http.NOT_FOUND, url=test_uri % _gen_uuid(), - method='PUT', body=body, - content_type='json') - - def test_add_image_member_non_shared_image_forbidden(self): - """ - Test update image members raises right exception - """ - test_rserver_api = rserver.API(self.mapper) - api = test_utils.FakeAuthMiddleware( - test_rserver_api, is_admin=False) - UUID9 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID9, size=19, protected=False) - db_api.image_create(self.context, extra_fixture) - fixture = dict(can_share=True) - test_uri = 
'/images/%s/members/test_add_member_to_non_share_image' - req = webob.Request.blank(test_uri % UUID9) - req.headers['X-Auth-Token'] = 'test1:test1:' - req.method = 'PUT' - req.content_type = 'application/json' - req.body = jsonutils.dump_as_bytes(dict(member=fixture)) - - res = req.get_response(api) - self.assertEqual(http.FORBIDDEN, res.status_int) - - def test_add_member_to_image_bad_request(self): - """ - Test check right status code is returned - """ - UUID8 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, - owner='test user') - - db_api.image_create(self.context, extra_fixture) - - fixture = [dict(can_share=True)] - test_uri = '/images/%s/members/test_add_member_bad_request' - body = jsonutils.dump_as_bytes(dict(member=fixture)) - self.get_api_response_ext(http.BAD_REQUEST, url=test_uri % UUID8, - method='PUT', body=body, - content_type='json') - - def test_delete_member(self): - """ - Tests deleting image members raises right exception - """ - self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), - is_admin=False) - self.get_api_response_ext(http.UNAUTHORIZED, method='DELETE', - url=('/images/%s/members/pattieblack' % - UUID2)) - - def test_delete_member_invalid(self): - """ - Tests deleting a invalid/non existing member raises right exception - """ - self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), - is_admin=True) - res = self.get_api_response_ext( - http.NOT_FOUND, method='DELETE', - url=('/images/%s/members/pattieblack' % UUID2)) - self.assertIn(b'Membership could not be found', res.body) - - def test_delete_member_from_non_exist_image(self): - """ - Tests deleting image members raises right exception - """ - test_rserver_api = rserver.API(self.mapper) - self.api = test_utils.FakeAuthMiddleware( - test_rserver_api, is_admin=True) - test_uri = '/images/%s/members/pattieblack' - self.get_api_response_ext(http.NOT_FOUND, method='DELETE', - url=test_uri % _gen_uuid()) - - def 
test_delete_image_member_non_shared_image_forbidden(self): - """ - Test delete image members raises right exception - """ - test_rserver_api = rserver.API(self.mapper) - api = test_utils.FakeAuthMiddleware( - test_rserver_api, is_admin=False) - UUID9 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID9, size=19, protected=False) - - db_api.image_create(self.context, extra_fixture) - test_uri = '/images/%s/members/test_add_member_to_non_share_image' - req = webob.Request.blank(test_uri % UUID9) - req.headers['X-Auth-Token'] = 'test1:test1:' - req.method = 'DELETE' - req.content_type = 'application/json' - - res = req.get_response(api) - self.assertEqual(http.FORBIDDEN, res.status_int) - - def test_add_member_delete_create(self): - """ - Test check that the same member can be successfully added after delete - it, and the same record will be reused for the same membership. - """ - # add a member - UUID8 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, - owner='test user') - - db_api.image_create(self.context, extra_fixture) - fixture = dict(can_share=True) - test_uri = '/images/%s/members/test_add_member_delete_create' - body = jsonutils.dump_as_bytes(dict(member=fixture)) - self.get_api_response_ext(http.NO_CONTENT, url=test_uri % UUID8, - method='PUT', body=body, - content_type='json') - memb_list = db_api.image_member_find(self.context, image_id=UUID8) - self.assertEqual(1, len(memb_list)) - memb_list2 = db_api.image_member_find(self.context, - image_id=UUID8, - include_deleted=True) - self.assertEqual(1, len(memb_list2)) - # delete the member - self.get_api_response_ext(http.NO_CONTENT, method='DELETE', - url=test_uri % UUID8) - memb_list = db_api.image_member_find(self.context, image_id=UUID8) - self.assertEqual(0, len(memb_list)) - memb_list2 = db_api.image_member_find(self.context, - image_id=UUID8, - include_deleted=True) - self.assertEqual(1, len(memb_list2)) - # create it again - 
self.get_api_response_ext(http.NO_CONTENT, url=test_uri % UUID8, - method='PUT', body=body, - content_type='json') - memb_list = db_api.image_member_find(self.context, image_id=UUID8) - self.assertEqual(1, len(memb_list)) - memb_list2 = db_api.image_member_find(self.context, - image_id=UUID8, - include_deleted=True) - self.assertEqual(1, len(memb_list2)) - - def test_get_on_image_member(self): - """ - Test GET on image members raises 404 and produces correct Allow headers - """ - self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), - is_admin=False) - uri = '/images/%s/members/123' % UUID1 - req = webob.Request.blank(uri) - req.method = 'GET' - res = req.get_response(self.api) - self.assertEqual(http.METHOD_NOT_ALLOWED, res.status_int) - self.assertIn(('Allow', 'PUT, DELETE'), res.headerlist) - - def test_get_images_bad_urls(self): - """Check that routes collections are not on (LP bug 1185828)""" - self.get_api_response_ext(http.NOT_FOUND, url='/images/detail.xxx') - - self.get_api_response_ext(http.NOT_FOUND, url='/images.xxx') - - self.get_api_response_ext(http.NOT_FOUND, url='/images/new') - - self.get_api_response_ext(http.OK, url='/images/%s/members' % UUID1) - - self.get_api_response_ext(http.NOT_FOUND, - url='/images/%s/members.xxx' % UUID1) - - -class TestRegistryAPILocations(base.IsolatedUnitTest, - test_utils.RegistryAPIMixIn): - def setUp(self): - """Establish a clean test environment""" - super(TestRegistryAPILocations, self).setUp() - self.mapper = routes.Mapper() - self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), - is_admin=True) - - def _get_extra_fixture(id, name, **kwargs): - return self.get_extra_fixture( - id, name, - locations=[{'url': "file:///%s/%s" % (self.test_dir, id), - 'metadata': {}, 'status': 'active'}], **kwargs) - - self.FIXTURES = [ - _get_extra_fixture(UUID1, 'fake image #1', is_public=False, - disk_format='ami', container_format='ami', - min_disk=0, min_ram=0, owner=123, - size=13, 
properties={'type': 'kernel'}), - _get_extra_fixture(UUID2, 'fake image #2', - min_disk=5, min_ram=256, - size=19, properties={})] - self.context = context.RequestContext(is_admin=True) - db_api.get_engine() - self.destroy_fixtures() - self.create_fixtures() - - def tearDown(self): - """Clear the test environment""" - super(TestRegistryAPILocations, self).tearDown() - self.destroy_fixtures() - - def test_show_from_locations(self): - req = webob.Request.blank('/images/%s' % UUID1) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body) - image = res_dict['image'] - self.assertIn('id', image['location_data'][0]) - image['location_data'][0].pop('id') - self.assertEqual(self.FIXTURES[0]['locations'][0], - image['location_data'][0]) - self.assertEqual(self.FIXTURES[0]['locations'][0]['url'], - image['location_data'][0]['url']) - self.assertEqual(self.FIXTURES[0]['locations'][0]['metadata'], - image['location_data'][0]['metadata']) - - def test_show_from_location_data(self): - req = webob.Request.blank('/images/%s' % UUID2) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body) - image = res_dict['image'] - self.assertIn('id', image['location_data'][0]) - image['location_data'][0].pop('id') - self.assertEqual(self.FIXTURES[1]['locations'][0], - image['location_data'][0]) - self.assertEqual(self.FIXTURES[1]['locations'][0]['url'], - image['location_data'][0]['url']) - self.assertEqual(self.FIXTURES[1]['locations'][0]['metadata'], - image['location_data'][0]['metadata']) - - def test_create_from_location_data_with_encryption(self): - encryption_key = '1234567890123456' - location_url1 = "file:///%s/%s" % (self.test_dir, _gen_uuid()) - location_url2 = "file:///%s/%s" % (self.test_dir, _gen_uuid()) - encrypted_location_url1 = crypt.urlsafe_encrypt(encryption_key, - location_url1, 64) - encrypted_location_url2 = crypt.urlsafe_encrypt(encryption_key, - 
location_url2, 64) - fixture = {'name': 'fake image #3', - 'status': 'active', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'is_public': True, - 'checksum': None, - 'min_disk': 5, - 'min_ram': 256, - 'size': 19, - 'location': encrypted_location_url1, - 'location_data': [{'url': encrypted_location_url1, - 'metadata': {'key': 'value'}, - 'status': 'active'}, - {'url': encrypted_location_url2, - 'metadata': {'key': 'value'}, - 'status': 'active'}]} - - self.config(metadata_encryption_key=encryption_key) - req = webob.Request.blank('/images') - req.method = 'POST' - req.content_type = 'application/json' - req.body = jsonutils.dump_as_bytes(dict(image=fixture)) - - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body) - image = res_dict['image'] - # NOTE(zhiyan) _normalize_image_location_for_db() function will - # not re-encrypted the url within location. - self.assertEqual(fixture['location'], image['location']) - self.assertEqual(2, len(image['location_data'])) - self.assertEqual(fixture['location_data'][0]['url'], - image['location_data'][0]['url']) - self.assertEqual(fixture['location_data'][0]['metadata'], - image['location_data'][0]['metadata']) - self.assertEqual(fixture['location_data'][1]['url'], - image['location_data'][1]['url']) - self.assertEqual(fixture['location_data'][1]['metadata'], - image['location_data'][1]['metadata']) - - image_entry = db_api.image_get(self.context, image['id']) - self.assertEqual(encrypted_location_url1, - image_entry['locations'][0]['url']) - self.assertEqual(encrypted_location_url2, - image_entry['locations'][1]['url']) - decrypted_location_url1 = crypt.urlsafe_decrypt( - encryption_key, image_entry['locations'][0]['url']) - decrypted_location_url2 = crypt.urlsafe_decrypt( - encryption_key, image_entry['locations'][1]['url']) - self.assertEqual(location_url1, decrypted_location_url1) - self.assertEqual(location_url2, decrypted_location_url2) - - -class 
TestSharability(test_utils.BaseTestCase): - def setUp(self): - super(TestSharability, self).setUp() - self.setup_db() - self.controller = glance.registry.api.v1.members.Controller() - - def setup_db(self): - db_api.get_engine() - db_models.unregister_models(db_api.get_engine()) - db_models.register_models(db_api.get_engine()) - - def test_is_image_sharable_as_admin(self): - TENANT1 = str(uuid.uuid4()) - TENANT2 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, - auth_token='user:%s:user' % TENANT1, - owner_is_tenant=True) - ctxt2 = context.RequestContext(is_admin=True, user=TENANT2, - auth_token='user:%s:admin' % TENANT2, - owner_is_tenant=False) - UUIDX = str(uuid.uuid4()) - # We need private image and context.owner should not match image - # owner - image = db_api.image_create(ctxt1, {'id': UUIDX, - 'status': 'queued', - 'is_public': False, - 'owner': TENANT1}) - - result = self.controller.is_image_sharable(ctxt2, image) - self.assertTrue(result) - - def test_is_image_sharable_owner_can_share(self): - TENANT1 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, - auth_token='user:%s:user' % TENANT1, - owner_is_tenant=True) - UUIDX = str(uuid.uuid4()) - # We need private image and context.owner should not match image - # owner - image = db_api.image_create(ctxt1, {'id': UUIDX, - 'status': 'queued', - 'is_public': False, - 'owner': TENANT1}) - - result = self.controller.is_image_sharable(ctxt1, image) - self.assertTrue(result) - - def test_is_image_sharable_non_owner_cannot_share(self): - TENANT1 = str(uuid.uuid4()) - TENANT2 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, - auth_token='user:%s:user' % TENANT1, - owner_is_tenant=True) - ctxt2 = context.RequestContext(is_admin=False, user=TENANT2, - auth_token='user:%s:user' % TENANT2, - owner_is_tenant=False) - UUIDX = str(uuid.uuid4()) - # We need private image and context.owner should not match image - # owner 
- image = db_api.image_create(ctxt1, {'id': UUIDX, - 'status': 'queued', - 'is_public': False, - 'owner': TENANT1}) - - result = self.controller.is_image_sharable(ctxt2, image) - self.assertFalse(result) - - def test_is_image_sharable_non_owner_can_share_as_image_member(self): - TENANT1 = str(uuid.uuid4()) - TENANT2 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, - auth_token='user:%s:user' % TENANT1, - owner_is_tenant=True) - ctxt2 = context.RequestContext(is_admin=False, user=TENANT2, - auth_token='user:%s:user' % TENANT2, - owner_is_tenant=False) - UUIDX = str(uuid.uuid4()) - # We need private image and context.owner should not match image - # owner - image = db_api.image_create(ctxt1, {'id': UUIDX, - 'status': 'queued', - 'is_public': False, - 'owner': TENANT1}) - - membership = {'can_share': True, - 'member': TENANT2, - 'image_id': UUIDX} - - db_api.image_member_create(ctxt1, membership) - - result = self.controller.is_image_sharable(ctxt2, image) - self.assertTrue(result) - - def test_is_image_sharable_non_owner_as_image_member_without_sharing(self): - TENANT1 = str(uuid.uuid4()) - TENANT2 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, - auth_token='user:%s:user' % TENANT1, - owner_is_tenant=True) - ctxt2 = context.RequestContext(is_admin=False, user=TENANT2, - auth_token='user:%s:user' % TENANT2, - owner_is_tenant=False) - UUIDX = str(uuid.uuid4()) - # We need private image and context.owner should not match image - # owner - image = db_api.image_create(ctxt1, {'id': UUIDX, - 'status': 'queued', - 'is_public': False, - 'owner': TENANT1}) - - membership = {'can_share': False, - 'member': TENANT2, - 'image_id': UUIDX} - - db_api.image_member_create(ctxt1, membership) - - result = self.controller.is_image_sharable(ctxt2, image) - self.assertFalse(result) - - def test_is_image_sharable_owner_is_none(self): - TENANT1 = str(uuid.uuid4()) - ctxt1 = context.RequestContext(is_admin=False, 
tenant=TENANT1, - auth_token='user:%s:user' % TENANT1, - owner_is_tenant=True) - ctxt2 = context.RequestContext(is_admin=False, tenant=None, - auth_token='user:%s:user' % TENANT1, - owner_is_tenant=True) - UUIDX = str(uuid.uuid4()) - # We need private image and context.owner should not match image - # owner - image = db_api.image_create(ctxt1, {'id': UUIDX, - 'status': 'queued', - 'is_public': False, - 'owner': TENANT1}) - - result = self.controller.is_image_sharable(ctxt2, image) - self.assertFalse(result) diff --git a/glance/tests/unit/v1/test_registry_client.py b/glance/tests/unit/v1/test_registry_client.py deleted file mode 100644 index 011782cd..00000000 --- a/glance/tests/unit/v1/test_registry_client.py +++ /dev/null @@ -1,982 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy -import datetime -import os -import uuid - -from mock import patch -from six.moves import http_client as http -from six.moves import reload_module -import testtools - -from glance.api.v1.images import Controller as acontroller -from glance.common import client as test_client -from glance.common import config -from glance.common import exception -from glance.common import timeutils -from glance import context -from glance.db.sqlalchemy import api as db_api -from glance.registry.api.v1.images import Controller as rcontroller -import glance.registry.client.v1.api as rapi -from glance.registry.client.v1.api import client as rclient -from glance.tests.unit import base -from glance.tests import utils as test_utils -import webob - -_gen_uuid = lambda: str(uuid.uuid4()) - -UUID1 = _gen_uuid() -UUID2 = _gen_uuid() - -# NOTE(bcwaldon): needed to init config_dir cli opt -config.parse_args(args=[]) - - -class TestRegistryV1Client(base.IsolatedUnitTest, test_utils.RegistryAPIMixIn): - - """ - Test proper actions made for both valid and invalid requests - against a Registry service - """ - - def setUp(self): - """Establish a clean test environment""" - super(TestRegistryV1Client, self).setUp() - db_api.get_engine() - self.context = context.RequestContext(is_admin=True) - - self.FIXTURES = [ - self.get_fixture( - id=UUID1, name='fake image #1', is_public=False, - disk_format='ami', container_format='ami', size=13, - location="swift://user:passwd@acct/container/obj.tar.0", - properties={'type': 'kernel'}), - self.get_fixture(id=UUID2, name='fake image #2', properties={}, - size=19, location="file:///tmp/glance-tests/2")] - self.destroy_fixtures() - self.create_fixtures() - self.client = rclient.RegistryClient("0.0.0.0") - - def tearDown(self): - """Clear the test environment""" - super(TestRegistryV1Client, self).tearDown() - self.destroy_fixtures() - - def test_get_image_index(self): - """Test correct set of public image returned""" - fixture = { - 'id': UUID2, - 
'name': 'fake image #2' - } - images = self.client.get_images() - self.assertEqualImages(images, (UUID2,), unjsonify=False) - - for k, v in fixture.items(): - self.assertEqual(v, images[0][k]) - - def test_create_image_with_null_min_disk_min_ram(self): - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, min_disk=None, min_ram=None) - db_api.image_create(self.context, extra_fixture) - image = self.client.get_image(UUID3) - self.assertEqual(0, image["min_ram"]) - self.assertEqual(0, image["min_disk"]) - - def test_get_index_sort_name_asc(self): - """ - Tests that the /images registry API returns list of - public images sorted alphabetically by name in - ascending order. - """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf') - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='xyz') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images(sort_key='name', sort_dir='asc') - - self.assertEqualImages(images, (UUID3, UUID2, UUID4), unjsonify=False) - - def test_get_index_sort_status_desc(self): - """ - Tests that the /images registry API returns list of - public images sorted alphabetically by status in - descending order. - """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf', - status='queued') - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='xyz') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images(sort_key='status', sort_dir='desc') - - self.assertEqualImages(images, (UUID3, UUID4, UUID2), unjsonify=False) - - def test_get_index_sort_disk_format_asc(self): - """ - Tests that the /images registry API returns list of - public images sorted alphabetically by disk_format in - ascending order. 
- """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf', - disk_format='ami', - container_format='ami') - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='xyz', - disk_format='vdi') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images(sort_key='disk_format', - sort_dir='asc') - - self.assertEqualImages(images, (UUID3, UUID4, UUID2), unjsonify=False) - - def test_get_index_sort_container_format_desc(self): - """ - Tests that the /images registry API returns list of - public images sorted alphabetically by container_format in - descending order. - """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf', - disk_format='ami', - container_format='ami') - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='xyz', - disk_format='iso', - container_format='bare') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images(sort_key='container_format', - sort_dir='desc') - - self.assertEqualImages(images, (UUID2, UUID4, UUID3), unjsonify=False) - - def test_get_index_sort_size_asc(self): - """ - Tests that the /images registry API returns list of - public images sorted by size in ascending order. 
- """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf', - disk_format='ami', - container_format='ami', size=100) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='asdf', - disk_format='iso', - container_format='bare', size=2) - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images(sort_key='size', sort_dir='asc') - - self.assertEqualImages(images, (UUID4, UUID2, UUID3), unjsonify=False) - - def test_get_index_sort_created_at_asc(self): - """ - Tests that the /images registry API returns list of - public images sorted by created_at in ascending order. - """ - now = timeutils.utcnow() - time1 = now + datetime.timedelta(seconds=5) - time2 = now - - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, created_at=time1) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, created_at=time2) - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images(sort_key='created_at', sort_dir='asc') - - self.assertEqualImages(images, (UUID2, UUID4, UUID3), unjsonify=False) - - def test_get_index_sort_updated_at_desc(self): - """ - Tests that the /images registry API returns list of - public images sorted by updated_at in descending order. 
- """ - now = timeutils.utcnow() - time1 = now + datetime.timedelta(seconds=5) - time2 = now - - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, created_at=None, - updated_at=time1) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, created_at=None, - updated_at=time2) - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images(sort_key='updated_at', sort_dir='desc') - - self.assertEqualImages(images, (UUID3, UUID4, UUID2), unjsonify=False) - - def test_get_image_index_marker(self): - """Test correct set of images returned with marker param.""" - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='new name! #123', - status='saving') - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='new name! #125', - status='saving') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images(marker=UUID4) - - self.assertEqualImages(images, (UUID3, UUID2), unjsonify=False) - - def test_get_image_index_invalid_marker(self): - """Test exception is raised when marker is invalid""" - self.assertRaises(exception.Invalid, - self.client.get_images, - marker=_gen_uuid()) - - def test_get_image_index_forbidden_marker(self): - """Test exception is raised when marker is forbidden""" - UUID5 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID5, owner='0123', - status='saving', is_public=False) - - db_api.image_create(self.context, extra_fixture) - - def non_admin_get_images(self, context, *args, **kwargs): - """Convert to non-admin context""" - context.is_admin = False - rcontroller.__get_images(self, context, *args, **kwargs) - - rcontroller.__get_images = rcontroller._get_images - self.stubs.Set(rcontroller, '_get_images', non_admin_get_images) - self.assertRaises(exception.Invalid, - self.client.get_images, - marker=UUID5) - - def 
test_get_image_index_private_marker(self): - """Test exception is not raised if private non-owned marker is used""" - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, owner='1234', - status='saving', is_public=False) - - db_api.image_create(self.context, extra_fixture) - - try: - self.client.get_images(marker=UUID4) - except Exception as e: - self.fail("Unexpected exception '%s'" % e) - - def test_get_image_index_limit(self): - """Test correct number of images returned with limit param.""" - extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images(limit=2) - self.assertEqual(2, len(images)) - - def test_get_image_index_marker_limit(self): - """Test correct set of images returned with marker/limit params.""" - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='new name! #123', - status='saving') - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='new name! #125', - status='saving') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images(marker=UUID3, limit=1) - - self.assertEqualImages(images, (UUID2,), unjsonify=False) - - def test_get_image_index_limit_None(self): - """Test correct set of images returned with limit param == None.""" - extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images(limit=None) - self.assertEqual(3, len(images)) - - def test_get_image_index_by_name(self): - """ - Test correct set of public, name-filtered image returned. 
This - is just a sanity check, we test the details call more in-depth. - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #123') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images(filters={'name': 'new name! #123'}) - self.assertEqual(1, len(images)) - - for image in images: - self.assertEqual('new name! #123', image['name']) - - def test_get_image_details(self): - """Tests that the detailed info about public images returned""" - fixture = self.get_fixture(id=UUID2, name='fake image #2', - properties={}, size=19, is_public=True) - - images = self.client.get_images_detailed() - - self.assertEqual(1, len(images)) - for k, v in fixture.items(): - self.assertEqual(v, images[0][k]) - - def test_get_image_details_marker_limit(self): - """Test correct set of images returned with marker/limit params.""" - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, status='saving') - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images_detailed(marker=UUID3, limit=1) - - self.assertEqualImages(images, (UUID2,), unjsonify=False) - - def test_get_image_details_invalid_marker(self): - """Test exception is raised when marker is invalid""" - self.assertRaises(exception.Invalid, - self.client.get_images_detailed, - marker=_gen_uuid()) - - def test_get_image_details_forbidden_marker(self): - """Test exception is raised when marker is forbidden""" - UUID5 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID5, is_public=False, - owner='0123', status='saving') - - db_api.image_create(self.context, extra_fixture) - - def non_admin_get_images(self, context, *args, **kwargs): - """Convert to non-admin context""" - context.is_admin = False - rcontroller.__get_images(self, context, *args, **kwargs) - - rcontroller.__get_images = rcontroller._get_images - 
self.stubs.Set(rcontroller, '_get_images', non_admin_get_images) - self.assertRaises(exception.Invalid, - self.client.get_images_detailed, - marker=UUID5) - - def test_get_image_details_private_marker(self): - """Test exception is not raised if private non-owned marker is used""" - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, is_public=False, - owner='1234', status='saving') - - db_api.image_create(self.context, extra_fixture) - - try: - self.client.get_images_detailed(marker=UUID4) - except Exception as e: - self.fail("Unexpected exception '%s'" % e) - - def test_get_image_details_by_name(self): - """Tests that a detailed call can be filtered by name""" - extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #123') - - db_api.image_create(self.context, extra_fixture) - - filters = {'name': 'new name! #123'} - images = self.client.get_images_detailed(filters=filters) - - self.assertEqual(1, len(images)) - for image in images: - self.assertEqual('new name! #123', image['name']) - - def test_get_image_details_by_status(self): - """Tests that a detailed call can be filtered by status""" - extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images_detailed(filters={'status': 'saving'}) - - self.assertEqual(1, len(images)) - for image in images: - self.assertEqual('saving', image['status']) - - def test_get_image_details_by_container_format(self): - """Tests that a detailed call can be filtered by container_format""" - extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') - - db_api.image_create(self.context, extra_fixture) - - filters = {'container_format': 'ovf'} - images = self.client.get_images_detailed(filters=filters) - - self.assertEqual(2, len(images)) - for image in images: - self.assertEqual('ovf', image['container_format']) - - def test_get_image_details_by_disk_format(self): - """Tests that a detailed call can be filtered 
by disk_format""" - extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') - - db_api.image_create(self.context, extra_fixture) - - filters = {'disk_format': 'vhd'} - images = self.client.get_images_detailed(filters=filters) - - self.assertEqual(2, len(images)) - for image in images: - self.assertEqual('vhd', image['disk_format']) - - def test_get_image_details_with_maximum_size(self): - """Tests that a detailed call can be filtered by size_max""" - extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving', - size=21) - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images_detailed(filters={'size_max': 20}) - - self.assertEqual(1, len(images)) - for image in images: - self.assertLessEqual(image['size'], 20) - - def test_get_image_details_with_minimum_size(self): - """Tests that a detailed call can be filtered by size_min""" - extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images_detailed(filters={'size_min': 20}) - - self.assertEqual(1, len(images)) - for image in images: - self.assertGreaterEqual(image['size'], 20) - - def test_get_image_details_with_changes_since(self): - """Tests that a detailed call can be filtered by changes-since""" - dt1 = timeutils.utcnow() - datetime.timedelta(1) - iso1 = timeutils.isotime(dt1) - - dt2 = timeutils.utcnow() + datetime.timedelta(1) - iso2 = timeutils.isotime(dt2) - - dt3 = timeutils.utcnow() + datetime.timedelta(2) - - dt4 = timeutils.utcnow() + datetime.timedelta(3) - iso4 = timeutils.isotime(dt4) - - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='fake image #3') - - db_api.image_create(self.context, extra_fixture) - db_api.image_destroy(self.context, UUID3) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='fake image #4', - created_at=dt3, updated_at=dt3) - - db_api.image_create(self.context, extra_fixture) - - # Check a 
standard list, 4 images in db (2 deleted) - images = self.client.get_images_detailed(filters={}) - self.assertEqualImages(images, (UUID4, UUID2), unjsonify=False) - - # Expect 3 images (1 deleted) - filters = {'changes-since': iso1} - images = self.client.get_images(filters=filters) - self.assertEqualImages(images, (UUID4, UUID3, UUID2), unjsonify=False) - - # Expect 1 images (0 deleted) - filters = {'changes-since': iso2} - images = self.client.get_images_detailed(filters=filters) - self.assertEqualImages(images, (UUID4,), unjsonify=False) - - # Expect 0 images (0 deleted) - filters = {'changes-since': iso4} - images = self.client.get_images(filters=filters) - self.assertEqualImages(images, (), unjsonify=False) - - def test_get_image_details_with_size_min(self): - """Tests that a detailed call can be filtered by size_min""" - extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images_detailed(filters={'size_min': 20}) - self.assertEqual(1, len(images)) - - for image in images: - self.assertGreaterEqual(image['size'], 20) - - def test_get_image_details_by_property(self): - """Tests that a detailed call can be filtered by a property""" - extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving', - properties={'p a': 'v a'}) - - db_api.image_create(self.context, extra_fixture) - - filters = {'property-p a': 'v a'} - images = self.client.get_images_detailed(filters=filters) - self.assertEqual(1, len(images)) - - for image in images: - self.assertEqual('v a', image['properties']['p a']) - - def test_get_image_is_public_v1(self): - """Tests that a detailed call can be filtered by a property""" - extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving', - properties={'is_public': 'avalue'}) - - context = copy.copy(self.context) - db_api.image_create(context, extra_fixture) - - filters = {'property-is_public': 'avalue'} - images = 
self.client.get_images_detailed(filters=filters) - self.assertEqual(1, len(images)) - - for image in images: - self.assertEqual('avalue', image['properties']['is_public']) - - def test_get_image_details_sort_disk_format_asc(self): - """ - Tests that a detailed call returns list of - public images sorted alphabetically by disk_format in - ascending order. - """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf', - disk_format='ami', - container_format='ami') - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='xyz', - disk_format='vdi') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.get_images_detailed(sort_key='disk_format', - sort_dir='asc') - - self.assertEqualImages(images, (UUID3, UUID4, UUID2), unjsonify=False) - - def test_get_image(self): - """Tests that the detailed info about an image returned""" - fixture = self.get_fixture(id=UUID1, name='fake image #1', - disk_format='ami', container_format='ami', - is_public=False, size=13, - properties={'type': 'kernel'}) - - data = self.client.get_image(UUID1) - - for k, v in fixture.items(): - el = data[k] - self.assertEqual(v, data[k], - "Failed v != data[k] where v = %(v)s and " - "k = %(k)s and data[k] = %(el)s" % {'v': v, - 'k': k, - 'el': el}) - - def test_get_image_non_existing(self): - """Tests that NotFound is raised when getting a non-existing image""" - self.assertRaises(exception.NotFound, - self.client.get_image, - _gen_uuid()) - - def test_add_image_basic(self): - """Tests that we can add image metadata and returns the new id""" - fixture = self.get_fixture(is_public=True) - - new_image = self.client.add_image(fixture) - - # Test all other attributes set - data = self.client.get_image(new_image['id']) - - for k, v in fixture.items(): - self.assertEqual(v, data[k]) - - # Test status was updated properly - self.assertIn('status', data.keys()) - self.assertEqual('active', 
data['status']) - - def test_add_image_with_properties(self): - """Tests that we can add image metadata with properties""" - fixture = self.get_fixture(location="file:///tmp/glance-tests/2", - properties={'distro': 'Ubuntu 10.04 LTS'}, - is_public=True) - - new_image = self.client.add_image(fixture) - - del fixture['location'] - for k, v in fixture.items(): - self.assertEqual(v, new_image[k]) - - # Test status was updated properly - self.assertIn('status', new_image.keys()) - self.assertEqual('active', new_image['status']) - - def test_add_image_with_location_data(self): - """Tests that we can add image metadata with properties""" - location = "file:///tmp/glance-tests/2" - loc_meta = {'key': 'value'} - fixture = self.get_fixture(location_data=[{'url': location, - 'metadata': loc_meta, - 'status': 'active'}], - properties={'distro': 'Ubuntu 10.04 LTS'}) - - new_image = self.client.add_image(fixture) - - self.assertEqual(location, new_image['location']) - self.assertEqual(location, new_image['location_data'][0]['url']) - self.assertEqual(loc_meta, new_image['location_data'][0]['metadata']) - - def test_add_image_with_location_data_with_encryption(self): - """Tests that we can add image metadata with properties and - enable encryption. 
- """ - self.client.metadata_encryption_key = '1234567890123456' - - location = "file:///tmp/glance-tests/%d" - loc_meta = {'key': 'value'} - fixture = {'name': 'fake public image', - 'is_public': True, - 'disk_format': 'vmdk', - 'container_format': 'ovf', - 'size': 19, - 'location_data': [{'url': location % 1, - 'metadata': loc_meta, - 'status': 'active'}, - {'url': location % 2, - 'metadata': {}, - 'status': 'active'}], - 'properties': {'distro': 'Ubuntu 10.04 LTS'}} - - new_image = self.client.add_image(fixture) - - self.assertEqual(location % 1, new_image['location']) - self.assertEqual(2, len(new_image['location_data'])) - self.assertEqual(location % 1, new_image['location_data'][0]['url']) - self.assertEqual(loc_meta, new_image['location_data'][0]['metadata']) - self.assertEqual(location % 2, new_image['location_data'][1]['url']) - self.assertEqual({}, new_image['location_data'][1]['metadata']) - - self.client.metadata_encryption_key = None - - def test_add_image_already_exists(self): - """Tests proper exception is raised if image with ID already exists""" - fixture = self.get_fixture(id=UUID2, - location="file:///tmp/glance-tests/2") - - self.assertRaises(exception.Duplicate, - self.client.add_image, - fixture) - - def test_add_image_with_bad_status(self): - """Tests proper exception is raised if a bad status is set""" - fixture = self.get_fixture(status='bad status', - location="file:///tmp/glance-tests/2") - - self.assertRaises(exception.Invalid, - self.client.add_image, - fixture) - - def test_update_image(self): - """Tests that the /images PUT registry API updates the image""" - fixture = {'name': 'fake public image #2', - 'disk_format': 'vmdk'} - - self.assertTrue(self.client.update_image(UUID2, fixture)) - - # Test all other attributes set - data = self.client.get_image(UUID2) - - for k, v in fixture.items(): - self.assertEqual(v, data[k]) - - def test_update_image_public(self): - """Tests that the /images PUT registry API updates the image""" - 
fixture = {'name': 'fake public image #2', - 'is_public': True, - 'disk_format': 'vmdk'} - - self.assertTrue(self.client.update_image(UUID2, fixture)) - - # Test all other attributes set - data = self.client.get_image(UUID2) - - for k, v in fixture.items(): - self.assertEqual(v, data[k]) - - def test_update_image_private(self): - """Tests that the /images PUT registry API updates the image""" - fixture = {'name': 'fake public image #2', - 'is_public': False, - 'disk_format': 'vmdk'} - - self.assertTrue(self.client.update_image(UUID2, fixture)) - - # Test all other attributes set - data = self.client.get_image(UUID2) - - for k, v in fixture.items(): - self.assertEqual(v, data[k]) - - def test_update_image_not_existing(self): - """Tests non existing image update doesn't work""" - fixture = self.get_fixture(status='bad status') - - self.assertRaises(exception.NotFound, - self.client.update_image, - _gen_uuid(), - fixture) - - def test_delete_image(self): - """Tests that image metadata is deleted properly""" - # Grab the original number of images - orig_num_images = len(self.client.get_images()) - - # Delete image #2 - image = self.FIXTURES[1] - deleted_image = self.client.delete_image(image['id']) - self.assertTrue(deleted_image) - self.assertEqual(image['id'], deleted_image['id']) - self.assertTrue(deleted_image['deleted']) - self.assertTrue(deleted_image['deleted_at']) - - # Verify one less image - new_num_images = len(self.client.get_images()) - - self.assertEqual(orig_num_images - 1, new_num_images) - - def test_delete_image_not_existing(self): - """Check that one cannot delete non-existing image.""" - self.assertRaises(exception.NotFound, - self.client.delete_image, - _gen_uuid()) - - def test_get_image_members(self): - """Test getting image members.""" - memb_list = self.client.get_image_members(UUID2) - num_members = len(memb_list) - self.assertEqual(0, num_members) - - def test_get_image_members_not_existing(self): - """Test getting non-existent image 
members.""" - self.assertRaises(exception.NotFound, - self.client.get_image_members, - _gen_uuid()) - - def test_get_member_images(self): - """Test getting member images.""" - memb_list = self.client.get_member_images('pattieblack') - num_members = len(memb_list) - self.assertEqual(0, num_members) - - def test_add_replace_members(self): - """Test replacing image members.""" - self.assertTrue(self.client.add_member(UUID2, 'pattieblack')) - self.assertTrue(self.client.replace_members(UUID2, - dict(member_id='pattie' - 'black2'))) - - def test_add_delete_member(self): - """Tests deleting image members""" - self.client.add_member(UUID2, 'pattieblack') - self.assertTrue(self.client.delete_member(UUID2, 'pattieblack')) - - -class TestBaseClient(testtools.TestCase): - - """ - Test proper actions made for both valid and invalid requests - against a Registry service - """ - def test_connect_kwargs_default_values(self): - actual = test_client.BaseClient('127.0.0.1').get_connect_kwargs() - self.assertEqual({'timeout': None}, actual) - - def test_connect_kwargs(self): - base_client = test_client.BaseClient( - host='127.0.0.1', port=80, timeout=1, use_ssl=True) - actual = base_client.get_connect_kwargs() - expected = {'insecure': False, - 'key_file': None, - 'cert_file': None, - 'timeout': 1} - for k in expected.keys(): - self.assertEqual(expected[k], actual[k]) - - -class TestRegistryV1ClientApi(base.IsolatedUnitTest): - - def setUp(self): - """Establish a clean test environment.""" - super(TestRegistryV1ClientApi, self).setUp() - self.context = context.RequestContext() - reload_module(rapi) - - def test_get_registry_client(self): - actual_client = rapi.get_registry_client(self.context) - self.assertIsNone(actual_client.identity_headers) - - def test_get_registry_client_with_identity_headers(self): - self.config(send_identity_headers=True) - expected_identity_headers = { - 'X-User-Id': '', - 'X-Tenant-Id': '', - 'X-Roles': ','.join(self.context.roles), - 'X-Identity-Status': 
'Confirmed', - 'X-Service-Catalog': 'null', - } - actual_client = rapi.get_registry_client(self.context) - self.assertEqual(expected_identity_headers, - actual_client.identity_headers) - - def test_configure_registry_client_not_using_use_user_token(self): - self.config(use_user_token=False) - with patch.object(rapi, 'configure_registry_admin_creds') as mock_rapi: - rapi.configure_registry_client() - mock_rapi.assert_called_once_with() - - def _get_fake_config_creds(self, auth_url='auth_url', strategy='keystone'): - return { - 'user': 'user', - 'password': 'password', - 'username': 'user', - 'tenant': 'tenant', - 'auth_url': auth_url, - 'strategy': strategy, - 'region': 'region' - } - - def test_configure_registry_admin_creds(self): - expected = self._get_fake_config_creds(auth_url=None, - strategy='configured_strategy') - self.config(admin_user=expected['user']) - self.config(admin_password=expected['password']) - self.config(admin_tenant_name=expected['tenant']) - self.config(auth_strategy=expected['strategy']) - self.config(auth_region=expected['region']) - self.stubs.Set(os, 'getenv', lambda x: None) - - self.assertIsNone(rapi._CLIENT_CREDS) - rapi.configure_registry_admin_creds() - self.assertEqual(expected, rapi._CLIENT_CREDS) - - def test_configure_registry_admin_creds_with_auth_url(self): - expected = self._get_fake_config_creds() - self.config(admin_user=expected['user']) - self.config(admin_password=expected['password']) - self.config(admin_tenant_name=expected['tenant']) - self.config(auth_url=expected['auth_url']) - self.config(auth_strategy='test_strategy') - self.config(auth_region=expected['region']) - - self.assertIsNone(rapi._CLIENT_CREDS) - rapi.configure_registry_admin_creds() - self.assertEqual(expected, rapi._CLIENT_CREDS) - - -class FakeResponse(object): - status = http.ACCEPTED - - def getheader(*args, **kwargs): - return None - - -class TestRegistryV1ClientRequests(base.IsolatedUnitTest): - - def setUp(self): - 
super(TestRegistryV1ClientRequests, self).setUp() - - def test_do_request_with_identity_headers(self): - identity_headers = {'foo': 'bar'} - self.client = rclient.RegistryClient("0.0.0.0", - identity_headers=identity_headers) - - with patch.object(test_client.BaseClient, 'do_request', - return_value=FakeResponse()) as mock_do_request: - self.client.do_request("GET", "/images") - mock_do_request.assert_called_once_with("GET", "/images", - headers=identity_headers) - - def test_do_request(self): - self.client = rclient.RegistryClient("0.0.0.0") - - with patch.object(test_client.BaseClient, 'do_request', - return_value=FakeResponse()) as mock_do_request: - self.client.do_request("GET", "/images") - mock_do_request.assert_called_once_with("GET", "/images", - headers={}) - - def test_registry_invalid_token_exception_handling(self): - self.image_controller = acontroller() - request = webob.Request.blank('/images') - request.method = 'GET' - request.context = context.RequestContext() - - with patch.object(rapi, 'get_images_detail') as mock_detail: - mock_detail.side_effect = exception.NotAuthenticated() - self.assertRaises(webob.exc.HTTPUnauthorized, - self.image_controller.detail, request) diff --git a/glance/tests/unit/v1/test_upload_utils.py b/glance/tests/unit/v1/test_upload_utils.py deleted file mode 100644 index 60cb4d26..00000000 --- a/glance/tests/unit/v1/test_upload_utils.py +++ /dev/null @@ -1,345 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from contextlib import contextmanager - -import glance_store -import mock -from mock import patch -import webob.exc - -from glance.api.v1 import upload_utils -from glance.common import exception -from glance.common import store_utils -from glance.common import utils -import glance.registry.client.v1.api as registry -from glance.tests.unit import base -import glance.tests.unit.utils as unit_test_utils - - -class TestUploadUtils(base.StoreClearingUnitTest): - def setUp(self): - super(TestUploadUtils, self).setUp() - self.config(debug=True) - - def test_initiate_delete(self): - req = unit_test_utils.get_fake_request() - location = {"url": "file://foo/bar", - "metadata": {}, - "status": "active"} - id = unit_test_utils.UUID1 - - with patch.object(store_utils, - "safe_delete_from_backend") as mock_store_utils: - upload_utils.initiate_deletion(req, location, id) - mock_store_utils.assert_called_once_with(req.context, - id, - location) - - def test_initiate_delete_with_delayed_delete(self): - self.config(delayed_delete=True) - req = unit_test_utils.get_fake_request() - location = {"url": "file://foo/bar", - "metadata": {}, - "status": "active"} - id = unit_test_utils.UUID1 - - with patch.object(store_utils, "schedule_delayed_delete_from_backend", - return_value=True) as mock_store_utils: - upload_utils.initiate_deletion(req, location, id) - mock_store_utils.assert_called_once_with(req.context, - id, - location) - - def test_safe_kill(self): - req = unit_test_utils.get_fake_request() - id = unit_test_utils.UUID1 - - with patch.object(registry, "update_image_metadata") as mock_registry: - upload_utils.safe_kill(req, id, 'saving') - mock_registry.assert_called_once_with(req.context, id, - {'status': 'killed'}, - from_state='saving') - - def test_safe_kill_with_error(self): - req = unit_test_utils.get_fake_request() - id = unit_test_utils.UUID1 - - with 
patch.object(registry, "update_image_metadata", - side_effect=Exception()) as mock_registry: - upload_utils.safe_kill(req, id, 'saving') - mock_registry.assert_called_once_with(req.context, id, - {'status': 'killed'}, - from_state='saving') - - @contextmanager - def _get_store_and_notifier(self, image_size=10, ext_update_data=None, - ret_checksum="checksum", exc_class=None): - location = "file://foo/bar" - checksum = "checksum" - size = 10 - update_data = {'checksum': checksum} - if ext_update_data is not None: - update_data.update(ext_update_data) - image_meta = {'id': unit_test_utils.UUID1, - 'size': image_size} - image_data = "blah" - - store = mock.MagicMock() - notifier = mock.MagicMock() - - if exc_class is not None: - store.add.side_effect = exc_class - else: - store.add.return_value = (location, size, ret_checksum, {}) - yield (location, checksum, image_meta, image_data, store, notifier, - update_data) - - def test_upload_data_to_store(self): - # 'user_storage_quota' is not set - def store_add(image_id, data, size, **kwargs): - # Check if 'data' is instance of 'CooperativeReader' when - # 'user_storage_quota' is disabled. 
- self.assertIsInstance(data, utils.CooperativeReader) - return location, 10, "checksum", {} - - req = unit_test_utils.get_fake_request() - with self._get_store_and_notifier( - ext_update_data={'size': 10}, - exc_class=store_add) as (location, checksum, image_meta, - image_data, store, notifier, - update_data): - ret = image_meta.update(update_data) - with patch.object(registry, 'update_image_metadata', - return_value=ret) as mock_update_image_metadata: - actual_meta, location_data = upload_utils.upload_data_to_store( - req, image_meta, image_data, store, notifier) - - self.assertEqual(location, location_data['url']) - self.assertEqual(image_meta.update(update_data), actual_meta) - mock_update_image_metadata.assert_called_once_with( - req.context, image_meta['id'], update_data, - from_state='saving') - - def test_upload_data_to_store_user_storage_quota_enabled(self): - # Enable user_storage_quota - self.config(user_storage_quota='100B') - - def store_add(image_id, data, size, **kwargs): - # Check if 'data' is instance of 'LimitingReader' when - # 'user_storage_quota' is enabled. 
- self.assertIsInstance(data, utils.LimitingReader) - return location, 10, "checksum", {} - - req = unit_test_utils.get_fake_request() - with self._get_store_and_notifier( - ext_update_data={'size': 10}, - exc_class=store_add) as (location, checksum, image_meta, - image_data, store, notifier, - update_data): - ret = image_meta.update(update_data) - # mock 'check_quota' - mock_check_quota = patch('glance.api.common.check_quota', - return_value=100) - mock_check_quota.start() - self.addCleanup(mock_check_quota.stop) - with patch.object(registry, 'update_image_metadata', - return_value=ret) as mock_update_image_metadata: - actual_meta, location_data = upload_utils.upload_data_to_store( - req, image_meta, image_data, store, notifier) - - self.assertEqual(location, location_data['url']) - self.assertEqual(image_meta.update(update_data), actual_meta) - mock_update_image_metadata.assert_called_once_with( - req.context, image_meta['id'], update_data, - from_state='saving') - # 'check_quota' is called two times - check_quota_call_count = ( - mock_check_quota.target.check_quota.call_count) - self.assertEqual(2, check_quota_call_count) - - def test_upload_data_to_store_mismatch_size(self): - req = unit_test_utils.get_fake_request() - - with self._get_store_and_notifier( - image_size=11) as (location, checksum, image_meta, image_data, - store, notifier, update_data): - ret = image_meta.update(update_data) - with patch.object(registry, 'update_image_metadata', - return_value=ret) as mock_update_image_metadata: - self.assertRaises(webob.exc.HTTPBadRequest, - upload_utils.upload_data_to_store, - req, image_meta, image_data, store, - notifier) - mock_update_image_metadata.assert_called_with( - req.context, image_meta['id'], {'status': 'killed'}, - from_state='saving') - - def test_upload_data_to_store_mismatch_checksum(self): - req = unit_test_utils.get_fake_request() - - with self._get_store_and_notifier( - ret_checksum='fake') as (location, checksum, image_meta, - image_data, 
store, notifier, update_data): - ret = image_meta.update(update_data) - with patch.object(registry, "update_image_metadata", - return_value=ret) as mock_update_image_metadata: - self.assertRaises(webob.exc.HTTPBadRequest, - upload_utils.upload_data_to_store, - req, image_meta, image_data, store, - notifier) - mock_update_image_metadata.assert_called_with( - req.context, image_meta['id'], {'status': 'killed'}, - from_state='saving') - - def _test_upload_data_to_store_exception(self, exc_class, expected_class): - req = unit_test_utils.get_fake_request() - - with self._get_store_and_notifier( - exc_class=exc_class) as (location, checksum, image_meta, - image_data, store, notifier, update_data): - with patch.object(upload_utils, 'safe_kill') as mock_safe_kill: - self.assertRaises(expected_class, - upload_utils.upload_data_to_store, - req, image_meta, image_data, store, notifier) - mock_safe_kill.assert_called_once_with( - req, image_meta['id'], 'saving') - - def _test_upload_data_to_store_exception_with_notify(self, - exc_class, - expected_class, - image_killed=True): - req = unit_test_utils.get_fake_request() - - with self._get_store_and_notifier( - exc_class=exc_class) as (location, checksum, image_meta, - image_data, store, notifier, update_data): - with patch.object(upload_utils, 'safe_kill') as mock_safe_kill: - self.assertRaises(expected_class, - upload_utils.upload_data_to_store, - req, image_meta, image_data, store, - notifier) - if image_killed: - mock_safe_kill.assert_called_with(req, image_meta['id'], - 'saving') - - def test_upload_data_to_store_raises_store_disabled(self): - """Test StoreDisabled exception is raised while uploading data""" - self._test_upload_data_to_store_exception_with_notify( - glance_store.StoreAddDisabled, - webob.exc.HTTPGone, - image_killed=True) - - def test_upload_data_to_store_duplicate(self): - """See note in glance.api.v1.upload_utils on why we don't want image to - be deleted in this case. 
- """ - self._test_upload_data_to_store_exception_with_notify( - exception.Duplicate, - webob.exc.HTTPConflict, - image_killed=False) - - def test_upload_data_to_store_forbidden(self): - self._test_upload_data_to_store_exception_with_notify( - exception.Forbidden, - webob.exc.HTTPForbidden) - - def test_upload_data_to_store_storage_full(self): - self._test_upload_data_to_store_exception_with_notify( - glance_store.StorageFull, - webob.exc.HTTPRequestEntityTooLarge) - - def test_upload_data_to_store_storage_write_denied(self): - self._test_upload_data_to_store_exception_with_notify( - glance_store.StorageWriteDenied, - webob.exc.HTTPServiceUnavailable) - - def test_upload_data_to_store_size_limit_exceeded(self): - self._test_upload_data_to_store_exception_with_notify( - exception.ImageSizeLimitExceeded, - webob.exc.HTTPRequestEntityTooLarge) - - def test_upload_data_to_store_http_error(self): - self._test_upload_data_to_store_exception_with_notify( - webob.exc.HTTPError, - webob.exc.HTTPError) - - def test_upload_data_to_store_client_disconnect(self): - self._test_upload_data_to_store_exception( - ValueError, - webob.exc.HTTPBadRequest) - - def test_upload_data_to_store_client_disconnect_ioerror(self): - self._test_upload_data_to_store_exception( - IOError, - webob.exc.HTTPBadRequest) - - def test_upload_data_to_store_exception(self): - self._test_upload_data_to_store_exception_with_notify( - Exception, - webob.exc.HTTPInternalServerError) - - def test_upload_data_to_store_not_found_after_upload(self): - req = unit_test_utils.get_fake_request() - - with self._get_store_and_notifier( - ext_update_data={'size': 10}) as (location, checksum, image_meta, - image_data, store, notifier, - update_data): - exc = exception.ImageNotFound - with patch.object(registry, 'update_image_metadata', - side_effect=exc) as mock_update_image_metadata: - with patch.object(upload_utils, - "initiate_deletion") as mock_initiate_del: - with patch.object(upload_utils, - "safe_kill") as 
mock_safe_kill: - self.assertRaises(webob.exc.HTTPPreconditionFailed, - upload_utils.upload_data_to_store, - req, image_meta, image_data, store, - notifier) - mock_update_image_metadata.assert_called_once_with( - req.context, image_meta['id'], update_data, - from_state='saving') - mock_initiate_del.assert_called_once_with( - req, {'url': location, 'status': 'active', - 'metadata': {}}, image_meta['id']) - mock_safe_kill.assert_called_once_with( - req, image_meta['id'], 'saving') - - @mock.patch.object(registry, 'update_image_metadata', - side_effect=exception.NotAuthenticated) - @mock.patch.object(upload_utils, 'initiate_deletion') - def test_activate_image_with_expired_token( - self, mocked_delete, mocked_update): - """Test token expiration during image upload. - - If users token expired before image was uploaded then if auth error - was caught from registry during changing image status from 'saving' - to 'active' then it's required to delete all image data. - """ - context = mock.Mock() - req = mock.Mock() - req.context = context - with self._get_store_and_notifier() as (location, checksum, image_meta, - image_data, store, notifier, - update_data): - self.assertRaises(webob.exc.HTTPUnauthorized, - upload_utils.upload_data_to_store, - req, image_meta, image_data, store, notifier) - self.assertEqual(2, mocked_update.call_count) - mocked_delete.assert_called_once_with( - req, - {'url': 'file://foo/bar', 'status': 'active', 'metadata': {}}, - 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d') diff --git a/glance/tests/unit/v2/__init__.py b/glance/tests/unit/v2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glance/tests/unit/v2/test_discovery_image_import.py b/glance/tests/unit/v2/test_discovery_image_import.py deleted file mode 100644 index 82d3a189..00000000 --- a/glance/tests/unit/v2/test_discovery_image_import.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) 2017 RedHat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import glance.api.v2.discovery -import glance.tests.unit.utils as unit_test_utils -import glance.tests.utils as test_utils - - -class TestInfoControllers(test_utils.BaseTestCase): - - def setUp(self): - super(TestInfoControllers, self).setUp() - self.controller = glance.api.v2.discovery.InfoController() - - def test_get_image_import(self): - req = unit_test_utils.get_fake_request() - output = self.controller.get_image_import(req) - self.assertIn('import-methods', output) - self.assertEqual([], output['import-methods']['value']) diff --git a/glance/tests/unit/v2/test_image_actions_resource.py b/glance/tests/unit/v2/test_image_actions_resource.py deleted file mode 100644 index 92e1c631..00000000 --- a/glance/tests/unit/v2/test_image_actions_resource.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 2015 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import glance_store as store -import webob - -import glance.api.v2.image_actions as image_actions -import glance.context -from glance.tests.unit import base -import glance.tests.unit.utils as unit_test_utils - - -BASE_URI = unit_test_utils.BASE_URI - -USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' -CHKSUM = '93264c3edf5972c9f1cb309543d38a5c' - - -def _db_fixture(id, **kwargs): - obj = { - 'id': id, - 'name': None, - 'visibility': 'shared', - 'properties': {}, - 'checksum': None, - 'owner': None, - 'status': 'queued', - 'tags': [], - 'size': None, - 'virtual_size': None, - 'locations': [], - 'protected': False, - 'disk_format': None, - 'container_format': None, - 'deleted': False, - 'min_ram': None, - 'min_disk': None, - } - obj.update(kwargs) - return obj - - -class TestImageActionsController(base.IsolatedUnitTest): - def setUp(self): - super(TestImageActionsController, self).setUp() - self.db = unit_test_utils.FakeDB(initialize=False) - self.policy = unit_test_utils.FakePolicyEnforcer() - self.notifier = unit_test_utils.FakeNotifier() - self.store = unit_test_utils.FakeStoreAPI() - for i in range(1, 4): - self.store.data['%s/fake_location_%i' % (BASE_URI, i)] = ('Z', 1) - self.store_utils = unit_test_utils.FakeStoreUtils(self.store) - self.controller = image_actions.ImageActionsController( - self.db, - self.policy, - self.notifier, - self.store) - self.controller.gateway.store_utils = self.store_utils - store.create_stores() - - def _get_fake_context(self, user=USER1, tenant=TENANT1, roles=None, - is_admin=False): - if roles is None: - roles = ['member'] - - kwargs = { - 'user': user, - 'tenant': tenant, - 'roles': roles, - 'is_admin': is_admin, - } - - context = glance.context.RequestContext(**kwargs) - return context - - def _create_image(self, status): - self.images = [ - _db_fixture(UUID1, owner=TENANT1, checksum=CHKSUM, - name='1', size=256, virtual_size=1024, - 
visibility='public', - locations=[{'url': '%s/%s' % (BASE_URI, UUID1), - 'metadata': {}, 'status': 'active'}], - disk_format='raw', - container_format='bare', - status=status), - ] - context = self._get_fake_context() - [self.db.image_create(context, image) for image in self.images] - - def test_deactivate_from_active(self): - self._create_image('active') - - request = unit_test_utils.get_fake_request() - self.controller.deactivate(request, UUID1) - - image = self.db.image_get(request.context, UUID1) - - self.assertEqual('deactivated', image['status']) - - def test_deactivate_from_deactivated(self): - self._create_image('deactivated') - - request = unit_test_utils.get_fake_request() - self.controller.deactivate(request, UUID1) - - image = self.db.image_get(request.context, UUID1) - - self.assertEqual('deactivated', image['status']) - - def _test_deactivate_from_wrong_status(self, status): - - # deactivate will yield an error if the initial status is anything - # other than 'active' or 'deactivated' - self._create_image(status) - - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.deactivate, - request, UUID1) - - def test_deactivate_from_queued(self): - self._test_deactivate_from_wrong_status('queued') - - def test_deactivate_from_saving(self): - self._test_deactivate_from_wrong_status('saving') - - def test_deactivate_from_killed(self): - self._test_deactivate_from_wrong_status('killed') - - def test_deactivate_from_pending_delete(self): - self._test_deactivate_from_wrong_status('pending_delete') - - def test_deactivate_from_deleted(self): - self._test_deactivate_from_wrong_status('deleted') - - def test_reactivate_from_active(self): - self._create_image('active') - - request = unit_test_utils.get_fake_request() - self.controller.reactivate(request, UUID1) - - image = self.db.image_get(request.context, UUID1) - - self.assertEqual('active', image['status']) - - def test_reactivate_from_deactivated(self): - 
self._create_image('deactivated') - - request = unit_test_utils.get_fake_request() - self.controller.reactivate(request, UUID1) - - image = self.db.image_get(request.context, UUID1) - - self.assertEqual('active', image['status']) - - def _test_reactivate_from_wrong_status(self, status): - - # reactivate will yield an error if the initial status is anything - # other than 'active' or 'deactivated' - self._create_image(status) - - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.reactivate, - request, UUID1) - - def test_reactivate_from_queued(self): - self._test_reactivate_from_wrong_status('queued') - - def test_reactivate_from_saving(self): - self._test_reactivate_from_wrong_status('saving') - - def test_reactivate_from_killed(self): - self._test_reactivate_from_wrong_status('killed') - - def test_reactivate_from_pending_delete(self): - self._test_reactivate_from_wrong_status('pending_delete') - - def test_reactivate_from_deleted(self): - self._test_reactivate_from_wrong_status('deleted') diff --git a/glance/tests/unit/v2/test_image_data_resource.py b/glance/tests/unit/v2/test_image_data_resource.py deleted file mode 100644 index 3f68ddff..00000000 --- a/glance/tests/unit/v2/test_image_data_resource.py +++ /dev/null @@ -1,821 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import uuid - -from cursive import exception as cursive_exception -import glance_store -import mock -import six -from six.moves import http_client as http -import webob - -import glance.api.policy -import glance.api.v2.image_data -from glance.common import exception -from glance.common import wsgi -from glance.tests.unit import base -import glance.tests.unit.utils as unit_test_utils -import glance.tests.utils as test_utils - - -class Raise(object): - def __init__(self, exc): - self.exc = exc - - def __call__(self, *args, **kwargs): - raise self.exc - - -class FakeImage(object): - def __init__(self, image_id=None, data=None, checksum=None, size=0, - virtual_size=0, locations=None, container_format='bear', - disk_format='rawr', status=None): - self.image_id = image_id - self.data = data - self.checksum = checksum - self.size = size - self.virtual_size = virtual_size - self.locations = locations - self.container_format = container_format - self.disk_format = disk_format - self._status = status - - @property - def status(self): - return self._status - - @status.setter - def status(self, value): - if isinstance(self._status, BaseException): - raise self._status - else: - self._status = value - - def get_data(self, offset=0, chunk_size=None): - if chunk_size: - return self.data[offset:offset + chunk_size] - return self.data[offset:] - - def set_data(self, data, size=None): - self.data = ''.join(data) - self.size = size - self.status = 'modified-by-fake' - - -class FakeImageRepo(object): - def __init__(self, result=None): - self.result = result - - def get(self, image_id): - if isinstance(self.result, BaseException): - raise self.result - else: - return self.result - - def save(self, image, from_state=None): - self.saved_image = image - - -class FakeGateway(object): - def __init__(self, repo): - self.repo = repo - - def get_repo(self, context): - return self.repo - - -class TestImagesController(base.StoreClearingUnitTest): - def setUp(self): - super(TestImagesController, 
self).setUp() - - self.config(debug=True) - self.image_repo = FakeImageRepo() - self.gateway = FakeGateway(self.image_repo) - self.controller = glance.api.v2.image_data.ImageDataController( - gateway=self.gateway) - - def test_download(self): - request = unit_test_utils.get_fake_request() - image = FakeImage('abcd', - locations=[{'url': 'http://example.com/image', - 'metadata': {}, 'status': 'active'}]) - self.image_repo.result = image - image = self.controller.download(request, unit_test_utils.UUID1) - self.assertEqual('abcd', image.image_id) - - def test_download_deactivated(self): - request = unit_test_utils.get_fake_request() - image = FakeImage('abcd', - status='deactivated', - locations=[{'url': 'http://example.com/image', - 'metadata': {}, 'status': 'active'}]) - self.image_repo.result = image - self.assertRaises(webob.exc.HTTPForbidden, self.controller.download, - request, str(uuid.uuid4())) - - def test_download_no_location(self): - # NOTE(mclaren): NoContent will be raised by the ResponseSerializer - # That's tested below. 
- request = unit_test_utils.get_fake_request() - self.image_repo.result = FakeImage('abcd') - image = self.controller.download(request, unit_test_utils.UUID2) - self.assertEqual('abcd', image.image_id) - - def test_download_non_existent_image(self): - request = unit_test_utils.get_fake_request() - self.image_repo.result = exception.NotFound() - self.assertRaises(webob.exc.HTTPNotFound, self.controller.download, - request, str(uuid.uuid4())) - - def test_download_forbidden(self): - request = unit_test_utils.get_fake_request() - self.image_repo.result = exception.Forbidden() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.download, - request, str(uuid.uuid4())) - - def test_download_ok_when_get_image_location_forbidden(self): - class ImageLocations(object): - def __len__(self): - raise exception.Forbidden() - - request = unit_test_utils.get_fake_request() - image = FakeImage('abcd') - self.image_repo.result = image - image.locations = ImageLocations() - image = self.controller.download(request, unit_test_utils.UUID1) - self.assertEqual('abcd', image.image_id) - - def test_upload(self): - request = unit_test_utils.get_fake_request() - image = FakeImage('abcd') - self.image_repo.result = image - self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) - self.assertEqual('YYYY', image.data) - self.assertEqual(4, image.size) - - def test_upload_status(self): - request = unit_test_utils.get_fake_request() - image = FakeImage('abcd') - self.image_repo.result = image - insurance = {'called': False} - - def read_data(): - insurance['called'] = True - self.assertEqual('saving', self.image_repo.saved_image.status) - yield 'YYYY' - - self.controller.upload(request, unit_test_utils.UUID2, - read_data(), None) - self.assertTrue(insurance['called']) - self.assertEqual('modified-by-fake', - self.image_repo.saved_image.status) - - def test_upload_no_size(self): - request = unit_test_utils.get_fake_request() - image = FakeImage('abcd') - self.image_repo.result = 
image - self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', None) - self.assertEqual('YYYY', image.data) - self.assertIsNone(image.size) - - def test_upload_invalid(self): - request = unit_test_utils.get_fake_request() - image = FakeImage('abcd') - image.status = ValueError() - self.image_repo.result = image - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.upload, - request, unit_test_utils.UUID1, 'YYYY', 4) - - def test_upload_with_expired_token(self): - def side_effect(image, from_state=None): - if from_state == 'saving': - raise exception.NotAuthenticated() - - mocked_save = mock.Mock(side_effect=side_effect) - mocked_delete = mock.Mock() - request = unit_test_utils.get_fake_request() - image = FakeImage('abcd') - image.delete = mocked_delete - self.image_repo.result = image - self.image_repo.save = mocked_save - self.assertRaises(webob.exc.HTTPUnauthorized, self.controller.upload, - request, unit_test_utils.UUID1, 'YYYY', 4) - self.assertEqual(3, mocked_save.call_count) - mocked_delete.assert_called_once_with() - - def test_upload_non_existent_image_during_save_initiates_deletion(self): - def fake_save_not_found(self, from_state=None): - raise exception.ImageNotFound() - - def fake_save_conflict(self, from_state=None): - raise exception.Conflict() - - for fun in [fake_save_not_found, fake_save_conflict]: - request = unit_test_utils.get_fake_request() - image = FakeImage('abcd', locations=['http://example.com/image']) - self.image_repo.result = image - self.image_repo.save = fun - image.delete = mock.Mock() - self.assertRaises(webob.exc.HTTPGone, self.controller.upload, - request, str(uuid.uuid4()), 'ABC', 3) - self.assertTrue(image.delete.called) - - def test_upload_non_existent_image_raises_image_not_found_exception(self): - def fake_save(self, from_state=None): - raise exception.ImageNotFound() - - def fake_delete(): - raise exception.ImageNotFound() - - request = unit_test_utils.get_fake_request() - image = FakeImage('abcd', 
locations=['http://example.com/image']) - self.image_repo.result = image - self.image_repo.save = fake_save - image.delete = fake_delete - self.assertRaises(webob.exc.HTTPGone, self.controller.upload, - request, str(uuid.uuid4()), 'ABC', 3) - - def test_upload_non_existent_image_raises_store_not_found_exception(self): - def fake_save(self, from_state=None): - raise glance_store.NotFound() - - def fake_delete(): - raise exception.ImageNotFound() - - request = unit_test_utils.get_fake_request() - image = FakeImage('abcd', locations=['http://example.com/image']) - self.image_repo.result = image - self.image_repo.save = fake_save - image.delete = fake_delete - self.assertRaises(webob.exc.HTTPGone, self.controller.upload, - request, str(uuid.uuid4()), 'ABC', 3) - - def test_upload_non_existent_image_before_save(self): - request = unit_test_utils.get_fake_request() - self.image_repo.result = exception.NotFound() - self.assertRaises(webob.exc.HTTPNotFound, self.controller.upload, - request, str(uuid.uuid4()), 'ABC', 3) - - def test_upload_data_exists(self): - request = unit_test_utils.get_fake_request() - image = FakeImage() - exc = exception.InvalidImageStatusTransition(cur_status='active', - new_status='queued') - image.set_data = Raise(exc) - self.image_repo.result = image - self.assertRaises(webob.exc.HTTPConflict, self.controller.upload, - request, unit_test_utils.UUID1, 'YYYY', 4) - - def test_upload_storage_full(self): - request = unit_test_utils.get_fake_request() - image = FakeImage() - image.set_data = Raise(glance_store.StorageFull) - self.image_repo.result = image - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.upload, - request, unit_test_utils.UUID2, 'YYYYYYY', 7) - - def test_upload_signature_verification_fails(self): - request = unit_test_utils.get_fake_request() - image = FakeImage() - image.set_data = Raise(cursive_exception.SignatureVerificationError) - self.image_repo.result = image - 
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.upload, - request, unit_test_utils.UUID1, 'YYYY', 4) - self.assertEqual('killed', self.image_repo.saved_image.status) - - def test_image_size_limit_exceeded(self): - request = unit_test_utils.get_fake_request() - image = FakeImage() - image.set_data = Raise(exception.ImageSizeLimitExceeded) - self.image_repo.result = image - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.upload, - request, unit_test_utils.UUID1, 'YYYYYYY', 7) - - def test_upload_storage_quota_full(self): - request = unit_test_utils.get_fake_request() - self.image_repo.result = exception.StorageQuotaFull("message") - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.upload, - request, unit_test_utils.UUID1, 'YYYYYYY', 7) - - def test_upload_storage_forbidden(self): - request = unit_test_utils.get_fake_request(user=unit_test_utils.USER2) - image = FakeImage() - image.set_data = Raise(exception.Forbidden) - self.image_repo.result = image - self.assertRaises(webob.exc.HTTPForbidden, self.controller.upload, - request, unit_test_utils.UUID2, 'YY', 2) - - def test_upload_storage_internal_error(self): - request = unit_test_utils.get_fake_request() - self.image_repo.result = exception.ServerError() - self.assertRaises(exception.ServerError, - self.controller.upload, - request, unit_test_utils.UUID1, 'ABC', 3) - - def test_upload_storage_write_denied(self): - request = unit_test_utils.get_fake_request(user=unit_test_utils.USER3) - image = FakeImage() - image.set_data = Raise(glance_store.StorageWriteDenied) - self.image_repo.result = image - self.assertRaises(webob.exc.HTTPServiceUnavailable, - self.controller.upload, - request, unit_test_utils.UUID2, 'YY', 2) - - def test_upload_storage_store_disabled(self): - """Test that uploading an image file raises StoreDisabled exception""" - request = unit_test_utils.get_fake_request(user=unit_test_utils.USER3) - image = FakeImage() - image.set_data = 
Raise(glance_store.StoreAddDisabled) - self.image_repo.result = image - self.assertRaises(webob.exc.HTTPGone, - self.controller.upload, - request, unit_test_utils.UUID2, 'YY', 2) - - @mock.patch("glance.common.trust_auth.TokenRefresher") - def test_upload_with_trusts(self, mock_refresher): - """Test that uploading with registry correctly uses trusts""" - # initialize trust environment - self.config(data_api='glance.db.registry.api') - refresher = mock.MagicMock() - mock_refresher.return_value = refresher - refresher.refresh_token.return_value = "fake_token" - # request an image upload - request = unit_test_utils.get_fake_request() - request.environ['keystone.token_auth'] = mock.MagicMock() - request.environ['keystone.token_info'] = { - 'token': { - 'roles': [{'name': 'FakeRole', 'id': 'FakeID'}] - } - } - image = FakeImage('abcd') - self.image_repo.result = image - mock_fake_save = mock.Mock() - mock_fake_save.side_effect = [None, exception.NotAuthenticated, None] - temp_save = FakeImageRepo.save - # mocking save to raise NotAuthenticated on the second call - FakeImageRepo.save = mock_fake_save - self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) - # check image data - self.assertEqual('YYYY', image.data) - self.assertEqual(4, image.size) - FakeImageRepo.save = temp_save - # check that token has been correctly acquired and deleted - mock_refresher.assert_called_once_with( - request.environ['keystone.token_auth'], - request.context.tenant, ['FakeRole']) - refresher.refresh_token.assert_called_once_with() - refresher.release_resources.assert_called_once_with() - self.assertEqual("fake_token", request.context.auth_token) - - @mock.patch("glance.common.trust_auth.TokenRefresher") - def test_upload_with_trusts_fails(self, mock_refresher): - """Test upload with registry if trust was not successfully created""" - # initialize trust environment - self.config(data_api='glance.db.registry.api') - mock_refresher().side_effect = Exception() - # request an image 
upload - request = unit_test_utils.get_fake_request() - image = FakeImage('abcd') - self.image_repo.result = image - self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) - # check image data - self.assertEqual('YYYY', image.data) - self.assertEqual(4, image.size) - # check that the token has not been updated - self.assertEqual(0, mock_refresher().refresh_token.call_count) - - def _test_upload_download_prepare_notification(self): - request = unit_test_utils.get_fake_request() - self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) - output = self.controller.download(request, unit_test_utils.UUID2) - output_log = self.notifier.get_logs() - prepare_payload = output['meta'].copy() - prepare_payload['checksum'] = None - prepare_payload['size'] = None - prepare_payload['virtual_size'] = None - prepare_payload['location'] = None - prepare_payload['status'] = 'queued' - del prepare_payload['updated_at'] - prepare_log = { - 'notification_type': "INFO", - 'event_type': "image.prepare", - 'payload': prepare_payload, - } - self.assertEqual(3, len(output_log)) - prepare_updated_at = output_log[0]['payload']['updated_at'] - del output_log[0]['payload']['updated_at'] - self.assertLessEqual(prepare_updated_at, output['meta']['updated_at']) - self.assertEqual(prepare_log, output_log[0]) - - def _test_upload_download_upload_notification(self): - request = unit_test_utils.get_fake_request() - self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) - output = self.controller.download(request, unit_test_utils.UUID2) - output_log = self.notifier.get_logs() - upload_payload = output['meta'].copy() - upload_log = { - 'notification_type': "INFO", - 'event_type': "image.upload", - 'payload': upload_payload, - } - self.assertEqual(3, len(output_log)) - self.assertEqual(upload_log, output_log[1]) - - def _test_upload_download_activate_notification(self): - request = unit_test_utils.get_fake_request() - self.controller.upload(request, unit_test_utils.UUID2, 
'YYYY', 4) - output = self.controller.download(request, unit_test_utils.UUID2) - output_log = self.notifier.get_logs() - activate_payload = output['meta'].copy() - activate_log = { - 'notification_type': "INFO", - 'event_type': "image.activate", - 'payload': activate_payload, - } - self.assertEqual(3, len(output_log)) - self.assertEqual(activate_log, output_log[2]) - - def test_restore_image_when_upload_failed(self): - request = unit_test_utils.get_fake_request() - image = FakeImage('fake') - image.set_data = Raise(glance_store.StorageWriteDenied) - self.image_repo.result = image - self.assertRaises(webob.exc.HTTPServiceUnavailable, - self.controller.upload, - request, unit_test_utils.UUID2, 'ZZZ', 3) - self.assertEqual('queued', self.image_repo.saved_image.status) - - -class TestImageDataDeserializer(test_utils.BaseTestCase): - - def setUp(self): - super(TestImageDataDeserializer, self).setUp() - self.deserializer = glance.api.v2.image_data.RequestDeserializer() - - def test_upload(self): - request = unit_test_utils.get_fake_request() - request.headers['Content-Type'] = 'application/octet-stream' - request.body = b'YYY' - request.headers['Content-Length'] = 3 - output = self.deserializer.upload(request) - data = output.pop('data') - self.assertEqual(b'YYY', data.read()) - expected = {'size': 3} - self.assertEqual(expected, output) - - def test_upload_chunked(self): - request = unit_test_utils.get_fake_request() - request.headers['Content-Type'] = 'application/octet-stream' - # If we use body_file, webob assumes we want to do a chunked upload, - # ignoring the Content-Length header - request.body_file = six.StringIO('YYY') - output = self.deserializer.upload(request) - data = output.pop('data') - self.assertEqual('YYY', data.read()) - expected = {'size': None} - self.assertEqual(expected, output) - - def test_upload_chunked_with_content_length(self): - request = unit_test_utils.get_fake_request() - request.headers['Content-Type'] = 'application/octet-stream' - 
request.body_file = six.BytesIO(b'YYY') - # The deserializer shouldn't care if the Content-Length is - # set when the user is attempting to send chunked data. - request.headers['Content-Length'] = 3 - output = self.deserializer.upload(request) - data = output.pop('data') - self.assertEqual(b'YYY', data.read()) - expected = {'size': 3} - self.assertEqual(expected, output) - - def test_upload_with_incorrect_content_length(self): - request = unit_test_utils.get_fake_request() - request.headers['Content-Type'] = 'application/octet-stream' - # The deserializer shouldn't care if the Content-Length and - # actual request body length differ. That job is left up - # to the controller - request.body = b'YYY' - request.headers['Content-Length'] = 4 - output = self.deserializer.upload(request) - data = output.pop('data') - self.assertEqual(b'YYY', data.read()) - expected = {'size': 4} - self.assertEqual(expected, output) - - def test_upload_wrong_content_type(self): - request = unit_test_utils.get_fake_request() - request.headers['Content-Type'] = 'application/json' - request.body = b'YYYYY' - self.assertRaises(webob.exc.HTTPUnsupportedMediaType, - self.deserializer.upload, request) - - request = unit_test_utils.get_fake_request() - request.headers['Content-Type'] = 'application/octet-st' - request.body = b'YYYYY' - self.assertRaises(webob.exc.HTTPUnsupportedMediaType, - self.deserializer.upload, request) - - -class TestImageDataSerializer(test_utils.BaseTestCase): - - def setUp(self): - super(TestImageDataSerializer, self).setUp() - self.serializer = glance.api.v2.image_data.ResponseSerializer() - - def test_download(self): - request = wsgi.Request.blank('/') - request.environ = {} - response = webob.Response() - response.request = request - image = FakeImage(size=3, data=[b'Z', b'Z', b'Z']) - self.serializer.download(response, image) - self.assertEqual(b'ZZZ', response.body) - self.assertEqual('3', response.headers['Content-Length']) - self.assertNotIn('Content-MD5', 
response.headers) - self.assertEqual('application/octet-stream', - response.headers['Content-Type']) - - def test_range_requests_for_image_downloads(self): - """ - Test partial download 'Range' requests for images (random image access) - """ - def download_successful_Range(d_range): - request = wsgi.Request.blank('/') - request.environ = {} - request.headers['Range'] = d_range - response = webob.Response() - response.request = request - image = FakeImage(size=3, data=[b'X', b'Y', b'Z']) - self.serializer.download(response, image) - self.assertEqual(206, response.status_code) - self.assertEqual('2', response.headers['Content-Length']) - self.assertEqual('bytes 1-2/3', response.headers['Content-Range']) - self.assertEqual(b'YZ', response.body) - - download_successful_Range('bytes=1-2') - download_successful_Range('bytes=1-') - download_successful_Range('bytes=1-3') - download_successful_Range('bytes=-2') - download_successful_Range('bytes=1-100') - - def full_image_download_w_range(d_range): - request = wsgi.Request.blank('/') - request.environ = {} - request.headers['Range'] = d_range - response = webob.Response() - response.request = request - image = FakeImage(size=3, data=[b'X', b'Y', b'Z']) - self.serializer.download(response, image) - self.assertEqual(206, response.status_code) - self.assertEqual('3', response.headers['Content-Length']) - self.assertEqual('bytes 0-2/3', response.headers['Content-Range']) - self.assertEqual(b'XYZ', response.body) - - full_image_download_w_range('bytes=0-') - full_image_download_w_range('bytes=0-2') - full_image_download_w_range('bytes=0-3') - full_image_download_w_range('bytes=-3') - full_image_download_w_range('bytes=-4') - full_image_download_w_range('bytes=0-100') - full_image_download_w_range('bytes=-100') - - def download_failures_Range(d_range): - request = wsgi.Request.blank('/') - request.environ = {} - request.headers['Range'] = d_range - response = webob.Response() - response.request = request - image = 
FakeImage(size=3, data=[b'Z', b'Z', b'Z']) - self.assertRaises(webob.exc.HTTPRequestRangeNotSatisfiable, - self.serializer.download, - response, image) - return - - download_failures_Range('bytes=4-1') - download_failures_Range('bytes=4-') - download_failures_Range('bytes=3-') - download_failures_Range('bytes=1') - download_failures_Range('bytes=100') - download_failures_Range('bytes=100-') - download_failures_Range('bytes=') - - def test_multi_range_requests_raises_bad_request_error(self): - request = wsgi.Request.blank('/') - request.environ = {} - request.headers['Range'] = 'bytes=0-0,-1' - response = webob.Response() - response.request = request - image = FakeImage(size=3, data=[b'Z', b'Z', b'Z']) - self.assertRaises(webob.exc.HTTPBadRequest, - self.serializer.download, - response, image) - - def test_download_failure_with_valid_range(self): - with mock.patch.object(glance.api.policy.ImageProxy, - 'get_data') as mock_get_data: - mock_get_data.side_effect = glance_store.NotFound(image="image") - request = wsgi.Request.blank('/') - request.environ = {} - request.headers['Range'] = 'bytes=1-2' - response = webob.Response() - response.request = request - image = FakeImage(size=3, data=[b'Z', b'Z', b'Z']) - image.get_data = mock_get_data - self.assertRaises(webob.exc.HTTPNoContent, - self.serializer.download, - response, image) - - def test_content_range_requests_for_image_downloads(self): - """ - Even though Content-Range is incorrect on requests, we support it - for backward compatibility with clients written for pre-Pike - Glance. - The following test is for 'Content-Range' requests, which we have - to ensure that we prevent regression. 
- """ - def download_successful_ContentRange(d_range): - request = wsgi.Request.blank('/') - request.environ = {} - request.headers['Content-Range'] = d_range - response = webob.Response() - response.request = request - image = FakeImage(size=3, data=[b'X', b'Y', b'Z']) - self.serializer.download(response, image) - self.assertEqual(206, response.status_code) - self.assertEqual('2', response.headers['Content-Length']) - self.assertEqual('bytes 1-2/3', response.headers['Content-Range']) - self.assertEqual(b'YZ', response.body) - - download_successful_ContentRange('bytes 1-2/3') - download_successful_ContentRange('bytes 1-2/*') - - def download_failures_ContentRange(d_range): - request = wsgi.Request.blank('/') - request.environ = {} - request.headers['Content-Range'] = d_range - response = webob.Response() - response.request = request - image = FakeImage(size=3, data=[b'Z', b'Z', b'Z']) - self.assertRaises(webob.exc.HTTPRequestRangeNotSatisfiable, - self.serializer.download, - response, image) - return - - download_failures_ContentRange('bytes -3/3') - download_failures_ContentRange('bytes 1-/3') - download_failures_ContentRange('bytes 1-3/3') - download_failures_ContentRange('bytes 1-4/3') - download_failures_ContentRange('bytes 1-4/*') - download_failures_ContentRange('bytes 4-1/3') - download_failures_ContentRange('bytes 4-1/*') - download_failures_ContentRange('bytes 4-8/*') - download_failures_ContentRange('bytes 4-8/10') - download_failures_ContentRange('bytes 4-8/3') - - def test_download_failure_with_valid_content_range(self): - with mock.patch.object(glance.api.policy.ImageProxy, - 'get_data') as mock_get_data: - mock_get_data.side_effect = glance_store.NotFound(image="image") - request = wsgi.Request.blank('/') - request.environ = {} - request.headers['Content-Range'] = 'bytes %s-%s/3' % (1, 2) - response = webob.Response() - response.request = request - image = FakeImage(size=3, data=[b'Z', b'Z', b'Z']) - image.get_data = mock_get_data - 
self.assertRaises(webob.exc.HTTPNoContent, - self.serializer.download, - response, image) - - def test_download_with_checksum(self): - request = wsgi.Request.blank('/') - request.environ = {} - response = webob.Response() - response.request = request - checksum = '0745064918b49693cca64d6b6a13d28a' - image = FakeImage(size=3, checksum=checksum, data=[b'Z', b'Z', b'Z']) - self.serializer.download(response, image) - self.assertEqual(b'ZZZ', response.body) - self.assertEqual('3', response.headers['Content-Length']) - self.assertEqual(checksum, response.headers['Content-MD5']) - self.assertEqual('application/octet-stream', - response.headers['Content-Type']) - - def test_download_forbidden(self): - """Make sure the serializer can return 403 forbidden error instead of - 500 internal server error. - """ - def get_data(*args, **kwargs): - raise exception.Forbidden() - - self.stubs.Set(glance.api.policy.ImageProxy, - 'get_data', - get_data) - request = wsgi.Request.blank('/') - request.environ = {} - response = webob.Response() - response.request = request - image = FakeImage(size=3, data=iter('ZZZ')) - image.get_data = get_data - self.assertRaises(webob.exc.HTTPForbidden, - self.serializer.download, - response, image) - - def test_download_no_content(self): - """Test image download returns HTTPNoContent - - Make sure that serializer returns 204 no content error in case of - image data is not available at specified location. 
- """ - with mock.patch.object(glance.api.policy.ImageProxy, - 'get_data') as mock_get_data: - mock_get_data.side_effect = glance_store.NotFound(image="image") - - request = wsgi.Request.blank('/') - response = webob.Response() - response.request = request - image = FakeImage(size=3, data=iter('ZZZ')) - image.get_data = mock_get_data - self.assertRaises(webob.exc.HTTPNoContent, - self.serializer.download, - response, image) - - def test_download_service_unavailable(self): - """Test image download returns HTTPServiceUnavailable.""" - with mock.patch.object(glance.api.policy.ImageProxy, - 'get_data') as mock_get_data: - mock_get_data.side_effect = glance_store.RemoteServiceUnavailable() - - request = wsgi.Request.blank('/') - response = webob.Response() - response.request = request - image = FakeImage(size=3, data=iter('ZZZ')) - image.get_data = mock_get_data - self.assertRaises(webob.exc.HTTPServiceUnavailable, - self.serializer.download, - response, image) - - def test_download_store_get_not_support(self): - """Test image download returns HTTPBadRequest. - - Make sure that serializer returns 400 bad request error in case of - getting images from this store is not supported at specified location. - """ - with mock.patch.object(glance.api.policy.ImageProxy, - 'get_data') as mock_get_data: - mock_get_data.side_effect = glance_store.StoreGetNotSupported() - - request = wsgi.Request.blank('/') - response = webob.Response() - response.request = request - image = FakeImage(size=3, data=iter('ZZZ')) - image.get_data = mock_get_data - self.assertRaises(webob.exc.HTTPBadRequest, - self.serializer.download, - response, image) - - def test_download_store_random_get_not_support(self): - """Test image download returns HTTPBadRequest. - - Make sure that serializer returns 400 bad request error in case of - getting randomly images from this store is not supported at - specified location. 
- """ - with mock.patch.object(glance.api.policy.ImageProxy, - 'get_data') as m_get_data: - err = glance_store.StoreRandomGetNotSupported(offset=0, - chunk_size=0) - m_get_data.side_effect = err - - request = wsgi.Request.blank('/') - response = webob.Response() - response.request = request - image = FakeImage(size=3, data=iter('ZZZ')) - image.get_data = m_get_data - self.assertRaises(webob.exc.HTTPBadRequest, - self.serializer.download, - response, image) - - def test_upload(self): - request = webob.Request.blank('/') - request.environ = {} - response = webob.Response() - response.request = request - self.serializer.upload(response, {}) - self.assertEqual(http.NO_CONTENT, response.status_int) - self.assertEqual('0', response.headers['Content-Length']) diff --git a/glance/tests/unit/v2/test_image_members_resource.py b/glance/tests/unit/v2/test_image_members_resource.py deleted file mode 100644 index 6a4a12cf..00000000 --- a/glance/tests/unit/v2/test_image_members_resource.py +++ /dev/null @@ -1,572 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -import glance_store -from oslo_config import cfg -from oslo_serialization import jsonutils -from six.moves import http_client as http -import webob - -import glance.api.v2.image_members -import glance.tests.unit.utils as unit_test_utils -import glance.tests.utils as test_utils - -DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355) -ISOTIME = '2012-05-16T15:27:36Z' - - -CONF = cfg.CONF - -BASE_URI = unit_test_utils.BASE_URI - - -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' -UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc' -UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' -UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86' -UUID5 = '3eee7cc2-eae7-4c0f-b50d-a7160b0c62ed' - -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' -TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' -TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' -TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' - - -def _db_fixture(id, **kwargs): - obj = { - 'id': id, - 'name': None, - 'visibility': 'shared', - 'properties': {}, - 'checksum': None, - 'owner': None, - 'status': 'queued', - 'tags': [], - 'size': None, - 'locations': [], - 'protected': False, - 'disk_format': None, - 'container_format': None, - 'deleted': False, - 'min_ram': None, - 'min_disk': None, - } - obj.update(kwargs) - return obj - - -def _db_image_member_fixture(image_id, member_id, **kwargs): - obj = { - 'image_id': image_id, - 'member': member_id, - 'status': 'pending', - } - obj.update(kwargs) - return obj - - -def _domain_fixture(id, **kwargs): - properties = { - 'id': id, - } - properties.update(kwargs) - return glance.domain.ImageMembership(**properties) - - -class TestImageMembersController(test_utils.BaseTestCase): - - def setUp(self): - super(TestImageMembersController, self).setUp() - self.db = unit_test_utils.FakeDB(initialize=False) - self.store = unit_test_utils.FakeStoreAPI() - self.policy = unit_test_utils.FakePolicyEnforcer() - self.notifier = unit_test_utils.FakeNotifier() - 
self._create_images() - self._create_image_members() - self.controller = glance.api.v2.image_members.ImageMembersController( - self.db, - self.policy, - self.notifier, - self.store) - glance_store.register_opts(CONF) - - self.config(default_store='filesystem', - filesystem_store_datadir=self.test_dir, - group="glance_store") - - glance_store.create_stores() - - def _create_images(self): - self.images = [ - _db_fixture(UUID1, owner=TENANT1, name='1', size=256, - visibility='public', - locations=[{'url': '%s/%s' % (BASE_URI, UUID1), - 'metadata': {}, 'status': 'active'}]), - _db_fixture(UUID2, owner=TENANT1, name='2', size=512), - _db_fixture(UUID3, owner=TENANT3, name='3', size=512), - _db_fixture(UUID4, owner=TENANT4, name='4', size=1024), - _db_fixture(UUID5, owner=TENANT1, name='5', size=1024), - ] - [self.db.image_create(None, image) for image in self.images] - - self.db.image_tag_set_all(None, UUID1, ['ping', 'pong']) - - def _create_image_members(self): - self.image_members = [ - _db_image_member_fixture(UUID2, TENANT4), - _db_image_member_fixture(UUID3, TENANT4), - _db_image_member_fixture(UUID3, TENANT2), - _db_image_member_fixture(UUID4, TENANT1), - ] - [self.db.image_member_create(None, image_member) - for image_member in self.image_members] - - def test_index(self): - request = unit_test_utils.get_fake_request() - output = self.controller.index(request, UUID2) - self.assertEqual(1, len(output['members'])) - actual = set([image_member.member_id - for image_member in output['members']]) - expected = set([TENANT4]) - self.assertEqual(expected, actual) - - def test_index_no_members(self): - request = unit_test_utils.get_fake_request() - output = self.controller.index(request, UUID5) - self.assertEqual(0, len(output['members'])) - self.assertEqual({'members': []}, output) - - def test_index_member_view(self): - # UUID3 is a shared image owned by TENANT3 - # UUID3 has members TENANT2 and TENANT4 - # When TENANT4 lists members for UUID3, should not see TENANT2 - 
request = unit_test_utils.get_fake_request(tenant=TENANT4) - output = self.controller.index(request, UUID3) - self.assertEqual(1, len(output['members'])) - actual = set([image_member.member_id - for image_member in output['members']]) - expected = set([TENANT4]) - self.assertEqual(expected, actual) - - def test_index_private_image(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2) - self.assertRaises(webob.exc.HTTPNotFound, self.controller.index, - request, UUID5) - - def test_index_public_image(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.index, - request, UUID1) - - def test_index_private_image_visible_members_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - output = self.controller.index(request, UUID4) - self.assertEqual(1, len(output['members'])) - actual = set([image_member.member_id - for image_member in output['members']]) - expected = set([TENANT1]) - self.assertEqual(expected, actual) - - def test_index_allowed_by_get_members_policy(self): - rules = {"get_members": True} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - output = self.controller.index(request, UUID2) - self.assertEqual(1, len(output['members'])) - - def test_index_forbidden_by_get_members_policy(self): - rules = {"get_members": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.index, - request, image_id=UUID2) - - def test_show(self): - request = unit_test_utils.get_fake_request(tenant=TENANT1) - output = self.controller.show(request, UUID2, TENANT4) - expected = self.image_members[0] - self.assertEqual(expected['image_id'], output.image_id) - self.assertEqual(expected['member'], output.member_id) - self.assertEqual(expected['status'], output.status) - - def test_show_by_member(self): - request = unit_test_utils.get_fake_request(tenant=TENANT4) - output = 
self.controller.show(request, UUID2, TENANT4) - expected = self.image_members[0] - self.assertEqual(expected['image_id'], output.image_id) - self.assertEqual(expected['member'], output.member_id) - self.assertEqual(expected['status'], output.status) - - def test_show_forbidden(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2) - self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, - request, UUID2, TENANT4) - - def test_show_not_found(self): - # one member should not be able to view status of another member - # of the same image - request = unit_test_utils.get_fake_request(tenant=TENANT2) - self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, - request, UUID3, TENANT4) - - def test_create(self): - request = unit_test_utils.get_fake_request() - image_id = UUID2 - member_id = TENANT3 - output = self.controller.create(request, image_id=image_id, - member_id=member_id) - self.assertEqual(UUID2, output.image_id) - self.assertEqual(TENANT3, output.member_id) - - def test_create_allowed_by_add_policy(self): - rules = {"add_member": True} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - output = self.controller.create(request, image_id=UUID2, - member_id=TENANT3) - self.assertEqual(UUID2, output.image_id) - self.assertEqual(TENANT3, output.member_id) - - def test_create_forbidden_by_add_policy(self): - rules = {"add_member": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, - request, image_id=UUID2, member_id=TENANT3) - - def test_create_duplicate_member(self): - request = unit_test_utils.get_fake_request() - image_id = UUID2 - member_id = TENANT3 - output = self.controller.create(request, image_id=image_id, - member_id=member_id) - self.assertEqual(UUID2, output.image_id) - self.assertEqual(TENANT3, output.member_id) - - self.assertRaises(webob.exc.HTTPConflict, self.controller.create, - request, 
image_id=image_id, member_id=member_id) - - def test_create_overlimit(self): - self.config(image_member_quota=0) - request = unit_test_utils.get_fake_request() - image_id = UUID2 - member_id = TENANT3 - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.create, request, - image_id=image_id, member_id=member_id) - - def test_create_unlimited(self): - self.config(image_member_quota=-1) - request = unit_test_utils.get_fake_request() - image_id = UUID2 - member_id = TENANT3 - output = self.controller.create(request, image_id=image_id, - member_id=member_id) - self.assertEqual(UUID2, output.image_id) - self.assertEqual(TENANT3, output.member_id) - - def test_update_done_by_member(self): - request = unit_test_utils.get_fake_request(tenant=TENANT4) - image_id = UUID2 - member_id = TENANT4 - output = self.controller.update(request, image_id=image_id, - member_id=member_id, - status='accepted') - self.assertEqual(UUID2, output.image_id) - self.assertEqual(TENANT4, output.member_id) - self.assertEqual('accepted', output.status) - - def test_update_done_by_member_forbidden_by_policy(self): - rules = {"modify_member": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request(tenant=TENANT4) - self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, - request, image_id=UUID2, member_id=TENANT4, - status='accepted') - - def test_update_done_by_member_allowed_by_policy(self): - rules = {"modify_member": True} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request(tenant=TENANT4) - output = self.controller.update(request, image_id=UUID2, - member_id=TENANT4, - status='accepted') - self.assertEqual(UUID2, output.image_id) - self.assertEqual(TENANT4, output.member_id) - self.assertEqual('accepted', output.status) - - def test_update_done_by_owner(self): - request = unit_test_utils.get_fake_request(tenant=TENANT1) - self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, - request, UUID2, 
TENANT4, status='accepted') - - def test_update_non_existent_image(self): - request = unit_test_utils.get_fake_request(tenant=TENANT1) - self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, - request, '123', TENANT4, status='accepted') - - def test_update_invalid_status(self): - request = unit_test_utils.get_fake_request(tenant=TENANT4) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - request, UUID2, TENANT4, status='accept') - - def test_create_private_image(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, - request, UUID4, TENANT2) - - def test_create_public_image(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, - request, UUID1, TENANT2) - - def test_create_image_does_not_exist(self): - request = unit_test_utils.get_fake_request() - image_id = 'fake-image-id' - member_id = TENANT3 - self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, - request, image_id=image_id, member_id=member_id) - - def test_delete(self): - request = unit_test_utils.get_fake_request() - member_id = TENANT4 - image_id = UUID2 - res = self.controller.delete(request, image_id, member_id) - self.assertEqual(b'', res.body) - self.assertEqual(http.NO_CONTENT, res.status_code) - found_member = self.db.image_member_find( - request.context, image_id=image_id, member=member_id) - self.assertEqual([], found_member) - - def test_delete_by_member(self): - request = unit_test_utils.get_fake_request(tenant=TENANT4) - self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, - request, UUID2, TENANT4) - request = unit_test_utils.get_fake_request() - output = self.controller.index(request, UUID2) - self.assertEqual(1, len(output['members'])) - actual = set([image_member.member_id - for image_member in output['members']]) - expected = set([TENANT4]) - self.assertEqual(expected, actual) - - def 
test_delete_allowed_by_policies(self): - rules = {"get_member": True, "delete_member": True} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request(tenant=TENANT1) - output = self.controller.delete(request, image_id=UUID2, - member_id=TENANT4) - request = unit_test_utils.get_fake_request() - output = self.controller.index(request, UUID2) - self.assertEqual(0, len(output['members'])) - - def test_delete_forbidden_by_get_member_policy(self): - rules = {"get_member": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request(tenant=TENANT1) - self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, - request, UUID2, TENANT4) - - def test_delete_forbidden_by_delete_member_policy(self): - rules = {"delete_member": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request(tenant=TENANT1) - self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, - request, UUID2, TENANT4) - - def test_delete_private_image(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, - request, UUID4, TENANT1) - - def test_delete_public_image(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, - request, UUID1, TENANT1) - - def test_delete_image_does_not_exist(self): - request = unit_test_utils.get_fake_request() - member_id = TENANT2 - image_id = 'fake-image-id' - self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, - request, image_id, member_id) - - def test_delete_member_does_not_exist(self): - request = unit_test_utils.get_fake_request() - member_id = 'fake-member-id' - image_id = UUID2 - found_member = self.db.image_member_find( - request.context, image_id=image_id, member=member_id) - self.assertEqual([], found_member) - self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, - request, image_id, member_id) - - -class 
TestImageMembersSerializer(test_utils.BaseTestCase): - - def setUp(self): - super(TestImageMembersSerializer, self).setUp() - self.serializer = glance.api.v2.image_members.ResponseSerializer() - self.fixtures = [ - _domain_fixture(id='1', image_id=UUID2, member_id=TENANT1, - status='accepted', - created_at=DATETIME, updated_at=DATETIME), - _domain_fixture(id='2', image_id=UUID2, member_id=TENANT2, - status='pending', - created_at=DATETIME, updated_at=DATETIME), - ] - - def test_index(self): - expected = { - 'members': [ - { - 'image_id': UUID2, - 'member_id': TENANT1, - 'status': 'accepted', - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'schema': '/v2/schemas/member', - }, - { - 'image_id': UUID2, - 'member_id': TENANT2, - 'status': 'pending', - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'schema': '/v2/schemas/member', - }, - ], - 'schema': '/v2/schemas/members', - } - request = webob.Request.blank('/v2/images/%s/members' % UUID2) - response = webob.Response(request=request) - result = {'members': self.fixtures} - self.serializer.index(response, result) - actual = jsonutils.loads(response.body) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - - def test_show(self): - expected = { - 'image_id': UUID2, - 'member_id': TENANT1, - 'status': 'accepted', - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'schema': '/v2/schemas/member', - } - request = webob.Request.blank('/v2/images/%s/members/%s' - % (UUID2, TENANT1)) - response = webob.Response(request=request) - result = self.fixtures[0] - self.serializer.show(response, result) - actual = jsonutils.loads(response.body) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - - def test_create(self): - expected = {'image_id': UUID2, - 'member_id': TENANT1, - 'status': 'accepted', - 'schema': '/v2/schemas/member', - 'created_at': ISOTIME, - 'updated_at': ISOTIME} - request = 
webob.Request.blank('/v2/images/%s/members/%s' - % (UUID2, TENANT1)) - response = webob.Response(request=request) - result = self.fixtures[0] - self.serializer.create(response, result) - actual = jsonutils.loads(response.body) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - - def test_update(self): - expected = {'image_id': UUID2, - 'member_id': TENANT1, - 'status': 'accepted', - 'schema': '/v2/schemas/member', - 'created_at': ISOTIME, - 'updated_at': ISOTIME} - request = webob.Request.blank('/v2/images/%s/members/%s' - % (UUID2, TENANT1)) - response = webob.Response(request=request) - result = self.fixtures[0] - self.serializer.update(response, result) - actual = jsonutils.loads(response.body) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - - -class TestImagesDeserializer(test_utils.BaseTestCase): - - def setUp(self): - super(TestImagesDeserializer, self).setUp() - self.deserializer = glance.api.v2.image_members.RequestDeserializer() - - def test_create(self): - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({'member': TENANT1}) - output = self.deserializer.create(request) - expected = {'member_id': TENANT1} - self.assertEqual(expected, output) - - def test_create_invalid(self): - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({'mem': TENANT1}) - self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, - request) - - def test_create_no_body(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, - request) - - def test_create_member_empty(self): - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({'member': ''}) - self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, - request) - - def test_create_list_return_error(self): - request = 
unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes([TENANT1]) - self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, - request) - - def test_update_list_return_error(self): - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes([TENANT1]) - self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, - request) - - def test_update(self): - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({'status': 'accepted'}) - output = self.deserializer.update(request) - expected = {'status': 'accepted'} - self.assertEqual(expected, output) - - def test_update_invalid(self): - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({'mem': TENANT1}) - self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, - request) - - def test_update_no_body(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, - request) diff --git a/glance/tests/unit/v2/test_image_tags_resource.py b/glance/tests/unit/v2/test_image_tags_resource.py deleted file mode 100644 index 76841975..00000000 --- a/glance/tests/unit/v2/test_image_tags_resource.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from six.moves import http_client as http -import webob - -import glance.api.v2.image_tags -from glance.common import exception -from glance.tests.unit import base -import glance.tests.unit.utils as unit_test_utils -import glance.tests.unit.v2.test_image_data_resource as image_data_tests -import glance.tests.utils as test_utils - - -class TestImageTagsController(base.IsolatedUnitTest): - - def setUp(self): - super(TestImageTagsController, self).setUp() - self.db = unit_test_utils.FakeDB() - self.controller = glance.api.v2.image_tags.Controller(self.db) - - def test_create_tag(self): - request = unit_test_utils.get_fake_request() - self.controller.update(request, unit_test_utils.UUID1, 'dink') - context = request.context - tags = self.db.image_tag_get_all(context, unit_test_utils.UUID1) - self.assertEqual(1, len([tag for tag in tags if tag == 'dink'])) - - def test_create_too_many_tags(self): - self.config(image_tag_quota=0) - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.update, - request, unit_test_utils.UUID1, 'dink') - - def test_create_duplicate_tag_ignored(self): - request = unit_test_utils.get_fake_request() - self.controller.update(request, unit_test_utils.UUID1, 'dink') - self.controller.update(request, unit_test_utils.UUID1, 'dink') - context = request.context - tags = self.db.image_tag_get_all(context, unit_test_utils.UUID1) - self.assertEqual(1, len([tag for tag in tags if tag == 'dink'])) - - def test_update_tag_of_non_existing_image(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, - request, "abcd", "dink") - - def test_delete_tag_forbidden(self): - def fake_get(self): - raise exception.Forbidden() - - image_repo = image_data_tests.FakeImageRepo() - image_repo.get = fake_get - - def get_fake_repo(self): - return image_repo - - self.controller.gateway.get_repo = get_fake_repo - request = 
unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, - request, unit_test_utils.UUID1, "ping") - - def test_delete_tag(self): - request = unit_test_utils.get_fake_request() - self.controller.delete(request, unit_test_utils.UUID1, 'ping') - - def test_delete_tag_not_found(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, - request, unit_test_utils.UUID1, 'what') - - def test_delete_tag_of_non_existing_image(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, - request, "abcd", "dink") - - -class TestImagesSerializer(test_utils.BaseTestCase): - - def setUp(self): - super(TestImagesSerializer, self).setUp() - self.serializer = glance.api.v2.image_tags.ResponseSerializer() - - def test_create_tag(self): - response = webob.Response() - self.serializer.update(response, None) - self.assertEqual(http.NO_CONTENT, response.status_int) - - def test_delete_tag(self): - response = webob.Response() - self.serializer.delete(response, None) - self.assertEqual(http.NO_CONTENT, response.status_int) diff --git a/glance/tests/unit/v2/test_images_resource.py b/glance/tests/unit/v2/test_images_resource.py deleted file mode 100644 index 60fdfd22..00000000 --- a/glance/tests/unit/v2/test_images_resource.py +++ /dev/null @@ -1,3920 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import uuid - -import glance_store as store -import mock -from oslo_serialization import jsonutils -import six -from six.moves import http_client as http -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range -import testtools -import webob - -import glance.api.v2.image_actions -import glance.api.v2.images -from glance.common import exception -from glance import domain -import glance.schema -from glance.tests.unit import base -import glance.tests.unit.utils as unit_test_utils -import glance.tests.utils as test_utils - -DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355) -ISOTIME = '2012-05-16T15:27:36Z' - - -BASE_URI = unit_test_utils.BASE_URI - - -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' -UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc' -UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' -UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86' - -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' -TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' -TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' -TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' - -CHKSUM = '93264c3edf5972c9f1cb309543d38a5c' -CHKSUM1 = '43254c3edf6972c9f1cb309543d38a8c' - - -def _db_fixture(id, **kwargs): - obj = { - 'id': id, - 'name': None, - 'visibility': 'shared', - 'properties': {}, - 'checksum': None, - 'owner': None, - 'status': 'queued', - 'tags': [], - 'size': None, - 'virtual_size': None, - 'locations': [], - 'protected': False, - 'disk_format': None, - 'container_format': None, - 'deleted': False, - 'min_ram': None, - 'min_disk': None, - } - obj.update(kwargs) - return obj - - -def _domain_fixture(id, **kwargs): - properties = { - 'image_id': id, - 'name': None, - 'visibility': 'private', - 'checksum': None, - 'owner': None, - 'status': 'queued', - 'size': None, - 'virtual_size': None, - 'locations': [], - 'protected': 
False, - 'disk_format': None, - 'container_format': None, - 'min_ram': None, - 'min_disk': None, - 'tags': [], - } - properties.update(kwargs) - return glance.domain.Image(**properties) - - -def _db_image_member_fixture(image_id, member_id, **kwargs): - obj = { - 'image_id': image_id, - 'member': member_id, - } - obj.update(kwargs) - return obj - - -class TestImagesController(base.IsolatedUnitTest): - - def setUp(self): - super(TestImagesController, self).setUp() - self.db = unit_test_utils.FakeDB(initialize=False) - self.policy = unit_test_utils.FakePolicyEnforcer() - self.notifier = unit_test_utils.FakeNotifier() - self.store = unit_test_utils.FakeStoreAPI() - for i in range(1, 4): - self.store.data['%s/fake_location_%i' % (BASE_URI, i)] = ('Z', 1) - self.store_utils = unit_test_utils.FakeStoreUtils(self.store) - self._create_images() - self._create_image_members() - self.controller = glance.api.v2.images.ImagesController(self.db, - self.policy, - self.notifier, - self.store) - self.action_controller = (glance.api.v2.image_actions. 
- ImageActionsController(self.db, - self.policy, - self.notifier, - self.store)) - self.controller.gateway.store_utils = self.store_utils - store.create_stores() - - def _create_images(self): - self.images = [ - _db_fixture(UUID1, owner=TENANT1, checksum=CHKSUM, - name='1', size=256, virtual_size=1024, - visibility='public', - locations=[{'url': '%s/%s' % (BASE_URI, UUID1), - 'metadata': {}, 'status': 'active'}], - disk_format='raw', - container_format='bare', - status='active'), - _db_fixture(UUID2, owner=TENANT1, checksum=CHKSUM1, - name='2', size=512, virtual_size=2048, - visibility='public', - disk_format='raw', - container_format='bare', - status='active', - tags=['redhat', '64bit', 'power'], - properties={'hypervisor_type': 'kvm', 'foo': 'bar', - 'bar': 'foo'}), - _db_fixture(UUID3, owner=TENANT3, checksum=CHKSUM1, - name='3', size=512, virtual_size=2048, - visibility='public', tags=['windows', '64bit', 'x86']), - _db_fixture(UUID4, owner=TENANT4, name='4', - size=1024, virtual_size=3072), - ] - [self.db.image_create(None, image) for image in self.images] - - self.db.image_tag_set_all(None, UUID1, ['ping', 'pong']) - - def _create_image_members(self): - self.image_members = [ - _db_image_member_fixture(UUID4, TENANT2), - _db_image_member_fixture(UUID4, TENANT3, - status='accepted'), - ] - [self.db.image_member_create(None, image_member) - for image_member in self.image_members] - - def test_index(self): - self.config(limit_param_default=1, api_limit_max=3) - request = unit_test_utils.get_fake_request() - output = self.controller.index(request) - self.assertEqual(1, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - expected = set([UUID3]) - self.assertEqual(expected, actual) - - def test_index_member_status_accepted(self): - self.config(limit_param_default=5, api_limit_max=5) - request = unit_test_utils.get_fake_request(tenant=TENANT2) - output = self.controller.index(request) - self.assertEqual(3, len(output['images'])) - 
actual = set([image.image_id for image in output['images']]) - expected = set([UUID1, UUID2, UUID3]) - # can see only the public image - self.assertEqual(expected, actual) - - request = unit_test_utils.get_fake_request(tenant=TENANT3) - output = self.controller.index(request) - self.assertEqual(4, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - expected = set([UUID1, UUID2, UUID3, UUID4]) - self.assertEqual(expected, actual) - - def test_index_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - output = self.controller.index(request) - self.assertEqual(4, len(output['images'])) - - def test_index_admin_deleted_images_hidden(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.controller.delete(request, UUID1) - output = self.controller.index(request) - self.assertEqual(3, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - expected = set([UUID2, UUID3, UUID4]) - self.assertEqual(expected, actual) - - def test_index_return_parameters(self): - self.config(limit_param_default=1, api_limit_max=3) - request = unit_test_utils.get_fake_request() - output = self.controller.index(request, marker=UUID3, limit=1, - sort_key=['created_at'], - sort_dir=['desc']) - self.assertEqual(1, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - expected = set([UUID2]) - self.assertEqual(actual, expected) - self.assertEqual(UUID2, output['next_marker']) - - def test_index_next_marker(self): - self.config(limit_param_default=1, api_limit_max=3) - request = unit_test_utils.get_fake_request() - output = self.controller.index(request, marker=UUID3, limit=2) - self.assertEqual(2, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - expected = set([UUID2, UUID1]) - self.assertEqual(expected, actual) - self.assertEqual(UUID1, output['next_marker']) - - def test_index_no_next_marker(self): - 
self.config(limit_param_default=1, api_limit_max=3) - request = unit_test_utils.get_fake_request() - output = self.controller.index(request, marker=UUID1, limit=2) - self.assertEqual(0, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - expected = set([]) - self.assertEqual(expected, actual) - self.assertNotIn('next_marker', output) - - def test_index_with_id_filter(self): - request = unit_test_utils.get_fake_request('/images?id=%s' % UUID1) - output = self.controller.index(request, filters={'id': UUID1}) - self.assertEqual(1, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - expected = set([UUID1]) - self.assertEqual(expected, actual) - - def test_index_with_checksum_filter_single_image(self): - req = unit_test_utils.get_fake_request('/images?checksum=%s' % CHKSUM) - output = self.controller.index(req, filters={'checksum': CHKSUM}) - self.assertEqual(1, len(output['images'])) - actual = list([image.image_id for image in output['images']]) - expected = [UUID1] - self.assertEqual(expected, actual) - - def test_index_with_checksum_filter_multiple_images(self): - req = unit_test_utils.get_fake_request('/images?checksum=%s' % CHKSUM1) - output = self.controller.index(req, filters={'checksum': CHKSUM1}) - self.assertEqual(2, len(output['images'])) - actual = list([image.image_id for image in output['images']]) - expected = [UUID3, UUID2] - self.assertEqual(expected, actual) - - def test_index_with_non_existent_checksum(self): - req = unit_test_utils.get_fake_request('/images?checksum=236231827') - output = self.controller.index(req, filters={'checksum': '236231827'}) - self.assertEqual(0, len(output['images'])) - - def test_index_size_max_filter(self): - request = unit_test_utils.get_fake_request('/images?size_max=512') - output = self.controller.index(request, filters={'size_max': 512}) - self.assertEqual(3, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - 
expected = set([UUID1, UUID2, UUID3]) - self.assertEqual(expected, actual) - - def test_index_size_min_filter(self): - request = unit_test_utils.get_fake_request('/images?size_min=512') - output = self.controller.index(request, filters={'size_min': 512}) - self.assertEqual(2, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - expected = set([UUID2, UUID3]) - self.assertEqual(expected, actual) - - def test_index_size_range_filter(self): - path = '/images?size_min=512&size_max=512' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, - filters={'size_min': 512, - 'size_max': 512}) - self.assertEqual(2, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - expected = set([UUID2, UUID3]) - self.assertEqual(expected, actual) - - def test_index_virtual_size_max_filter(self): - ref = '/images?virtual_size_max=2048' - request = unit_test_utils.get_fake_request(ref) - output = self.controller.index(request, - filters={'virtual_size_max': 2048}) - self.assertEqual(3, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - expected = set([UUID1, UUID2, UUID3]) - self.assertEqual(expected, actual) - - def test_index_virtual_size_min_filter(self): - ref = '/images?virtual_size_min=2048' - request = unit_test_utils.get_fake_request(ref) - output = self.controller.index(request, - filters={'virtual_size_min': 2048}) - self.assertEqual(2, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - expected = set([UUID2, UUID3]) - self.assertEqual(expected, actual) - - def test_index_virtual_size_range_filter(self): - path = '/images?virtual_size_min=512&virtual_size_max=2048' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, - filters={'virtual_size_min': 2048, - 'virtual_size_max': 2048}) - self.assertEqual(2, len(output['images'])) - actual = set([image.image_id for 
image in output['images']]) - expected = set([UUID2, UUID3]) - self.assertEqual(expected, actual) - - def test_index_with_invalid_max_range_filter_value(self): - request = unit_test_utils.get_fake_request('/images?size_max=blah') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, - request, - filters={'size_max': 'blah'}) - - def test_index_with_filters_return_many(self): - path = '/images?status=queued' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, filters={'status': 'queued'}) - self.assertEqual(1, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - expected = set([UUID3]) - self.assertEqual(expected, actual) - - def test_index_with_nonexistent_name_filter(self): - request = unit_test_utils.get_fake_request('/images?name=%s' % 'blah') - images = self.controller.index(request, - filters={'name': 'blah'})['images'] - self.assertEqual(0, len(images)) - - def test_index_with_non_default_is_public_filter(self): - private_uuid = str(uuid.uuid4()) - new_image = _db_fixture(private_uuid, - visibility='private', - owner=TENANT3) - self.db.image_create(None, new_image) - - path = '/images?visibility=private' - request = unit_test_utils.get_fake_request(path, is_admin=True) - output = self.controller.index(request, - filters={'visibility': 'private'}) - self.assertEqual(1, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - expected = set([private_uuid]) - self.assertEqual(expected, actual) - - path = '/images?visibility=shared' - request = unit_test_utils.get_fake_request(path, is_admin=True) - output = self.controller.index(request, - filters={'visibility': 'shared'}) - self.assertEqual(1, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - expected = set([UUID4]) - self.assertEqual(expected, actual) - - def test_index_with_many_filters(self): - url = '/images?status=queued&name=3' - request = 
unit_test_utils.get_fake_request(url) - output = self.controller.index(request, - filters={ - 'status': 'queued', - 'name': '3', - }) - self.assertEqual(1, len(output['images'])) - actual = set([image.image_id for image in output['images']]) - expected = set([UUID3]) - self.assertEqual(expected, actual) - - def test_index_with_marker(self): - self.config(limit_param_default=1, api_limit_max=3) - path = '/images' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, marker=UUID3) - actual = set([image.image_id for image in output['images']]) - self.assertEqual(1, len(actual)) - self.assertIn(UUID2, actual) - - def test_index_with_limit(self): - path = '/images' - limit = 2 - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, limit=limit) - actual = set([image.image_id for image in output['images']]) - self.assertEqual(limit, len(actual)) - self.assertIn(UUID3, actual) - self.assertIn(UUID2, actual) - - def test_index_greater_than_limit_max(self): - self.config(limit_param_default=1, api_limit_max=3) - path = '/images' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, limit=4) - actual = set([image.image_id for image in output['images']]) - self.assertEqual(3, len(actual)) - self.assertNotIn(output['next_marker'], output) - - def test_index_default_limit(self): - self.config(limit_param_default=1, api_limit_max=3) - path = '/images' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request) - actual = set([image.image_id for image in output['images']]) - self.assertEqual(1, len(actual)) - - def test_index_with_sort_dir(self): - path = '/images' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, sort_dir=['asc'], limit=3) - actual = [image.image_id for image in output['images']] - self.assertEqual(3, len(actual)) - self.assertEqual(UUID1, actual[0]) - 
self.assertEqual(UUID2, actual[1]) - self.assertEqual(UUID3, actual[2]) - - def test_index_with_sort_key(self): - path = '/images' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, sort_key=['created_at'], - limit=3) - actual = [image.image_id for image in output['images']] - self.assertEqual(3, len(actual)) - self.assertEqual(UUID3, actual[0]) - self.assertEqual(UUID2, actual[1]) - self.assertEqual(UUID1, actual[2]) - - def test_index_with_multiple_sort_keys(self): - path = '/images' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, - sort_key=['created_at', 'name'], - limit=3) - actual = [image.image_id for image in output['images']] - self.assertEqual(3, len(actual)) - self.assertEqual(UUID3, actual[0]) - self.assertEqual(UUID2, actual[1]) - self.assertEqual(UUID1, actual[2]) - - def test_index_with_marker_not_found(self): - fake_uuid = str(uuid.uuid4()) - path = '/images' - request = unit_test_utils.get_fake_request(path) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, request, marker=fake_uuid) - - def test_index_invalid_sort_key(self): - path = '/images' - request = unit_test_utils.get_fake_request(path) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, request, sort_key=['foo']) - - def test_index_zero_images(self): - self.db.reset() - request = unit_test_utils.get_fake_request() - output = self.controller.index(request) - self.assertEqual([], output['images']) - - def test_index_with_tags(self): - path = '/images?tag=64bit' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, filters={'tags': ['64bit']}) - actual = [image.tags for image in output['images']] - self.assertEqual(2, len(actual)) - self.assertIn('64bit', actual[0]) - self.assertIn('64bit', actual[1]) - - def test_index_with_multi_tags(self): - path = '/images?tag=power&tag=64bit' - request = 
unit_test_utils.get_fake_request(path) - output = self.controller.index(request, - filters={'tags': ['power', '64bit']}) - actual = [image.tags for image in output['images']] - self.assertEqual(1, len(actual)) - self.assertIn('64bit', actual[0]) - self.assertIn('power', actual[0]) - - def test_index_with_multi_tags_and_nonexistent(self): - path = '/images?tag=power&tag=fake' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, - filters={'tags': ['power', 'fake']}) - actual = [image.tags for image in output['images']] - self.assertEqual(0, len(actual)) - - def test_index_with_tags_and_properties(self): - path = '/images?tag=64bit&hypervisor_type=kvm' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, - filters={'tags': ['64bit'], - 'hypervisor_type': 'kvm'}) - tags = [image.tags for image in output['images']] - properties = [image.extra_properties for image in output['images']] - self.assertEqual(len(tags), len(properties)) - self.assertIn('64bit', tags[0]) - self.assertEqual('kvm', properties[0]['hypervisor_type']) - - def test_index_with_multiple_properties(self): - path = '/images?foo=bar&hypervisor_type=kvm' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, - filters={'foo': 'bar', - 'hypervisor_type': 'kvm'}) - properties = [image.extra_properties for image in output['images']] - self.assertEqual('kvm', properties[0]['hypervisor_type']) - self.assertEqual('bar', properties[0]['foo']) - - def test_index_with_core_and_extra_property(self): - path = '/images?disk_format=raw&foo=bar' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, - filters={'foo': 'bar', - 'disk_format': 'raw'}) - properties = [image.extra_properties for image in output['images']] - self.assertEqual(1, len(output['images'])) - self.assertEqual('raw', output['images'][0].disk_format) - self.assertEqual('bar', properties[0]['foo']) 
- - def test_index_with_nonexistent_properties(self): - path = '/images?abc=xyz&pudding=banana' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, - filters={'abc': 'xyz', - 'pudding': 'banana'}) - self.assertEqual(0, len(output['images'])) - - def test_index_with_non_existent_tags(self): - path = '/images?tag=fake' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request, - filters={'tags': ['fake']}) - actual = [image.tags for image in output['images']] - self.assertEqual(0, len(actual)) - - def test_show(self): - request = unit_test_utils.get_fake_request() - output = self.controller.show(request, image_id=UUID2) - self.assertEqual(UUID2, output.image_id) - self.assertEqual('2', output.name) - - def test_show_deleted_properties(self): - """Ensure that the api filters out deleted image properties.""" - - # get the image properties into the odd state - image = { - 'id': str(uuid.uuid4()), - 'status': 'active', - 'properties': {'poo': 'bear'}, - } - self.db.image_create(None, image) - self.db.image_update(None, image['id'], - {'properties': {'yin': 'yang'}}, - purge_props=True) - - request = unit_test_utils.get_fake_request() - output = self.controller.show(request, image['id']) - self.assertEqual('yang', output.extra_properties['yin']) - - def test_show_non_existent(self): - request = unit_test_utils.get_fake_request() - image_id = str(uuid.uuid4()) - self.assertRaises(webob.exc.HTTPNotFound, - self.controller.show, request, image_id) - - def test_show_deleted_image_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.controller.delete(request, UUID1) - self.assertRaises(webob.exc.HTTPNotFound, - self.controller.show, request, UUID1) - - def test_show_not_allowed(self): - request = unit_test_utils.get_fake_request() - self.assertEqual(TENANT1, request.context.tenant) - self.assertRaises(webob.exc.HTTPNotFound, - self.controller.show, request, UUID4) - - def 
test_create(self): - request = unit_test_utils.get_fake_request() - image = {'name': 'image-1'} - output = self.controller.create(request, image=image, - extra_properties={}, - tags=[]) - self.assertEqual('image-1', output.name) - self.assertEqual({}, output.extra_properties) - self.assertEqual(set([]), output.tags) - self.assertEqual('shared', output.visibility) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('image.create', output_log['event_type']) - self.assertEqual('image-1', output_log['payload']['name']) - - def test_create_disabled_notification(self): - self.config(disabled_notifications=["image.create"]) - request = unit_test_utils.get_fake_request() - image = {'name': 'image-1'} - output = self.controller.create(request, image=image, - extra_properties={}, - tags=[]) - self.assertEqual('image-1', output.name) - self.assertEqual({}, output.extra_properties) - self.assertEqual(set([]), output.tags) - self.assertEqual('shared', output.visibility) - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - - def test_create_with_properties(self): - request = unit_test_utils.get_fake_request() - image_properties = {'foo': 'bar'} - image = {'name': 'image-1'} - output = self.controller.create(request, image=image, - extra_properties=image_properties, - tags=[]) - self.assertEqual('image-1', output.name) - self.assertEqual(image_properties, output.extra_properties) - self.assertEqual(set([]), output.tags) - self.assertEqual('shared', output.visibility) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('image.create', output_log['event_type']) - self.assertEqual('image-1', output_log['payload']['name']) - - def test_create_with_too_many_properties(self): - 
self.config(image_property_quota=1) - request = unit_test_utils.get_fake_request() - image_properties = {'foo': 'bar', 'foo2': 'bar'} - image = {'name': 'image-1'} - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.create, request, - image=image, - extra_properties=image_properties, - tags=[]) - - def test_create_with_bad_min_disk_size(self): - request = unit_test_utils.get_fake_request() - image = {'min_disk': -42, 'name': 'image-1'} - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, request, - image=image, - extra_properties={}, - tags=[]) - - def test_create_with_bad_min_ram_size(self): - request = unit_test_utils.get_fake_request() - image = {'min_ram': -42, 'name': 'image-1'} - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, request, - image=image, - extra_properties={}, - tags=[]) - - def test_create_public_image_as_admin(self): - request = unit_test_utils.get_fake_request() - image = {'name': 'image-1', 'visibility': 'public'} - output = self.controller.create(request, image=image, - extra_properties={}, tags=[]) - self.assertEqual('public', output.visibility) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('image.create', output_log['event_type']) - self.assertEqual(output.image_id, output_log['payload']['id']) - - def test_create_dup_id(self): - request = unit_test_utils.get_fake_request() - image = {'image_id': UUID4} - - self.assertRaises(webob.exc.HTTPConflict, - self.controller.create, - request, - image=image, - extra_properties={}, - tags=[]) - - def test_create_duplicate_tags(self): - request = unit_test_utils.get_fake_request() - tags = ['ping', 'ping'] - output = self.controller.create(request, image={}, - extra_properties={}, tags=tags) - self.assertEqual(set(['ping']), output.tags) - output_logs = self.notifier.get_logs() - 
self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('image.create', output_log['event_type']) - self.assertEqual(output.image_id, output_log['payload']['id']) - - def test_create_with_too_many_tags(self): - self.config(image_tag_quota=1) - request = unit_test_utils.get_fake_request() - tags = ['ping', 'pong'] - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.create, - request, image={}, extra_properties={}, - tags=tags) - - def test_create_with_owner_non_admin(self): - request = unit_test_utils.get_fake_request() - request.context.is_admin = False - image = {'owner': '12345'} - self.assertRaises(webob.exc.HTTPForbidden, - self.controller.create, - request, image=image, extra_properties={}, - tags=[]) - - request = unit_test_utils.get_fake_request() - request.context.is_admin = False - image = {'owner': TENANT1} - output = self.controller.create(request, image=image, - extra_properties={}, tags=[]) - self.assertEqual(TENANT1, output.owner) - - def test_create_with_owner_admin(self): - request = unit_test_utils.get_fake_request() - request.context.is_admin = True - image = {'owner': '12345'} - output = self.controller.create(request, image=image, - extra_properties={}, tags=[]) - self.assertEqual('12345', output.owner) - - def test_create_with_duplicate_location(self): - request = unit_test_utils.get_fake_request() - location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} - image = {'name': 'image-1', 'locations': [location, location]} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, - request, image=image, extra_properties={}, - tags=[]) - - def test_create_unexpected_property(self): - request = unit_test_utils.get_fake_request() - image_properties = {'unexpected': 'unexpected'} - image = {'name': 'image-1'} - with mock.patch.object(domain.ImageFactory, 'new_image', - side_effect=TypeError): - 
self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, request, image=image, - extra_properties=image_properties, tags=[]) - - def test_create_reserved_property(self): - request = unit_test_utils.get_fake_request() - image_properties = {'reserved': 'reserved'} - image = {'name': 'image-1'} - with mock.patch.object(domain.ImageFactory, 'new_image', - side_effect=exception.ReservedProperty( - property='reserved')): - self.assertRaises(webob.exc.HTTPForbidden, - self.controller.create, request, image=image, - extra_properties=image_properties, tags=[]) - - def test_create_readonly_property(self): - request = unit_test_utils.get_fake_request() - image_properties = {'readonly': 'readonly'} - image = {'name': 'image-1'} - with mock.patch.object(domain.ImageFactory, 'new_image', - side_effect=exception.ReadonlyProperty( - property='readonly')): - self.assertRaises(webob.exc.HTTPForbidden, - self.controller.create, request, image=image, - extra_properties=image_properties, tags=[]) - - def test_update_no_changes(self): - request = unit_test_utils.get_fake_request() - output = self.controller.update(request, UUID1, changes=[]) - self.assertEqual(UUID1, output.image_id) - self.assertEqual(output.created_at, output.updated_at) - self.assertEqual(2, len(output.tags)) - self.assertIn('ping', output.tags) - self.assertIn('pong', output.tags) - output_logs = self.notifier.get_logs() - # NOTE(markwash): don't send a notification if nothing is updated - self.assertEqual(0, len(output_logs)) - - def test_update_with_bad_min_disk(self): - request = unit_test_utils.get_fake_request() - changes = [{'op': 'replace', 'path': ['min_disk'], 'value': -42}] - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - request, UUID1, changes=changes) - - def test_update_with_bad_min_ram(self): - request = unit_test_utils.get_fake_request() - changes = [{'op': 'replace', 'path': ['min_ram'], 'value': -42}] - self.assertRaises(webob.exc.HTTPBadRequest, 
self.controller.update, - request, UUID1, changes=changes) - - def test_update_image_doesnt_exist(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, - request, str(uuid.uuid4()), changes=[]) - - def test_update_deleted_image_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.controller.delete(request, UUID1) - self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, - request, UUID1, changes=[]) - - def test_update_with_too_many_properties(self): - self.config(show_multiple_locations=True) - self.config(user_storage_quota='1') - new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} - request = unit_test_utils.get_fake_request() - changes = [{'op': 'add', 'path': ['locations', '-'], - 'value': new_location}] - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.update, - request, UUID1, changes=changes) - - def test_update_replace_base_attribute(self): - self.db.image_update(None, UUID1, {'properties': {'foo': 'bar'}}) - request = unit_test_utils.get_fake_request() - request.context.is_admin = True - changes = [{'op': 'replace', 'path': ['name'], 'value': 'fedora'}, - {'op': 'replace', 'path': ['owner'], 'value': TENANT3}] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual('fedora', output.name) - self.assertEqual(TENANT3, output.owner) - self.assertEqual({'foo': 'bar'}, output.extra_properties) - self.assertNotEqual(output.created_at, output.updated_at) - - def test_update_replace_onwer_non_admin(self): - request = unit_test_utils.get_fake_request() - request.context.is_admin = False - changes = [{'op': 'replace', 'path': ['owner'], 'value': TENANT3}] - self.assertRaises(webob.exc.HTTPForbidden, - self.controller.update, request, UUID1, changes) - - def test_update_replace_tags(self): - request = unit_test_utils.get_fake_request() - changes = [ - {'op': 
'replace', 'path': ['tags'], 'value': ['king', 'kong']}, - ] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual(2, len(output.tags)) - self.assertIn('king', output.tags) - self.assertIn('kong', output.tags) - self.assertNotEqual(output.created_at, output.updated_at) - - def test_update_replace_property(self): - request = unit_test_utils.get_fake_request() - properties = {'foo': 'bar', 'snitch': 'golden'} - self.db.image_update(None, UUID1, {'properties': properties}) - - output = self.controller.show(request, UUID1) - self.assertEqual('bar', output.extra_properties['foo']) - self.assertEqual('golden', output.extra_properties['snitch']) - - changes = [ - {'op': 'replace', 'path': ['foo'], 'value': 'baz'}, - ] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual('baz', output.extra_properties['foo']) - self.assertEqual('golden', output.extra_properties['snitch']) - self.assertNotEqual(output.created_at, output.updated_at) - - def test_update_add_too_many_properties(self): - self.config(image_property_quota=1) - request = unit_test_utils.get_fake_request() - - changes = [ - {'op': 'add', 'path': ['foo'], 'value': 'baz'}, - {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, - ] - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.update, request, - UUID1, changes) - - def test_update_add_and_remove_too_many_properties(self): - request = unit_test_utils.get_fake_request() - - changes = [ - {'op': 'add', 'path': ['foo'], 'value': 'baz'}, - {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, - ] - self.controller.update(request, UUID1, changes) - self.config(image_property_quota=1) - - # We must remove two properties to avoid being - # over the limit of 1 property - changes = [ - {'op': 'remove', 'path': ['foo']}, - {'op': 'add', 'path': ['fizz'], 'value': 'buzz'}, - ] - 
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.update, request, - UUID1, changes) - - def test_update_add_unlimited_properties(self): - self.config(image_property_quota=-1) - request = unit_test_utils.get_fake_request() - output = self.controller.show(request, UUID1) - - changes = [{'op': 'add', - 'path': ['foo'], - 'value': 'bar'}] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertNotEqual(output.created_at, output.updated_at) - - def test_update_format_properties(self): - statuses_for_immutability = ['active', 'saving', 'killed'] - request = unit_test_utils.get_fake_request(is_admin=True) - for status in statuses_for_immutability: - image = { - 'id': str(uuid.uuid4()), - 'status': status, - 'disk_format': 'ari', - 'container_format': 'ari', - } - self.db.image_create(None, image) - changes = [ - {'op': 'replace', 'path': ['disk_format'], 'value': 'ami'}, - ] - self.assertRaises(webob.exc.HTTPForbidden, - self.controller.update, - request, image['id'], changes) - changes = [ - {'op': 'replace', - 'path': ['container_format'], - 'value': 'ami'}, - ] - self.assertRaises(webob.exc.HTTPForbidden, - self.controller.update, - request, image['id'], changes) - self.db.image_update(None, image['id'], {'status': 'queued'}) - - changes = [ - {'op': 'replace', 'path': ['disk_format'], 'value': 'raw'}, - {'op': 'replace', 'path': ['container_format'], 'value': 'bare'}, - ] - resp = self.controller.update(request, image['id'], changes) - self.assertEqual('raw', resp.disk_format) - self.assertEqual('bare', resp.container_format) - - def test_update_remove_property_while_over_limit(self): - """Ensure that image properties can be removed. - - Image properties should be able to be removed as long as the image has - fewer than the limited number of image properties after the - transaction. 
- - """ - request = unit_test_utils.get_fake_request() - - changes = [ - {'op': 'add', 'path': ['foo'], 'value': 'baz'}, - {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, - {'op': 'add', 'path': ['fizz'], 'value': 'buzz'}, - ] - self.controller.update(request, UUID1, changes) - self.config(image_property_quota=1) - - # We must remove two properties to avoid being - # over the limit of 1 property - changes = [ - {'op': 'remove', 'path': ['foo']}, - {'op': 'remove', 'path': ['snitch']}, - ] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual(1, len(output.extra_properties)) - self.assertEqual('buzz', output.extra_properties['fizz']) - self.assertNotEqual(output.created_at, output.updated_at) - - def test_update_add_and_remove_property_under_limit(self): - """Ensure that image properties can be removed. - - Image properties should be able to be added and removed simultaneously - as long as the image has fewer than the limited number of image - properties after the transaction. 
- - """ - request = unit_test_utils.get_fake_request() - - changes = [ - {'op': 'add', 'path': ['foo'], 'value': 'baz'}, - {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, - ] - self.controller.update(request, UUID1, changes) - self.config(image_property_quota=1) - - # We must remove two properties to avoid being - # over the limit of 1 property - changes = [ - {'op': 'remove', 'path': ['foo']}, - {'op': 'remove', 'path': ['snitch']}, - {'op': 'add', 'path': ['fizz'], 'value': 'buzz'}, - ] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual(1, len(output.extra_properties)) - self.assertEqual('buzz', output.extra_properties['fizz']) - self.assertNotEqual(output.created_at, output.updated_at) - - def test_update_replace_missing_property(self): - request = unit_test_utils.get_fake_request() - - changes = [ - {'op': 'replace', 'path': 'foo', 'value': 'baz'}, - ] - self.assertRaises(webob.exc.HTTPConflict, - self.controller.update, request, UUID1, changes) - - def test_prop_protection_with_create_and_permitted_role(self): - enforcer = glance.api.policy.Enforcer() - self.controller = glance.api.v2.images.ImagesController(self.db, - enforcer, - self.notifier, - self.store) - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - created_image = self.controller.create(request, image=image, - extra_properties={}, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['member']) - changes = [ - {'op': 'add', 'path': ['x_owner_foo'], 'value': 'bar'}, - ] - output = self.controller.update(another_request, - created_image.image_id, changes) - self.assertEqual('bar', output.extra_properties['x_owner_foo']) - - def test_prop_protection_with_update_and_permitted_policy(self): - self.set_property_protections(use_policies=True) - enforcer = glance.api.policy.Enforcer() - self.controller = 
glance.api.v2.images.ImagesController(self.db, - enforcer, - self.notifier, - self.store) - request = unit_test_utils.get_fake_request(roles=['spl_role']) - image = {'name': 'image-1'} - extra_props = {'spl_creator_policy': 'bar'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - self.assertEqual('bar', - created_image.extra_properties['spl_creator_policy']) - - another_request = unit_test_utils.get_fake_request(roles=['spl_role']) - changes = [ - {'op': 'replace', 'path': ['spl_creator_policy'], 'value': 'par'}, - ] - self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, - another_request, created_image.image_id, changes) - another_request = unit_test_utils.get_fake_request(roles=['admin']) - output = self.controller.update(another_request, - created_image.image_id, changes) - self.assertEqual('par', - output.extra_properties['spl_creator_policy']) - - def test_prop_protection_with_create_with_patch_and_policy(self): - self.set_property_protections(use_policies=True) - enforcer = glance.api.policy.Enforcer() - self.controller = glance.api.v2.images.ImagesController(self.db, - enforcer, - self.notifier, - self.store) - request = unit_test_utils.get_fake_request(roles=['spl_role', 'admin']) - image = {'name': 'image-1'} - extra_props = {'spl_default_policy': 'bar'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['fake_role']) - changes = [ - {'op': 'add', 'path': ['spl_creator_policy'], 'value': 'bar'}, - ] - self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, - another_request, created_image.image_id, changes) - - another_request = unit_test_utils.get_fake_request(roles=['spl_role']) - output = self.controller.update(another_request, - created_image.image_id, changes) - self.assertEqual('bar', - output.extra_properties['spl_creator_policy']) - - def 
test_prop_protection_with_create_and_unpermitted_role(self): - enforcer = glance.api.policy.Enforcer() - self.controller = glance.api.v2.images.ImagesController(self.db, - enforcer, - self.notifier, - self.store) - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - created_image = self.controller.create(request, image=image, - extra_properties={}, - tags=[]) - roles = ['fake_member'] - another_request = unit_test_utils.get_fake_request(roles=roles) - changes = [ - {'op': 'add', 'path': ['x_owner_foo'], 'value': 'bar'}, - ] - self.assertRaises(webob.exc.HTTPForbidden, - self.controller.update, another_request, - created_image.image_id, changes) - - def test_prop_protection_with_show_and_permitted_role(self): - enforcer = glance.api.policy.Enforcer() - self.controller = glance.api.v2.images.ImagesController(self.db, - enforcer, - self.notifier, - self.store) - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - extra_props = {'x_owner_foo': 'bar'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['member']) - output = self.controller.show(another_request, created_image.image_id) - self.assertEqual('bar', output.extra_properties['x_owner_foo']) - - def test_prop_protection_with_show_and_unpermitted_role(self): - enforcer = glance.api.policy.Enforcer() - self.controller = glance.api.v2.images.ImagesController(self.db, - enforcer, - self.notifier, - self.store) - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['member']) - image = {'name': 'image-1'} - extra_props = {'x_owner_foo': 'bar'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['fake_role']) - output 
= self.controller.show(another_request, created_image.image_id) - self.assertRaises(KeyError, output.extra_properties.__getitem__, - 'x_owner_foo') - - def test_prop_protection_with_update_and_permitted_role(self): - enforcer = glance.api.policy.Enforcer() - self.controller = glance.api.v2.images.ImagesController(self.db, - enforcer, - self.notifier, - self.store) - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - extra_props = {'x_owner_foo': 'bar'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['member']) - changes = [ - {'op': 'replace', 'path': ['x_owner_foo'], 'value': 'baz'}, - ] - output = self.controller.update(another_request, - created_image.image_id, changes) - self.assertEqual('baz', output.extra_properties['x_owner_foo']) - - def test_prop_protection_with_update_and_unpermitted_role(self): - enforcer = glance.api.policy.Enforcer() - self.controller = glance.api.v2.images.ImagesController(self.db, - enforcer, - self.notifier, - self.store) - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - extra_props = {'x_owner_foo': 'bar'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['fake_role']) - changes = [ - {'op': 'replace', 'path': ['x_owner_foo'], 'value': 'baz'}, - ] - self.assertRaises(webob.exc.HTTPConflict, self.controller.update, - another_request, created_image.image_id, changes) - - def test_prop_protection_with_delete_and_permitted_role(self): - enforcer = glance.api.policy.Enforcer() - self.controller = glance.api.v2.images.ImagesController(self.db, - enforcer, - self.notifier, - self.store) - self.set_property_protections() - request = 
unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - extra_props = {'x_owner_foo': 'bar'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['member']) - changes = [ - {'op': 'remove', 'path': ['x_owner_foo']} - ] - output = self.controller.update(another_request, - created_image.image_id, changes) - self.assertRaises(KeyError, output.extra_properties.__getitem__, - 'x_owner_foo') - - def test_prop_protection_with_delete_and_unpermitted_role(self): - enforcer = glance.api.policy.Enforcer() - self.controller = glance.api.v2.images.ImagesController(self.db, - enforcer, - self.notifier, - self.store) - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - extra_props = {'x_owner_foo': 'bar'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['fake_role']) - changes = [ - {'op': 'remove', 'path': ['x_owner_foo']} - ] - self.assertRaises(webob.exc.HTTPConflict, self.controller.update, - another_request, created_image.image_id, changes) - - def test_create_protected_prop_case_insensitive(self): - enforcer = glance.api.policy.Enforcer() - self.controller = glance.api.v2.images.ImagesController(self.db, - enforcer, - self.notifier, - self.store) - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - created_image = self.controller.create(request, image=image, - extra_properties={}, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['member']) - changes = [ - {'op': 'add', 'path': ['x_case_insensitive'], 'value': '1'}, - ] - output = self.controller.update(another_request, - created_image.image_id, changes) - self.assertEqual('1', 
output.extra_properties['x_case_insensitive']) - - def test_read_protected_prop_case_insensitive(self): - enforcer = glance.api.policy.Enforcer() - self.controller = glance.api.v2.images.ImagesController(self.db, - enforcer, - self.notifier, - self.store) - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - extra_props = {'x_case_insensitive': '1'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['member']) - output = self.controller.show(another_request, created_image.image_id) - self.assertEqual('1', output.extra_properties['x_case_insensitive']) - - def test_update_protected_prop_case_insensitive(self): - enforcer = glance.api.policy.Enforcer() - self.controller = glance.api.v2.images.ImagesController(self.db, - enforcer, - self.notifier, - self.store) - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - extra_props = {'x_case_insensitive': '1'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['member']) - changes = [ - {'op': 'replace', 'path': ['x_case_insensitive'], 'value': '2'}, - ] - output = self.controller.update(another_request, - created_image.image_id, changes) - self.assertEqual('2', output.extra_properties['x_case_insensitive']) - - def test_delete_protected_prop_case_insensitive(self): - enforcer = glance.api.policy.Enforcer() - self.controller = glance.api.v2.images.ImagesController(self.db, - enforcer, - self.notifier, - self.store) - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - extra_props = {'x_case_insensitive': 'bar'} - created_image = self.controller.create(request, image=image, - 
extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['member']) - changes = [ - {'op': 'remove', 'path': ['x_case_insensitive']} - ] - output = self.controller.update(another_request, - created_image.image_id, changes) - self.assertRaises(KeyError, output.extra_properties.__getitem__, - 'x_case_insensitive') - - def test_create_non_protected_prop(self): - """Property marked with special char @ creatable by an unknown role""" - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - extra_props = {'x_all_permitted_1': '1'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - self.assertEqual('1', - created_image.extra_properties['x_all_permitted_1']) - another_request = unit_test_utils.get_fake_request(roles=['joe_soap']) - extra_props = {'x_all_permitted_2': '2'} - created_image = self.controller.create(another_request, image=image, - extra_properties=extra_props, - tags=[]) - self.assertEqual('2', - created_image.extra_properties['x_all_permitted_2']) - - def test_read_non_protected_prop(self): - """Property marked with special char @ readable by an unknown role""" - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - extra_props = {'x_all_permitted': '1'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['joe_soap']) - output = self.controller.show(another_request, created_image.image_id) - self.assertEqual('1', output.extra_properties['x_all_permitted']) - - def test_update_non_protected_prop(self): - """Property marked with special char @ updatable by an unknown role""" - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - 
extra_props = {'x_all_permitted': 'bar'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['joe_soap']) - changes = [ - {'op': 'replace', 'path': ['x_all_permitted'], 'value': 'baz'}, - ] - output = self.controller.update(another_request, - created_image.image_id, changes) - self.assertEqual('baz', output.extra_properties['x_all_permitted']) - - def test_delete_non_protected_prop(self): - """Property marked with special char @ deletable by an unknown role""" - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - extra_props = {'x_all_permitted': 'bar'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['member']) - changes = [ - {'op': 'remove', 'path': ['x_all_permitted']} - ] - output = self.controller.update(another_request, - created_image.image_id, changes) - self.assertRaises(KeyError, output.extra_properties.__getitem__, - 'x_all_permitted') - - def test_create_locked_down_protected_prop(self): - """Property marked with special char ! creatable by no one""" - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - created_image = self.controller.create(request, image=image, - extra_properties={}, - tags=[]) - roles = ['fake_member'] - another_request = unit_test_utils.get_fake_request(roles=roles) - changes = [ - {'op': 'add', 'path': ['x_none_permitted'], 'value': 'bar'}, - ] - self.assertRaises(webob.exc.HTTPForbidden, - self.controller.update, another_request, - created_image.image_id, changes) - - def test_read_locked_down_protected_prop(self): - """Property marked with special char ! 
readable by no one""" - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['member']) - image = {'name': 'image-1'} - extra_props = {'x_none_read': 'bar'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['fake_role']) - output = self.controller.show(another_request, created_image.image_id) - self.assertRaises(KeyError, output.extra_properties.__getitem__, - 'x_none_read') - - def test_update_locked_down_protected_prop(self): - """Property marked with special char ! updatable by no one""" - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - extra_props = {'x_none_update': 'bar'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['fake_role']) - changes = [ - {'op': 'replace', 'path': ['x_none_update'], 'value': 'baz'}, - ] - self.assertRaises(webob.exc.HTTPConflict, self.controller.update, - another_request, created_image.image_id, changes) - - def test_delete_locked_down_protected_prop(self): - """Property marked with special char ! 
deletable by no one""" - self.set_property_protections() - request = unit_test_utils.get_fake_request(roles=['admin']) - image = {'name': 'image-1'} - extra_props = {'x_none_delete': 'bar'} - created_image = self.controller.create(request, image=image, - extra_properties=extra_props, - tags=[]) - another_request = unit_test_utils.get_fake_request(roles=['fake_role']) - changes = [ - {'op': 'remove', 'path': ['x_none_delete']} - ] - self.assertRaises(webob.exc.HTTPConflict, self.controller.update, - another_request, created_image.image_id, changes) - - def test_update_replace_locations_non_empty(self): - self.config(show_multiple_locations=True) - new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} - request = unit_test_utils.get_fake_request() - changes = [{'op': 'replace', 'path': ['locations'], - 'value': [new_location]}] - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - request, UUID1, changes) - - def test_update_replace_locations_metadata_update(self): - self.config(show_multiple_locations=True) - location = {'url': '%s/%s' % (BASE_URI, UUID1), - 'metadata': {'a': 1}} - request = unit_test_utils.get_fake_request() - changes = [{'op': 'replace', 'path': ['locations'], - 'value': [location]}] - output = self.controller.update(request, UUID1, changes) - self.assertEqual({'a': 1}, output.locations[0]['metadata']) - - def test_locations_actions_with_locations_invisible(self): - self.config(show_multiple_locations=False) - new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} - request = unit_test_utils.get_fake_request() - changes = [{'op': 'replace', 'path': ['locations'], - 'value': [new_location]}] - self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, - request, UUID1, changes) - - def test_update_replace_locations_invalid(self): - request = unit_test_utils.get_fake_request() - changes = [{'op': 'replace', 'path': ['locations'], 'value': []}] - self.assertRaises(webob.exc.HTTPForbidden, 
self.controller.update, - request, UUID1, changes) - - def test_update_add_property(self): - request = unit_test_utils.get_fake_request() - - changes = [ - {'op': 'add', 'path': ['foo'], 'value': 'baz'}, - {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, - ] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual('baz', output.extra_properties['foo']) - self.assertEqual('golden', output.extra_properties['snitch']) - self.assertNotEqual(output.created_at, output.updated_at) - - def test_update_add_base_property_json_schema_version_4(self): - request = unit_test_utils.get_fake_request() - changes = [{ - 'json_schema_version': 4, 'op': 'add', - 'path': ['name'], 'value': 'fedora' - }] - self.assertRaises(webob.exc.HTTPConflict, self.controller.update, - request, UUID1, changes) - - def test_update_add_extra_property_json_schema_version_4(self): - self.db.image_update(None, UUID1, {'properties': {'foo': 'bar'}}) - request = unit_test_utils.get_fake_request() - changes = [{ - 'json_schema_version': 4, 'op': 'add', - 'path': ['foo'], 'value': 'baz' - }] - self.assertRaises(webob.exc.HTTPConflict, self.controller.update, - request, UUID1, changes) - - def test_update_add_base_property_json_schema_version_10(self): - request = unit_test_utils.get_fake_request() - changes = [{ - 'json_schema_version': 10, 'op': 'add', - 'path': ['name'], 'value': 'fedora' - }] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual('fedora', output.name) - - def test_update_add_extra_property_json_schema_version_10(self): - self.db.image_update(None, UUID1, {'properties': {'foo': 'bar'}}) - request = unit_test_utils.get_fake_request() - changes = [{ - 'json_schema_version': 10, 'op': 'add', - 'path': ['foo'], 'value': 'baz' - }] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual({'foo': 
'baz'}, output.extra_properties) - - def test_update_add_property_already_present_json_schema_version_4(self): - request = unit_test_utils.get_fake_request() - properties = {'foo': 'bar'} - self.db.image_update(None, UUID1, {'properties': properties}) - - output = self.controller.show(request, UUID1) - self.assertEqual('bar', output.extra_properties['foo']) - - changes = [ - {'json_schema_version': 4, 'op': 'add', - 'path': ['foo'], 'value': 'baz'}, - ] - self.assertRaises(webob.exc.HTTPConflict, - self.controller.update, request, UUID1, changes) - - def test_update_add_property_already_present_json_schema_version_10(self): - request = unit_test_utils.get_fake_request() - properties = {'foo': 'bar'} - self.db.image_update(None, UUID1, {'properties': properties}) - - output = self.controller.show(request, UUID1) - self.assertEqual('bar', output.extra_properties['foo']) - - changes = [ - {'json_schema_version': 10, 'op': 'add', - 'path': ['foo'], 'value': 'baz'}, - ] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual({'foo': 'baz'}, output.extra_properties) - - def test_update_add_locations(self): - self.config(show_multiple_locations=True) - new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} - request = unit_test_utils.get_fake_request() - changes = [{'op': 'add', 'path': ['locations', '-'], - 'value': new_location}] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual(2, len(output.locations)) - self.assertEqual(new_location, output.locations[1]) - - def test_replace_location_possible_on_queued(self): - self.skipTest('This test is intermittently failing at the gate. 
' - 'See bug #1649300') - self.config(show_multiple_locations=True) - self.images = [ - _db_fixture('1', owner=TENANT1, checksum=CHKSUM, - name='1', - is_public=True, - disk_format='raw', - container_format='bare', - status='queued'), - ] - self.db.image_create(None, self.images[0]) - request = unit_test_utils.get_fake_request() - new_location = {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}} - changes = [{'op': 'replace', 'path': ['locations'], - 'value': [new_location]}] - output = self.controller.update(request, '1', changes) - self.assertEqual('1', output.image_id) - self.assertEqual(1, len(output.locations)) - self.assertEqual(new_location, output.locations[0]) - - def test_add_location_possible_on_queued(self): - self.skipTest('This test is intermittently failing at the gate. ' - 'See bug #1649300') - self.config(show_multiple_locations=True) - self.images = [ - _db_fixture('1', owner=TENANT1, checksum=CHKSUM, - name='1', - is_public=True, - disk_format='raw', - container_format='bare', - status='queued'), - ] - self.db.image_create(None, self.images[0]) - request = unit_test_utils.get_fake_request() - new_location = {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}} - changes = [{'op': 'add', 'path': ['locations', '-'], - 'value': new_location}] - output = self.controller.update(request, '1', changes) - self.assertEqual('1', output.image_id) - self.assertEqual(1, len(output.locations)) - self.assertEqual(new_location, output.locations[0]) - - def _test_update_locations_status(self, image_status, update): - self.config(show_multiple_locations=True) - self.images = [ - _db_fixture('1', owner=TENANT1, checksum=CHKSUM, - name='1', - disk_format='raw', - container_format='bare', - status=image_status), - ] - request = unit_test_utils.get_fake_request() - if image_status == 'deactivated': - self.db.image_create(request.context, self.images[0]) - else: - self.db.image_create(None, self.images[0]) - new_location = {'url': '%s/fake_location' % 
BASE_URI, 'metadata': {}} - changes = [{'op': update, 'path': ['locations', '-'], - 'value': new_location}] - self.assertRaises(webob.exc.HTTPConflict, - self.controller.update, request, '1', changes) - - def test_location_add_not_permitted_status_saving(self): - self._test_update_locations_status('saving', 'add') - - def test_location_add_not_permitted_status_deactivated(self): - self._test_update_locations_status('deactivated', 'add') - - def test_location_add_not_permitted_status_deleted(self): - self._test_update_locations_status('deleted', 'add') - - def test_location_add_not_permitted_status_pending_delete(self): - self._test_update_locations_status('pending_delete', 'add') - - def test_location_add_not_permitted_status_killed(self): - self._test_update_locations_status('killed', 'add') - - def test_location_remove_not_permitted_status_saving(self): - self._test_update_locations_status('saving', 'remove') - - def test_location_remove_not_permitted_status_deactivated(self): - self._test_update_locations_status('deactivated', 'remove') - - def test_location_remove_not_permitted_status_deleted(self): - self._test_update_locations_status('deleted', 'remove') - - def test_location_remove_not_permitted_status_pending_delete(self): - self._test_update_locations_status('pending_delete', 'remove') - - def test_location_remove_not_permitted_status_killed(self): - self._test_update_locations_status('killed', 'remove') - - def test_location_remove_not_permitted_status_queued(self): - self._test_update_locations_status('queued', 'remove') - - def test_location_replace_not_permitted_status_saving(self): - self._test_update_locations_status('saving', 'replace') - - def test_location_replace_not_permitted_status_deactivated(self): - self._test_update_locations_status('deactivated', 'replace') - - def test_location_replace_not_permitted_status_deleted(self): - self._test_update_locations_status('deleted', 'replace') - - def 
test_location_replace_not_permitted_status_pending_delete(self): - self._test_update_locations_status('pending_delete', 'replace') - - def test_location_replace_not_permitted_status_killed(self): - self._test_update_locations_status('killed', 'replace') - - def test_update_add_locations_insertion(self): - self.config(show_multiple_locations=True) - new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} - request = unit_test_utils.get_fake_request() - changes = [{'op': 'add', 'path': ['locations', '0'], - 'value': new_location}] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual(2, len(output.locations)) - self.assertEqual(new_location, output.locations[0]) - - def test_update_add_locations_list(self): - self.config(show_multiple_locations=True) - request = unit_test_utils.get_fake_request() - changes = [{'op': 'add', 'path': ['locations', '-'], - 'value': {'url': 'foo', 'metadata': {}}}] - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - request, UUID1, changes) - - def test_update_add_locations_invalid(self): - self.config(show_multiple_locations=True) - request = unit_test_utils.get_fake_request() - changes = [{'op': 'add', 'path': ['locations', '-'], - 'value': {'url': 'unknow://foo', 'metadata': {}}}] - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - request, UUID1, changes) - - changes = [{'op': 'add', 'path': ['locations', None], - 'value': {'url': 'unknow://foo', 'metadata': {}}}] - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - request, UUID1, changes) - - def test_update_add_duplicate_locations(self): - self.config(show_multiple_locations=True) - new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} - request = unit_test_utils.get_fake_request() - changes = [{'op': 'add', 'path': ['locations', '-'], - 'value': new_location}] - output = self.controller.update(request, UUID1, changes) - 
self.assertEqual(UUID1, output.image_id) - self.assertEqual(2, len(output.locations)) - self.assertEqual(new_location, output.locations[1]) - - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - request, UUID1, changes) - - def test_update_add_too_many_locations(self): - self.config(show_multiple_locations=True) - self.config(image_location_quota=1) - request = unit_test_utils.get_fake_request() - changes = [ - {'op': 'add', 'path': ['locations', '-'], - 'value': {'url': '%s/fake_location_1' % BASE_URI, - 'metadata': {}}}, - {'op': 'add', 'path': ['locations', '-'], - 'value': {'url': '%s/fake_location_2' % BASE_URI, - 'metadata': {}}}, - ] - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.update, request, - UUID1, changes) - - def test_update_add_and_remove_too_many_locations(self): - self.config(show_multiple_locations=True) - request = unit_test_utils.get_fake_request() - changes = [ - {'op': 'add', 'path': ['locations', '-'], - 'value': {'url': '%s/fake_location_1' % BASE_URI, - 'metadata': {}}}, - {'op': 'add', 'path': ['locations', '-'], - 'value': {'url': '%s/fake_location_2' % BASE_URI, - 'metadata': {}}}, - ] - self.controller.update(request, UUID1, changes) - self.config(image_location_quota=1) - - # We must remove two properties to avoid being - # over the limit of 1 property - changes = [ - {'op': 'remove', 'path': ['locations', '0']}, - {'op': 'add', 'path': ['locations', '-'], - 'value': {'url': '%s/fake_location_3' % BASE_URI, - 'metadata': {}}}, - ] - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.update, request, - UUID1, changes) - - def test_update_add_unlimited_locations(self): - self.config(show_multiple_locations=True) - self.config(image_location_quota=-1) - request = unit_test_utils.get_fake_request() - changes = [ - {'op': 'add', 'path': ['locations', '-'], - 'value': {'url': '%s/fake_location_1' % BASE_URI, - 'metadata': {}}}, - ] - output = 
self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertNotEqual(output.created_at, output.updated_at) - - def test_update_remove_location_while_over_limit(self): - """Ensure that image locations can be removed. - - Image locations should be able to be removed as long as the image has - fewer than the limited number of image locations after the - transaction. - """ - self.config(show_multiple_locations=True) - request = unit_test_utils.get_fake_request() - changes = [ - {'op': 'add', 'path': ['locations', '-'], - 'value': {'url': '%s/fake_location_1' % BASE_URI, - 'metadata': {}}}, - {'op': 'add', 'path': ['locations', '-'], - 'value': {'url': '%s/fake_location_2' % BASE_URI, - 'metadata': {}}}, - ] - self.controller.update(request, UUID1, changes) - self.config(image_location_quota=1) - self.config(show_multiple_locations=True) - - # We must remove two locations to avoid being over - # the limit of 1 location - changes = [ - {'op': 'remove', 'path': ['locations', '0']}, - {'op': 'remove', 'path': ['locations', '0']}, - ] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual(1, len(output.locations)) - self.assertIn('fake_location_2', output.locations[0]['url']) - self.assertNotEqual(output.created_at, output.updated_at) - - def test_update_add_and_remove_location_under_limit(self): - """Ensure that image locations can be removed. - - Image locations should be able to be added and removed simultaneously - as long as the image has fewer than the limited number of image - locations after the transaction. 
- """ - self.stubs.Set(store, 'get_size_from_backend', - unit_test_utils.fake_get_size_from_backend) - self.config(show_multiple_locations=True) - request = unit_test_utils.get_fake_request() - - changes = [ - {'op': 'add', 'path': ['locations', '-'], - 'value': {'url': '%s/fake_location_1' % BASE_URI, - 'metadata': {}}}, - {'op': 'add', 'path': ['locations', '-'], - 'value': {'url': '%s/fake_location_2' % BASE_URI, - 'metadata': {}}}, - ] - self.controller.update(request, UUID1, changes) - self.config(image_location_quota=2) - - # We must remove two properties to avoid being - # over the limit of 1 property - changes = [ - {'op': 'remove', 'path': ['locations', '0']}, - {'op': 'remove', 'path': ['locations', '0']}, - {'op': 'add', 'path': ['locations', '-'], - 'value': {'url': '%s/fake_location_3' % BASE_URI, - 'metadata': {}}}, - ] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual(2, len(output.locations)) - self.assertIn('fake_location_3', output.locations[1]['url']) - self.assertNotEqual(output.created_at, output.updated_at) - - def test_update_remove_base_property(self): - self.db.image_update(None, UUID1, {'properties': {'foo': 'bar'}}) - request = unit_test_utils.get_fake_request() - changes = [{'op': 'remove', 'path': ['name']}] - self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, - request, UUID1, changes) - - def test_update_remove_property(self): - request = unit_test_utils.get_fake_request() - properties = {'foo': 'bar', 'snitch': 'golden'} - self.db.image_update(None, UUID1, {'properties': properties}) - - output = self.controller.show(request, UUID1) - self.assertEqual('bar', output.extra_properties['foo']) - self.assertEqual('golden', output.extra_properties['snitch']) - - changes = [ - {'op': 'remove', 'path': ['snitch']}, - ] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual({'foo': 'bar'}, 
output.extra_properties) - self.assertNotEqual(output.created_at, output.updated_at) - - def test_update_remove_missing_property(self): - request = unit_test_utils.get_fake_request() - - changes = [ - {'op': 'remove', 'path': ['foo']}, - ] - self.assertRaises(webob.exc.HTTPConflict, - self.controller.update, request, UUID1, changes) - - def test_update_remove_location(self): - self.config(show_multiple_locations=True) - self.stubs.Set(store, 'get_size_from_backend', - unit_test_utils.fake_get_size_from_backend) - - request = unit_test_utils.get_fake_request() - new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} - changes = [{'op': 'add', 'path': ['locations', '-'], - 'value': new_location}] - self.controller.update(request, UUID1, changes) - changes = [{'op': 'remove', 'path': ['locations', '0']}] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual(1, len(output.locations)) - self.assertEqual('active', output.status) - - def test_update_remove_location_invalid_pos(self): - self.config(show_multiple_locations=True) - request = unit_test_utils.get_fake_request() - changes = [ - {'op': 'add', 'path': ['locations', '-'], - 'value': {'url': '%s/fake_location' % BASE_URI, - 'metadata': {}}}] - self.controller.update(request, UUID1, changes) - changes = [{'op': 'remove', 'path': ['locations', None]}] - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - request, UUID1, changes) - changes = [{'op': 'remove', 'path': ['locations', '-1']}] - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - request, UUID1, changes) - changes = [{'op': 'remove', 'path': ['locations', '99']}] - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - request, UUID1, changes) - changes = [{'op': 'remove', 'path': ['locations', 'x']}] - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - request, UUID1, changes) - - def 
test_update_remove_location_store_exception(self): - self.config(show_multiple_locations=True) - - def fake_delete_image_location_from_backend(self, *args, **kwargs): - raise Exception('fake_backend_exception') - - self.stubs.Set(self.store_utils, 'delete_image_location_from_backend', - fake_delete_image_location_from_backend) - - request = unit_test_utils.get_fake_request() - changes = [ - {'op': 'add', 'path': ['locations', '-'], - 'value': {'url': '%s/fake_location' % BASE_URI, - 'metadata': {}}}] - self.controller.update(request, UUID1, changes) - changes = [{'op': 'remove', 'path': ['locations', '0']}] - self.assertRaises(webob.exc.HTTPInternalServerError, - self.controller.update, request, UUID1, changes) - - def test_update_multiple_changes(self): - request = unit_test_utils.get_fake_request() - properties = {'foo': 'bar', 'snitch': 'golden'} - self.db.image_update(None, UUID1, {'properties': properties}) - - changes = [ - {'op': 'replace', 'path': ['min_ram'], 'value': 128}, - {'op': 'replace', 'path': ['foo'], 'value': 'baz'}, - {'op': 'remove', 'path': ['snitch']}, - {'op': 'add', 'path': ['kb'], 'value': 'dvorak'}, - ] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(UUID1, output.image_id) - self.assertEqual(128, output.min_ram) - self.addDetail('extra_properties', - testtools.content.json_content( - jsonutils.dumps(output.extra_properties))) - self.assertEqual(2, len(output.extra_properties)) - self.assertEqual('baz', output.extra_properties['foo']) - self.assertEqual('dvorak', output.extra_properties['kb']) - self.assertNotEqual(output.created_at, output.updated_at) - - def test_update_invalid_operation(self): - request = unit_test_utils.get_fake_request() - change = {'op': 'test', 'path': 'options', 'value': 'puts'} - try: - self.controller.update(request, UUID1, [change]) - except AttributeError: - pass # AttributeError is the desired behavior - else: - self.fail('Failed to raise AssertionError on %s' % change) - - def 
test_update_duplicate_tags(self): - request = unit_test_utils.get_fake_request() - changes = [ - {'op': 'replace', 'path': ['tags'], 'value': ['ping', 'ping']}, - ] - output = self.controller.update(request, UUID1, changes) - self.assertEqual(1, len(output.tags)) - self.assertIn('ping', output.tags) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('image.update', output_log['event_type']) - self.assertEqual(UUID1, output_log['payload']['id']) - - def test_update_disabled_notification(self): - self.config(disabled_notifications=["image.update"]) - request = unit_test_utils.get_fake_request() - changes = [ - {'op': 'replace', 'path': ['name'], 'value': 'Ping Pong'}, - ] - output = self.controller.update(request, UUID1, changes) - self.assertEqual('Ping Pong', output.name) - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - - def test_delete(self): - request = unit_test_utils.get_fake_request() - self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) - try: - self.controller.delete(request, UUID1) - output_logs = self.notifier.get_logs() - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual("image.delete", output_log['event_type']) - except Exception as e: - self.fail("Delete raised exception: %s" % e) - - deleted_img = self.db.image_get(request.context, UUID1, - force_show_deleted=True) - self.assertTrue(deleted_img['deleted']) - self.assertEqual('deleted', deleted_img['status']) - self.assertNotIn('%s/%s' % (BASE_URI, UUID1), self.store.data) - - def test_delete_with_tags(self): - request = unit_test_utils.get_fake_request() - changes = [ - {'op': 'replace', 'path': ['tags'], - 'value': ['many', 'cool', 'new', 'tags']}, - ] - self.controller.update(request, UUID1, changes) - self.assertIn('%s/%s' % 
(BASE_URI, UUID1), self.store.data) - self.controller.delete(request, UUID1) - output_logs = self.notifier.get_logs() - - # Get `delete` event from logs - output_delete_logs = [output_log for output_log in output_logs - if output_log['event_type'] == 'image.delete'] - - self.assertEqual(1, len(output_delete_logs)) - output_log = output_delete_logs[0] - - self.assertEqual('INFO', output_log['notification_type']) - - deleted_img = self.db.image_get(request.context, UUID1, - force_show_deleted=True) - self.assertTrue(deleted_img['deleted']) - self.assertEqual('deleted', deleted_img['status']) - self.assertNotIn('%s/%s' % (BASE_URI, UUID1), self.store.data) - - def test_delete_disabled_notification(self): - self.config(disabled_notifications=["image.delete"]) - request = unit_test_utils.get_fake_request() - self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) - try: - self.controller.delete(request, UUID1) - output_logs = self.notifier.get_logs() - self.assertEqual(0, len(output_logs)) - except Exception as e: - self.fail("Delete raised exception: %s" % e) - - deleted_img = self.db.image_get(request.context, UUID1, - force_show_deleted=True) - self.assertTrue(deleted_img['deleted']) - self.assertEqual('deleted', deleted_img['status']) - self.assertNotIn('%s/%s' % (BASE_URI, UUID1), self.store.data) - - def test_delete_queued_updates_status(self): - """Ensure status of queued image is updated (LP bug #1048851)""" - request = unit_test_utils.get_fake_request(is_admin=True) - image = self.db.image_create(request.context, {'status': 'queued'}) - image_id = image['id'] - self.controller.delete(request, image_id) - - image = self.db.image_get(request.context, image_id, - force_show_deleted=True) - self.assertTrue(image['deleted']) - self.assertEqual('deleted', image['status']) - - def test_delete_queued_updates_status_delayed_delete(self): - """Ensure status of queued image is updated (LP bug #1048851). - - Must be set to 'deleted' when delayed_delete isenabled. 
- """ - self.config(delayed_delete=True) - - request = unit_test_utils.get_fake_request(is_admin=True) - image = self.db.image_create(request.context, {'status': 'queued'}) - image_id = image['id'] - self.controller.delete(request, image_id) - - image = self.db.image_get(request.context, image_id, - force_show_deleted=True) - self.assertTrue(image['deleted']) - self.assertEqual('deleted', image['status']) - - def test_delete_not_in_store(self): - request = unit_test_utils.get_fake_request() - self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) - for k in self.store.data: - if UUID1 in k: - del self.store.data[k] - break - - self.controller.delete(request, UUID1) - deleted_img = self.db.image_get(request.context, UUID1, - force_show_deleted=True) - self.assertTrue(deleted_img['deleted']) - self.assertEqual('deleted', deleted_img['status']) - self.assertNotIn('%s/%s' % (BASE_URI, UUID1), self.store.data) - - def test_delayed_delete(self): - self.config(delayed_delete=True) - request = unit_test_utils.get_fake_request() - self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) - - self.controller.delete(request, UUID1) - deleted_img = self.db.image_get(request.context, UUID1, - force_show_deleted=True) - self.assertTrue(deleted_img['deleted']) - self.assertEqual('pending_delete', deleted_img['status']) - self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) - - def test_delete_non_existent(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, - request, str(uuid.uuid4())) - - def test_delete_already_deleted_image_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.controller.delete(request, UUID1) - self.assertRaises(webob.exc.HTTPNotFound, - self.controller.delete, request, UUID1) - - def test_delete_not_allowed(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, - request, UUID4) - - 
def test_delete_in_use(self): - def fake_safe_delete_from_backend(self, *args, **kwargs): - raise store.exceptions.InUseByStore() - self.stubs.Set(self.store_utils, 'safe_delete_from_backend', - fake_safe_delete_from_backend) - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPConflict, self.controller.delete, - request, UUID1) - - def test_delete_has_snapshot(self): - def fake_safe_delete_from_backend(self, *args, **kwargs): - raise store.exceptions.HasSnapshot() - self.stubs.Set(self.store_utils, 'safe_delete_from_backend', - fake_safe_delete_from_backend) - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPConflict, self.controller.delete, - request, UUID1) - - def test_delete_to_unallowed_status(self): - # from deactivated to pending-delete - self.config(delayed_delete=True) - request = unit_test_utils.get_fake_request(is_admin=True) - self.action_controller.deactivate(request, UUID1) - - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, - request, UUID1) - - def test_index_with_invalid_marker(self): - fake_uuid = str(uuid.uuid4()) - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, request, marker=fake_uuid) - - def test_invalid_locations_op_pos(self): - pos = self.controller._get_locations_op_pos(None, 2, True) - self.assertIsNone(pos) - pos = self.controller._get_locations_op_pos('1', None, True) - self.assertIsNone(pos) - - -class TestImagesControllerPolicies(base.IsolatedUnitTest): - - def setUp(self): - super(TestImagesControllerPolicies, self).setUp() - self.db = unit_test_utils.FakeDB() - self.policy = unit_test_utils.FakePolicyEnforcer() - self.controller = glance.api.v2.images.ImagesController(self.db, - self.policy) - store = unit_test_utils.FakeStoreAPI() - self.store_utils = unit_test_utils.FakeStoreUtils(store) - - def test_index_unauthorized(self): - rules = {"get_images": False} - 
self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.index, - request) - - def test_show_unauthorized(self): - rules = {"get_image": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.show, - request, image_id=UUID2) - - def test_create_image_unauthorized(self): - rules = {"add_image": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - image = {'name': 'image-1'} - extra_properties = {} - tags = [] - self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, - request, image, extra_properties, tags) - - def test_create_public_image_unauthorized(self): - rules = {"publicize_image": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - image = {'name': 'image-1', 'visibility': 'public'} - extra_properties = {} - tags = [] - self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, - request, image, extra_properties, tags) - - def test_create_community_image_unauthorized(self): - rules = {"communitize_image": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - image = {'name': 'image-c1', 'visibility': 'community'} - extra_properties = {} - tags = [] - self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, - request, image, extra_properties, tags) - - def test_update_unauthorized(self): - rules = {"modify_image": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - changes = [{'op': 'replace', 'path': ['name'], 'value': 'image-2'}] - self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, - request, UUID1, changes) - - def test_update_publicize_image_unauthorized(self): - rules = {"publicize_image": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - changes = [{'op': 'replace', 
'path': ['visibility'], - 'value': 'public'}] - self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, - request, UUID1, changes) - - def test_update_communitize_image_unauthorized(self): - rules = {"communitize_image": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - changes = [{'op': 'replace', 'path': ['visibility'], - 'value': 'community'}] - self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, - request, UUID1, changes) - - def test_update_depublicize_image_unauthorized(self): - rules = {"publicize_image": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - changes = [{'op': 'replace', 'path': ['visibility'], - 'value': 'private'}] - output = self.controller.update(request, UUID1, changes) - self.assertEqual('private', output.visibility) - - def test_update_decommunitize_image_unauthorized(self): - rules = {"communitize_image": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - changes = [{'op': 'replace', 'path': ['visibility'], - 'value': 'private'}] - output = self.controller.update(request, UUID1, changes) - self.assertEqual('private', output.visibility) - - def test_update_get_image_location_unauthorized(self): - rules = {"get_image_location": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - changes = [{'op': 'replace', 'path': ['locations'], 'value': []}] - self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, - request, UUID1, changes) - - def test_update_set_image_location_unauthorized(self): - def fake_delete_image_location_from_backend(self, *args, **kwargs): - pass - - rules = {"set_image_location": False} - self.policy.set_rules(rules) - new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} - request = unit_test_utils.get_fake_request() - changes = [{'op': 'add', 'path': ['locations', '-'], - 'value': new_location}] - 
self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, - request, UUID1, changes) - - def test_update_delete_image_location_unauthorized(self): - rules = {"delete_image_location": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - changes = [{'op': 'replace', 'path': ['locations'], 'value': []}] - self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, - request, UUID1, changes) - - def test_delete_unauthorized(self): - rules = {"delete_image": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, - request, UUID1) - - -class TestImagesDeserializer(test_utils.BaseTestCase): - - def setUp(self): - super(TestImagesDeserializer, self).setUp() - self.deserializer = glance.api.v2.images.RequestDeserializer() - - def test_create_minimal(self): - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({}) - output = self.deserializer.create(request) - expected = {'image': {}, 'extra_properties': {}, 'tags': []} - self.assertEqual(expected, output) - - def test_create_invalid_id(self): - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({'id': 'gabe'}) - self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, - request) - - def test_create_id_to_image_id(self): - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({'id': UUID4}) - output = self.deserializer.create(request) - expected = {'image': {'image_id': UUID4}, - 'extra_properties': {}, - 'tags': []} - self.assertEqual(expected, output) - - def test_create_no_body(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, - request) - - def test_create_full(self): - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({ - 'id': UUID3, - 'name': 
'image-1', - 'visibility': 'public', - 'tags': ['one', 'two'], - 'container_format': 'ami', - 'disk_format': 'ami', - 'min_ram': 128, - 'min_disk': 10, - 'foo': 'bar', - 'protected': True, - }) - output = self.deserializer.create(request) - properties = { - 'image_id': UUID3, - 'name': 'image-1', - 'visibility': 'public', - 'container_format': 'ami', - 'disk_format': 'ami', - 'min_ram': 128, - 'min_disk': 10, - 'protected': True, - } - self.maxDiff = None - expected = {'image': properties, - 'extra_properties': {'foo': 'bar'}, - 'tags': ['one', 'two']} - self.assertEqual(expected, output) - - def test_create_readonly_attributes_forbidden(self): - bodies = [ - {'direct_url': 'http://example.com'}, - {'self': 'http://example.com'}, - {'file': 'http://example.com'}, - {'schema': 'http://example.com'}, - ] - - for body in bodies: - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPForbidden, - self.deserializer.create, request) - - def _get_fake_patch_request(self, content_type_minor_version=1): - request = unit_test_utils.get_fake_request() - template = 'application/openstack-images-v2.%d-json-patch' - request.content_type = template % content_type_minor_version - return request - - def test_update_empty_body(self): - request = self._get_fake_patch_request() - request.body = jsonutils.dump_as_bytes([]) - output = self.deserializer.update(request) - expected = {'changes': []} - self.assertEqual(expected, output) - - def test_update_unsupported_content_type(self): - request = unit_test_utils.get_fake_request() - request.content_type = 'application/json-patch' - request.body = jsonutils.dump_as_bytes([]) - try: - self.deserializer.update(request) - except webob.exc.HTTPUnsupportedMediaType as e: - # desired result, but must have correct Accept-Patch header - accept_patch = ['application/openstack-images-v2.1-json-patch', - 'application/openstack-images-v2.0-json-patch'] - expected = ', 
'.join(sorted(accept_patch)) - self.assertEqual(expected, e.headers['Accept-Patch']) - else: - self.fail('Did not raise HTTPUnsupportedMediaType') - - def test_update_body_not_a_list(self): - bodies = [ - {'op': 'add', 'path': '/someprop', 'value': 'somevalue'}, - 'just some string', - 123, - True, - False, - None, - ] - for body in bodies: - request = self._get_fake_patch_request() - request.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.update, request) - - def test_update_invalid_changes(self): - changes = [ - ['a', 'list', 'of', 'stuff'], - 'just some string', - 123, - True, - False, - None, - {'op': 'invalid', 'path': '/name', 'value': 'fedora'} - ] - for change in changes: - request = self._get_fake_patch_request() - request.body = jsonutils.dump_as_bytes([change]) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.update, request) - - def test_update(self): - request = self._get_fake_patch_request() - body = [ - {'op': 'replace', 'path': '/name', 'value': 'fedora'}, - {'op': 'replace', 'path': '/tags', 'value': ['king', 'kong']}, - {'op': 'replace', 'path': '/foo', 'value': 'bar'}, - {'op': 'add', 'path': '/bebim', 'value': 'bap'}, - {'op': 'remove', 'path': '/sparks'}, - {'op': 'add', 'path': '/locations/-', - 'value': {'url': 'scheme3://path3', 'metadata': {}}}, - {'op': 'add', 'path': '/locations/10', - 'value': {'url': 'scheme4://path4', 'metadata': {}}}, - {'op': 'remove', 'path': '/locations/2'}, - {'op': 'replace', 'path': '/locations', 'value': []}, - {'op': 'replace', 'path': '/locations', - 'value': [{'url': 'scheme5://path5', 'metadata': {}}, - {'url': 'scheme6://path6', 'metadata': {}}]}, - ] - request.body = jsonutils.dump_as_bytes(body) - output = self.deserializer.update(request) - expected = {'changes': [ - {'json_schema_version': 10, 'op': 'replace', - 'path': ['name'], 'value': 'fedora'}, - {'json_schema_version': 10, 'op': 'replace', - 'path': ['tags'], 'value': 
['king', 'kong']}, - {'json_schema_version': 10, 'op': 'replace', - 'path': ['foo'], 'value': 'bar'}, - {'json_schema_version': 10, 'op': 'add', - 'path': ['bebim'], 'value': 'bap'}, - {'json_schema_version': 10, 'op': 'remove', - 'path': ['sparks']}, - {'json_schema_version': 10, 'op': 'add', - 'path': ['locations', '-'], - 'value': {'url': 'scheme3://path3', 'metadata': {}}}, - {'json_schema_version': 10, 'op': 'add', - 'path': ['locations', '10'], - 'value': {'url': 'scheme4://path4', 'metadata': {}}}, - {'json_schema_version': 10, 'op': 'remove', - 'path': ['locations', '2']}, - {'json_schema_version': 10, 'op': 'replace', - 'path': ['locations'], 'value': []}, - {'json_schema_version': 10, 'op': 'replace', - 'path': ['locations'], - 'value': [{'url': 'scheme5://path5', 'metadata': {}}, - {'url': 'scheme6://path6', 'metadata': {}}]}, - ]} - self.assertEqual(expected, output) - - def test_update_v2_0_compatibility(self): - request = self._get_fake_patch_request(content_type_minor_version=0) - body = [ - {'replace': '/name', 'value': 'fedora'}, - {'replace': '/tags', 'value': ['king', 'kong']}, - {'replace': '/foo', 'value': 'bar'}, - {'add': '/bebim', 'value': 'bap'}, - {'remove': '/sparks'}, - {'add': '/locations/-', 'value': {'url': 'scheme3://path3', - 'metadata': {}}}, - {'add': '/locations/10', 'value': {'url': 'scheme4://path4', - 'metadata': {}}}, - {'remove': '/locations/2'}, - {'replace': '/locations', 'value': []}, - {'replace': '/locations', - 'value': [{'url': 'scheme5://path5', 'metadata': {}}, - {'url': 'scheme6://path6', 'metadata': {}}]}, - ] - request.body = jsonutils.dump_as_bytes(body) - output = self.deserializer.update(request) - expected = {'changes': [ - {'json_schema_version': 4, 'op': 'replace', - 'path': ['name'], 'value': 'fedora'}, - {'json_schema_version': 4, 'op': 'replace', - 'path': ['tags'], 'value': ['king', 'kong']}, - {'json_schema_version': 4, 'op': 'replace', - 'path': ['foo'], 'value': 'bar'}, - {'json_schema_version': 4, 
'op': 'add', - 'path': ['bebim'], 'value': 'bap'}, - {'json_schema_version': 4, 'op': 'remove', 'path': ['sparks']}, - {'json_schema_version': 4, 'op': 'add', - 'path': ['locations', '-'], - 'value': {'url': 'scheme3://path3', 'metadata': {}}}, - {'json_schema_version': 4, 'op': 'add', - 'path': ['locations', '10'], - 'value': {'url': 'scheme4://path4', 'metadata': {}}}, - {'json_schema_version': 4, 'op': 'remove', - 'path': ['locations', '2']}, - {'json_schema_version': 4, 'op': 'replace', - 'path': ['locations'], 'value': []}, - {'json_schema_version': 4, 'op': 'replace', 'path': ['locations'], - 'value': [{'url': 'scheme5://path5', 'metadata': {}}, - {'url': 'scheme6://path6', 'metadata': {}}]}, - ]} - self.assertEqual(expected, output) - - def test_update_base_attributes(self): - request = self._get_fake_patch_request() - body = [ - {'op': 'replace', 'path': '/name', 'value': 'fedora'}, - {'op': 'replace', 'path': '/visibility', 'value': 'public'}, - {'op': 'replace', 'path': '/tags', 'value': ['king', 'kong']}, - {'op': 'replace', 'path': '/protected', 'value': True}, - {'op': 'replace', 'path': '/container_format', 'value': 'bare'}, - {'op': 'replace', 'path': '/disk_format', 'value': 'raw'}, - {'op': 'replace', 'path': '/min_ram', 'value': 128}, - {'op': 'replace', 'path': '/min_disk', 'value': 10}, - {'op': 'replace', 'path': '/locations', 'value': []}, - {'op': 'replace', 'path': '/locations', - 'value': [{'url': 'scheme5://path5', 'metadata': {}}, - {'url': 'scheme6://path6', 'metadata': {}}]} - ] - request.body = jsonutils.dump_as_bytes(body) - output = self.deserializer.update(request) - expected = {'changes': [ - {'json_schema_version': 10, 'op': 'replace', - 'path': ['name'], 'value': 'fedora'}, - {'json_schema_version': 10, 'op': 'replace', - 'path': ['visibility'], 'value': 'public'}, - {'json_schema_version': 10, 'op': 'replace', - 'path': ['tags'], 'value': ['king', 'kong']}, - {'json_schema_version': 10, 'op': 'replace', - 'path': ['protected'], 
'value': True}, - {'json_schema_version': 10, 'op': 'replace', - 'path': ['container_format'], 'value': 'bare'}, - {'json_schema_version': 10, 'op': 'replace', - 'path': ['disk_format'], 'value': 'raw'}, - {'json_schema_version': 10, 'op': 'replace', - 'path': ['min_ram'], 'value': 128}, - {'json_schema_version': 10, 'op': 'replace', - 'path': ['min_disk'], 'value': 10}, - {'json_schema_version': 10, 'op': 'replace', - 'path': ['locations'], 'value': []}, - {'json_schema_version': 10, 'op': 'replace', 'path': ['locations'], - 'value': [{'url': 'scheme5://path5', 'metadata': {}}, - {'url': 'scheme6://path6', 'metadata': {}}]} - ]} - self.assertEqual(expected, output) - - def test_update_disallowed_attributes(self): - samples = { - 'direct_url': '/a/b/c/d', - 'self': '/e/f/g/h', - 'file': '/e/f/g/h/file', - 'schema': '/i/j/k', - } - - for key, value in samples.items(): - request = self._get_fake_patch_request() - body = [{'op': 'replace', 'path': '/%s' % key, 'value': value}] - request.body = jsonutils.dump_as_bytes(body) - try: - self.deserializer.update(request) - except webob.exc.HTTPForbidden: - pass # desired behavior - else: - self.fail("Updating %s did not result in HTTPForbidden" % key) - - def test_update_readonly_attributes(self): - samples = { - 'id': '00000000-0000-0000-0000-000000000000', - 'status': 'active', - 'checksum': 'abcdefghijklmnopqrstuvwxyz012345', - 'size': 9001, - 'virtual_size': 9001, - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - } - - for key, value in samples.items(): - request = self._get_fake_patch_request() - body = [{'op': 'replace', 'path': '/%s' % key, 'value': value}] - request.body = jsonutils.dump_as_bytes(body) - try: - self.deserializer.update(request) - except webob.exc.HTTPForbidden: - pass # desired behavior - else: - self.fail("Updating %s did not result in HTTPForbidden" % key) - - def test_update_reserved_attributes(self): - samples = { - 'deleted': False, - 'deleted_at': ISOTIME, - } - - for key, value in 
samples.items(): - request = self._get_fake_patch_request() - body = [{'op': 'replace', 'path': '/%s' % key, 'value': value}] - request.body = jsonutils.dump_as_bytes(body) - try: - self.deserializer.update(request) - except webob.exc.HTTPForbidden: - pass # desired behavior - else: - self.fail("Updating %s did not result in HTTPForbidden" % key) - - def test_update_invalid_attributes(self): - keys = [ - 'noslash', - '///twoslash', - '/two/ /slash', - '/ / ', - '/trailingslash/', - '/lone~tilde', - '/trailingtilde~' - ] - - for key in keys: - request = self._get_fake_patch_request() - body = [{'op': 'replace', 'path': '%s' % key, 'value': 'dummy'}] - request.body = jsonutils.dump_as_bytes(body) - try: - self.deserializer.update(request) - except webob.exc.HTTPBadRequest: - pass # desired behavior - else: - self.fail("Updating %s did not result in HTTPBadRequest" % key) - - def test_update_pointer_encoding(self): - samples = { - '/keywith~1slash': [u'keywith/slash'], - '/keywith~0tilde': [u'keywith~tilde'], - '/tricky~01': [u'tricky~1'], - } - - for encoded, decoded in samples.items(): - request = self._get_fake_patch_request() - doc = [{'op': 'replace', 'path': '%s' % encoded, 'value': 'dummy'}] - request.body = jsonutils.dump_as_bytes(doc) - output = self.deserializer.update(request) - self.assertEqual(decoded, output['changes'][0]['path']) - - def test_update_deep_limited_attributes(self): - samples = { - 'locations/1/2': [], - } - - for key, value in samples.items(): - request = self._get_fake_patch_request() - body = [{'op': 'replace', 'path': '/%s' % key, 'value': value}] - request.body = jsonutils.dump_as_bytes(body) - try: - self.deserializer.update(request) - except webob.exc.HTTPBadRequest: - pass # desired behavior - else: - self.fail("Updating %s did not result in HTTPBadRequest" % key) - - def test_update_v2_1_missing_operations(self): - request = self._get_fake_patch_request() - body = [{'path': '/colburn', 'value': 'arcata'}] - request.body = 
jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.update, request) - - def test_update_v2_1_missing_value(self): - request = self._get_fake_patch_request() - body = [{'op': 'replace', 'path': '/colburn'}] - request.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.update, request) - - def test_update_v2_1_missing_path(self): - request = self._get_fake_patch_request() - body = [{'op': 'replace', 'value': 'arcata'}] - request.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.update, request) - - def test_update_v2_0_multiple_operations(self): - request = self._get_fake_patch_request(content_type_minor_version=0) - body = [{'replace': '/foo', 'add': '/bar', 'value': 'snore'}] - request.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.update, request) - - def test_update_v2_0_missing_operations(self): - request = self._get_fake_patch_request(content_type_minor_version=0) - body = [{'value': 'arcata'}] - request.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.update, request) - - def test_update_v2_0_missing_value(self): - request = self._get_fake_patch_request(content_type_minor_version=0) - body = [{'replace': '/colburn'}] - request.body = jsonutils.dump_as_bytes(body) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.update, request) - - def test_index(self): - marker = str(uuid.uuid4()) - path = '/images?limit=1&marker=%s&member_status=pending' % marker - request = unit_test_utils.get_fake_request(path) - expected = {'limit': 1, - 'marker': marker, - 'sort_key': ['created_at'], - 'sort_dir': ['desc'], - 'member_status': 'pending', - 'filters': {}} - output = self.deserializer.index(request) - self.assertEqual(expected, output) - - def test_index_with_filter(self): - name = 'My Little Image' - 
path = '/images?name=%s' % name - request = unit_test_utils.get_fake_request(path) - output = self.deserializer.index(request) - self.assertEqual(name, output['filters']['name']) - - def test_index_strip_params_from_filters(self): - name = 'My Little Image' - path = '/images?name=%s' % name - request = unit_test_utils.get_fake_request(path) - output = self.deserializer.index(request) - self.assertEqual(name, output['filters']['name']) - self.assertEqual(1, len(output['filters'])) - - def test_index_with_many_filter(self): - name = 'My Little Image' - instance_id = str(uuid.uuid4()) - path = ('/images?name=%(name)s&id=%(instance_id)s' % - {'name': name, 'instance_id': instance_id}) - request = unit_test_utils.get_fake_request(path) - output = self.deserializer.index(request) - self.assertEqual(name, output['filters']['name']) - self.assertEqual(instance_id, output['filters']['id']) - - def test_index_with_filter_and_limit(self): - name = 'My Little Image' - path = '/images?name=%s&limit=1' % name - request = unit_test_utils.get_fake_request(path) - output = self.deserializer.index(request) - self.assertEqual(name, output['filters']['name']) - self.assertEqual(1, output['limit']) - - def test_index_non_integer_limit(self): - request = unit_test_utils.get_fake_request('/images?limit=blah') - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_zero_limit(self): - request = unit_test_utils.get_fake_request('/images?limit=0') - expected = {'limit': 0, - 'sort_key': ['created_at'], - 'member_status': 'accepted', - 'sort_dir': ['desc'], - 'filters': {}} - output = self.deserializer.index(request) - self.assertEqual(expected, output) - - def test_index_negative_limit(self): - request = unit_test_utils.get_fake_request('/images?limit=-1') - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_fraction(self): - request = unit_test_utils.get_fake_request('/images?limit=1.1') - 
self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_invalid_status(self): - path = '/images?member_status=blah' - request = unit_test_utils.get_fake_request(path) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_marker(self): - marker = str(uuid.uuid4()) - path = '/images?marker=%s' % marker - request = unit_test_utils.get_fake_request(path) - output = self.deserializer.index(request) - self.assertEqual(marker, output.get('marker')) - - def test_index_marker_not_specified(self): - request = unit_test_utils.get_fake_request('/images') - output = self.deserializer.index(request) - self.assertNotIn('marker', output) - - def test_index_limit_not_specified(self): - request = unit_test_utils.get_fake_request('/images') - output = self.deserializer.index(request) - self.assertNotIn('limit', output) - - def test_index_sort_key_id(self): - request = unit_test_utils.get_fake_request('/images?sort_key=id') - output = self.deserializer.index(request) - expected = { - 'sort_key': ['id'], - 'sort_dir': ['desc'], - 'member_status': 'accepted', - 'filters': {} - } - self.assertEqual(expected, output) - - def test_index_multiple_sort_keys(self): - request = unit_test_utils.get_fake_request('/images?' - 'sort_key=name&' - 'sort_key=size') - output = self.deserializer.index(request) - expected = { - 'sort_key': ['name', 'size'], - 'sort_dir': ['desc'], - 'member_status': 'accepted', - 'filters': {} - } - self.assertEqual(expected, output) - - def test_index_invalid_multiple_sort_keys(self): - # blah is an invalid sort key - request = unit_test_utils.get_fake_request('/images?' 
- 'sort_key=name&' - 'sort_key=blah') - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_sort_dir_asc(self): - request = unit_test_utils.get_fake_request('/images?sort_dir=asc') - output = self.deserializer.index(request) - expected = { - 'sort_key': ['created_at'], - 'sort_dir': ['asc'], - 'member_status': 'accepted', - 'filters': {}} - self.assertEqual(expected, output) - - def test_index_multiple_sort_dirs(self): - req_string = ('/images?sort_key=name&sort_dir=asc&' - 'sort_key=id&sort_dir=desc') - request = unit_test_utils.get_fake_request(req_string) - output = self.deserializer.index(request) - expected = { - 'sort_key': ['name', 'id'], - 'sort_dir': ['asc', 'desc'], - 'member_status': 'accepted', - 'filters': {}} - self.assertEqual(expected, output) - - def test_index_new_sorting_syntax_single_key_default_dir(self): - req_string = '/images?sort=name' - request = unit_test_utils.get_fake_request(req_string) - output = self.deserializer.index(request) - expected = { - 'sort_key': ['name'], - 'sort_dir': ['desc'], - 'member_status': 'accepted', - 'filters': {}} - self.assertEqual(expected, output) - - def test_index_new_sorting_syntax_single_key_desc_dir(self): - req_string = '/images?sort=name:desc' - request = unit_test_utils.get_fake_request(req_string) - output = self.deserializer.index(request) - expected = { - 'sort_key': ['name'], - 'sort_dir': ['desc'], - 'member_status': 'accepted', - 'filters': {}} - self.assertEqual(expected, output) - - def test_index_new_sorting_syntax_multiple_keys_default_dir(self): - req_string = '/images?sort=name,size' - request = unit_test_utils.get_fake_request(req_string) - output = self.deserializer.index(request) - expected = { - 'sort_key': ['name', 'size'], - 'sort_dir': ['desc', 'desc'], - 'member_status': 'accepted', - 'filters': {}} - self.assertEqual(expected, output) - - def test_index_new_sorting_syntax_multiple_keys_asc_dir(self): - req_string = 
'/images?sort=name:asc,size:asc' - request = unit_test_utils.get_fake_request(req_string) - output = self.deserializer.index(request) - expected = { - 'sort_key': ['name', 'size'], - 'sort_dir': ['asc', 'asc'], - 'member_status': 'accepted', - 'filters': {}} - self.assertEqual(expected, output) - - def test_index_new_sorting_syntax_multiple_keys_different_dirs(self): - req_string = '/images?sort=name:desc,size:asc' - request = unit_test_utils.get_fake_request(req_string) - output = self.deserializer.index(request) - expected = { - 'sort_key': ['name', 'size'], - 'sort_dir': ['desc', 'asc'], - 'member_status': 'accepted', - 'filters': {}} - self.assertEqual(expected, output) - - def test_index_new_sorting_syntax_multiple_keys_optional_dir(self): - req_string = '/images?sort=name:asc,size' - request = unit_test_utils.get_fake_request(req_string) - output = self.deserializer.index(request) - expected = { - 'sort_key': ['name', 'size'], - 'sort_dir': ['asc', 'desc'], - 'member_status': 'accepted', - 'filters': {}} - self.assertEqual(expected, output) - - req_string = '/images?sort=name,size:asc' - request = unit_test_utils.get_fake_request(req_string) - output = self.deserializer.index(request) - expected = { - 'sort_key': ['name', 'size'], - 'sort_dir': ['desc', 'asc'], - 'member_status': 'accepted', - 'filters': {}} - self.assertEqual(expected, output) - - req_string = '/images?sort=name,id:asc,size' - request = unit_test_utils.get_fake_request(req_string) - output = self.deserializer.index(request) - expected = { - 'sort_key': ['name', 'id', 'size'], - 'sort_dir': ['desc', 'asc', 'desc'], - 'member_status': 'accepted', - 'filters': {}} - self.assertEqual(expected, output) - - req_string = '/images?sort=name:asc,id,size:asc' - request = unit_test_utils.get_fake_request(req_string) - output = self.deserializer.index(request) - expected = { - 'sort_key': ['name', 'id', 'size'], - 'sort_dir': ['asc', 'desc', 'asc'], - 'member_status': 'accepted', - 'filters': {}} - 
self.assertEqual(expected, output) - - def test_index_sort_wrong_sort_dirs_number(self): - req_string = '/images?sort_key=name&sort_dir=asc&sort_dir=desc' - request = unit_test_utils.get_fake_request(req_string) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_sort_dirs_fewer_than_keys(self): - req_string = ('/images?sort_key=name&sort_dir=asc&sort_key=id&' - 'sort_dir=asc&sort_key=created_at') - request = unit_test_utils.get_fake_request(req_string) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_sort_wrong_sort_dirs_number_without_key(self): - req_string = '/images?sort_dir=asc&sort_dir=desc' - request = unit_test_utils.get_fake_request(req_string) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_sort_private_key(self): - request = unit_test_utils.get_fake_request('/images?sort_key=min_ram') - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_sort_key_invalid_value(self): - # blah is an invalid sort key - request = unit_test_utils.get_fake_request('/images?sort_key=blah') - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_sort_dir_invalid_value(self): - # foo is an invalid sort dir - request = unit_test_utils.get_fake_request('/images?sort_dir=foo') - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_new_sorting_syntax_invalid_request(self): - # 'blah' is not a supported sorting key - req_string = '/images?sort=blah' - request = unit_test_utils.get_fake_request(req_string) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - req_string = '/images?sort=name,blah' - request = unit_test_utils.get_fake_request(req_string) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - # 'foo' isn't a valid sort 
direction - req_string = '/images?sort=name:foo' - request = unit_test_utils.get_fake_request(req_string) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - # 'asc:desc' isn't a valid sort direction - req_string = '/images?sort=name:asc:desc' - request = unit_test_utils.get_fake_request(req_string) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_combined_sorting_syntax(self): - req_string = '/images?sort_dir=name&sort=name' - request = unit_test_utils.get_fake_request(req_string) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_with_tag(self): - path = '/images?tag=%s&tag=%s' % ('x86', '64bit') - request = unit_test_utils.get_fake_request(path) - output = self.deserializer.index(request) - self.assertEqual(sorted(['x86', '64bit']), - sorted(output['filters']['tags'])) - - -class TestImagesDeserializerWithExtendedSchema(test_utils.BaseTestCase): - - def setUp(self): - super(TestImagesDeserializerWithExtendedSchema, self).setUp() - self.config(allow_additional_image_properties=False) - custom_image_properties = { - 'pants': { - 'type': 'string', - 'enum': ['on', 'off'], - }, - } - schema = glance.api.v2.images.get_schema(custom_image_properties) - self.deserializer = glance.api.v2.images.RequestDeserializer(schema) - - def test_create(self): - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({ - 'name': 'image-1', - 'pants': 'on' - }) - output = self.deserializer.create(request) - expected = { - 'image': {'name': 'image-1'}, - 'extra_properties': {'pants': 'on'}, - 'tags': [], - } - self.assertEqual(expected, output) - - def test_create_bad_data(self): - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({ - 'name': 'image-1', - 'pants': 'borked' - }) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.create, request) - - def test_update(self): - 
request = unit_test_utils.get_fake_request() - request.content_type = 'application/openstack-images-v2.1-json-patch' - doc = [{'op': 'add', 'path': '/pants', 'value': 'off'}] - request.body = jsonutils.dump_as_bytes(doc) - output = self.deserializer.update(request) - expected = {'changes': [ - {'json_schema_version': 10, 'op': 'add', - 'path': ['pants'], 'value': 'off'}, - ]} - self.assertEqual(expected, output) - - def test_update_bad_data(self): - request = unit_test_utils.get_fake_request() - request.content_type = 'application/openstack-images-v2.1-json-patch' - doc = [{'op': 'add', 'path': '/pants', 'value': 'cutoffs'}] - request.body = jsonutils.dump_as_bytes(doc) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.update, - request) - - -class TestImagesDeserializerWithAdditionalProperties(test_utils.BaseTestCase): - - def setUp(self): - super(TestImagesDeserializerWithAdditionalProperties, self).setUp() - self.config(allow_additional_image_properties=True) - self.deserializer = glance.api.v2.images.RequestDeserializer() - - def test_create(self): - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({'foo': 'bar'}) - output = self.deserializer.create(request) - expected = {'image': {}, - 'extra_properties': {'foo': 'bar'}, - 'tags': []} - self.assertEqual(expected, output) - - def test_create_with_numeric_property(self): - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({'abc': 123}) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.create, request) - - def test_update_with_numeric_property(self): - request = unit_test_utils.get_fake_request() - request.content_type = 'application/openstack-images-v2.1-json-patch' - doc = [{'op': 'add', 'path': '/foo', 'value': 123}] - request.body = jsonutils.dump_as_bytes(doc) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.update, request) - - def test_create_with_list_property(self): - request = 
unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({'foo': ['bar']}) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.create, request) - - def test_update_with_list_property(self): - request = unit_test_utils.get_fake_request() - request.content_type = 'application/openstack-images-v2.1-json-patch' - doc = [{'op': 'add', 'path': '/foo', 'value': ['bar', 'baz']}] - request.body = jsonutils.dump_as_bytes(doc) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.update, request) - - def test_update(self): - request = unit_test_utils.get_fake_request() - request.content_type = 'application/openstack-images-v2.1-json-patch' - doc = [{'op': 'add', 'path': '/foo', 'value': 'bar'}] - request.body = jsonutils.dump_as_bytes(doc) - output = self.deserializer.update(request) - change = { - 'json_schema_version': 10, 'op': 'add', - 'path': ['foo'], 'value': 'bar' - } - self.assertEqual({'changes': [change]}, output) - - -class TestImagesDeserializerNoAdditionalProperties(test_utils.BaseTestCase): - - def setUp(self): - super(TestImagesDeserializerNoAdditionalProperties, self).setUp() - self.config(allow_additional_image_properties=False) - self.deserializer = glance.api.v2.images.RequestDeserializer() - - def test_create_with_additional_properties_disallowed(self): - self.config(allow_additional_image_properties=False) - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({'foo': 'bar'}) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.create, request) - - def test_update(self): - request = unit_test_utils.get_fake_request() - request.content_type = 'application/openstack-images-v2.1-json-patch' - doc = [{'op': 'add', 'path': '/foo', 'value': 'bar'}] - request.body = jsonutils.dump_as_bytes(doc) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.update, request) - - -class TestImagesSerializer(test_utils.BaseTestCase): - - def setUp(self): - 
super(TestImagesSerializer, self).setUp() - self.serializer = glance.api.v2.images.ResponseSerializer() - self.fixtures = [ - # NOTE(bcwaldon): This first fixture has every property defined - _domain_fixture(UUID1, name='image-1', size=1024, - virtual_size=3072, created_at=DATETIME, - updated_at=DATETIME, owner=TENANT1, - visibility='public', container_format='ami', - tags=['one', 'two'], disk_format='ami', - min_ram=128, min_disk=10, - checksum='ca425b88f047ce8ec45ee90e813ada91'), - - # NOTE(bcwaldon): This second fixture depends on default behavior - # and sets most values to None - _domain_fixture(UUID2, created_at=DATETIME, updated_at=DATETIME), - ] - - def test_index(self): - expected = { - 'images': [ - { - 'id': UUID1, - 'name': 'image-1', - 'status': 'queued', - 'visibility': 'public', - 'protected': False, - 'tags': set(['one', 'two']), - 'size': 1024, - 'virtual_size': 3072, - 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', - 'container_format': 'ami', - 'disk_format': 'ami', - 'min_ram': 128, - 'min_disk': 10, - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': '/v2/images/%s' % UUID1, - 'file': '/v2/images/%s/file' % UUID1, - 'schema': '/v2/schemas/image', - 'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df', - }, - { - 'id': UUID2, - 'status': 'queued', - 'visibility': 'private', - 'protected': False, - 'tags': set([]), - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': '/v2/images/%s' % UUID2, - 'file': '/v2/images/%s/file' % UUID2, - 'schema': '/v2/schemas/image', - 'size': None, - 'name': None, - 'owner': None, - 'min_ram': None, - 'min_disk': None, - 'checksum': None, - 'disk_format': None, - 'virtual_size': None, - 'container_format': None, - - }, - ], - 'first': '/v2/images', - 'schema': '/v2/schemas/images', - } - request = webob.Request.blank('/v2/images') - response = webob.Response(request=request) - result = {'images': self.fixtures} - self.serializer.index(response, result) - actual = jsonutils.loads(response.body) - for image 
in actual['images']: - image['tags'] = set(image['tags']) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - - def test_index_next_marker(self): - request = webob.Request.blank('/v2/images') - response = webob.Response(request=request) - result = {'images': self.fixtures, 'next_marker': UUID2} - self.serializer.index(response, result) - output = jsonutils.loads(response.body) - self.assertEqual('/v2/images?marker=%s' % UUID2, output['next']) - - def test_index_carries_query_parameters(self): - url = '/v2/images?limit=10&sort_key=id&sort_dir=asc' - request = webob.Request.blank(url) - response = webob.Response(request=request) - result = {'images': self.fixtures, 'next_marker': UUID2} - self.serializer.index(response, result) - output = jsonutils.loads(response.body) - - expected_url = '/v2/images?limit=10&sort_dir=asc&sort_key=id' - self.assertEqual(unit_test_utils.sort_url_by_qs_keys(expected_url), - unit_test_utils.sort_url_by_qs_keys(output['first'])) - expect_next = '/v2/images?limit=10&marker=%s&sort_dir=asc&sort_key=id' - self.assertEqual(unit_test_utils.sort_url_by_qs_keys( - expect_next % UUID2), - unit_test_utils.sort_url_by_qs_keys(output['next'])) - - def test_index_forbidden_get_image_location(self): - """Make sure the serializer works fine. - - No mater if current user is authorized to get image location if the - show_multiple_locations is False. 
- - """ - class ImageLocations(object): - def __len__(self): - raise exception.Forbidden() - - self.config(show_multiple_locations=False) - self.config(show_image_direct_url=False) - url = '/v2/images?limit=10&sort_key=id&sort_dir=asc' - request = webob.Request.blank(url) - response = webob.Response(request=request) - result = {'images': self.fixtures} - self.assertEqual(http.OK, response.status_int) - - # The image index should work though the user is forbidden - result['images'][0].locations = ImageLocations() - self.serializer.index(response, result) - self.assertEqual(http.OK, response.status_int) - - def test_show_full_fixture(self): - expected = { - 'id': UUID1, - 'name': 'image-1', - 'status': 'queued', - 'visibility': 'public', - 'protected': False, - 'tags': set(['one', 'two']), - 'size': 1024, - 'virtual_size': 3072, - 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', - 'container_format': 'ami', - 'disk_format': 'ami', - 'min_ram': 128, - 'min_disk': 10, - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': '/v2/images/%s' % UUID1, - 'file': '/v2/images/%s/file' % UUID1, - 'schema': '/v2/schemas/image', - 'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df', - } - response = webob.Response() - self.serializer.show(response, self.fixtures[0]) - actual = jsonutils.loads(response.body) - actual['tags'] = set(actual['tags']) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - - def test_show_minimal_fixture(self): - expected = { - 'id': UUID2, - 'status': 'queued', - 'visibility': 'private', - 'protected': False, - 'tags': [], - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': '/v2/images/%s' % UUID2, - 'file': '/v2/images/%s/file' % UUID2, - 'schema': '/v2/schemas/image', - 'size': None, - 'name': None, - 'owner': None, - 'min_ram': None, - 'min_disk': None, - 'checksum': None, - 'disk_format': None, - 'virtual_size': None, - 'container_format': None, - } - response = webob.Response() - 
self.serializer.show(response, self.fixtures[1]) - self.assertEqual(expected, jsonutils.loads(response.body)) - - def test_create(self): - expected = { - 'id': UUID1, - 'name': 'image-1', - 'status': 'queued', - 'visibility': 'public', - 'protected': False, - 'tags': ['one', 'two'], - 'size': 1024, - 'virtual_size': 3072, - 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', - 'container_format': 'ami', - 'disk_format': 'ami', - 'min_ram': 128, - 'min_disk': 10, - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': '/v2/images/%s' % UUID1, - 'file': '/v2/images/%s/file' % UUID1, - 'schema': '/v2/schemas/image', - 'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df', - } - response = webob.Response() - self.serializer.create(response, self.fixtures[0]) - self.assertEqual(http.CREATED, response.status_int) - actual = jsonutils.loads(response.body) - actual['tags'] = sorted(actual['tags']) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - self.assertEqual('/v2/images/%s' % UUID1, response.location) - - def test_update(self): - expected = { - 'id': UUID1, - 'name': 'image-1', - 'status': 'queued', - 'visibility': 'public', - 'protected': False, - 'tags': set(['one', 'two']), - 'size': 1024, - 'virtual_size': 3072, - 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', - 'container_format': 'ami', - 'disk_format': 'ami', - 'min_ram': 128, - 'min_disk': 10, - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': '/v2/images/%s' % UUID1, - 'file': '/v2/images/%s/file' % UUID1, - 'schema': '/v2/schemas/image', - 'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df', - } - response = webob.Response() - self.serializer.update(response, self.fixtures[0]) - actual = jsonutils.loads(response.body) - actual['tags'] = set(actual['tags']) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - - -class TestImagesSerializerWithUnicode(test_utils.BaseTestCase): - - def setUp(self): - 
super(TestImagesSerializerWithUnicode, self).setUp() - self.serializer = glance.api.v2.images.ResponseSerializer() - self.fixtures = [ - # NOTE(bcwaldon): This first fixture has every property defined - _domain_fixture(UUID1, **{ - 'name': u'OpenStack\u2122-1', - 'size': 1024, - 'virtual_size': 3072, - 'tags': [u'\u2160', u'\u2161'], - 'created_at': DATETIME, - 'updated_at': DATETIME, - 'owner': TENANT1, - 'visibility': 'public', - 'container_format': 'ami', - 'disk_format': 'ami', - 'min_ram': 128, - 'min_disk': 10, - 'checksum': u'ca425b88f047ce8ec45ee90e813ada91', - 'extra_properties': {'lang': u'Fran\u00E7ais', - u'dispos\u00E9': u'f\u00E2ch\u00E9'}, - }), - ] - - def test_index(self): - expected = { - u'images': [ - { - u'id': UUID1, - u'name': u'OpenStack\u2122-1', - u'status': u'queued', - u'visibility': u'public', - u'protected': False, - u'tags': [u'\u2160', u'\u2161'], - u'size': 1024, - u'virtual_size': 3072, - u'checksum': u'ca425b88f047ce8ec45ee90e813ada91', - u'container_format': u'ami', - u'disk_format': u'ami', - u'min_ram': 128, - u'min_disk': 10, - u'created_at': six.text_type(ISOTIME), - u'updated_at': six.text_type(ISOTIME), - u'self': u'/v2/images/%s' % UUID1, - u'file': u'/v2/images/%s/file' % UUID1, - u'schema': u'/v2/schemas/image', - u'lang': u'Fran\u00E7ais', - u'dispos\u00E9': u'f\u00E2ch\u00E9', - u'owner': u'6838eb7b-6ded-434a-882c-b344c77fe8df', - }, - ], - u'first': u'/v2/images', - u'schema': u'/v2/schemas/images', - } - request = webob.Request.blank('/v2/images') - response = webob.Response(request=request) - result = {u'images': self.fixtures} - self.serializer.index(response, result) - actual = jsonutils.loads(response.body) - actual['images'][0]['tags'] = sorted(actual['images'][0]['tags']) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - - def test_show_full_fixture(self): - expected = { - u'id': UUID1, - u'name': u'OpenStack\u2122-1', - u'status': u'queued', - u'visibility': 
u'public', - u'protected': False, - u'tags': set([u'\u2160', u'\u2161']), - u'size': 1024, - u'virtual_size': 3072, - u'checksum': u'ca425b88f047ce8ec45ee90e813ada91', - u'container_format': u'ami', - u'disk_format': u'ami', - u'min_ram': 128, - u'min_disk': 10, - u'created_at': six.text_type(ISOTIME), - u'updated_at': six.text_type(ISOTIME), - u'self': u'/v2/images/%s' % UUID1, - u'file': u'/v2/images/%s/file' % UUID1, - u'schema': u'/v2/schemas/image', - u'lang': u'Fran\u00E7ais', - u'dispos\u00E9': u'f\u00E2ch\u00E9', - u'owner': u'6838eb7b-6ded-434a-882c-b344c77fe8df', - } - response = webob.Response() - self.serializer.show(response, self.fixtures[0]) - actual = jsonutils.loads(response.body) - actual['tags'] = set(actual['tags']) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - - def test_create(self): - expected = { - u'id': UUID1, - u'name': u'OpenStack\u2122-1', - u'status': u'queued', - u'visibility': u'public', - u'protected': False, - u'tags': [u'\u2160', u'\u2161'], - u'size': 1024, - u'virtual_size': 3072, - u'checksum': u'ca425b88f047ce8ec45ee90e813ada91', - u'container_format': u'ami', - u'disk_format': u'ami', - u'min_ram': 128, - u'min_disk': 10, - u'created_at': six.text_type(ISOTIME), - u'updated_at': six.text_type(ISOTIME), - u'self': u'/v2/images/%s' % UUID1, - u'file': u'/v2/images/%s/file' % UUID1, - u'schema': u'/v2/schemas/image', - u'lang': u'Fran\u00E7ais', - u'dispos\u00E9': u'f\u00E2ch\u00E9', - u'owner': u'6838eb7b-6ded-434a-882c-b344c77fe8df', - } - response = webob.Response() - self.serializer.create(response, self.fixtures[0]) - self.assertEqual(http.CREATED, response.status_int) - actual = jsonutils.loads(response.body) - actual['tags'] = sorted(actual['tags']) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - self.assertEqual('/v2/images/%s' % UUID1, response.location) - - def test_update(self): - expected = { - u'id': UUID1, - 
u'name': u'OpenStack\u2122-1', - u'status': u'queued', - u'visibility': u'public', - u'protected': False, - u'tags': set([u'\u2160', u'\u2161']), - u'size': 1024, - u'virtual_size': 3072, - u'checksum': u'ca425b88f047ce8ec45ee90e813ada91', - u'container_format': u'ami', - u'disk_format': u'ami', - u'min_ram': 128, - u'min_disk': 10, - u'created_at': six.text_type(ISOTIME), - u'updated_at': six.text_type(ISOTIME), - u'self': u'/v2/images/%s' % UUID1, - u'file': u'/v2/images/%s/file' % UUID1, - u'schema': u'/v2/schemas/image', - u'lang': u'Fran\u00E7ais', - u'dispos\u00E9': u'f\u00E2ch\u00E9', - u'owner': u'6838eb7b-6ded-434a-882c-b344c77fe8df', - } - response = webob.Response() - self.serializer.update(response, self.fixtures[0]) - actual = jsonutils.loads(response.body) - actual['tags'] = set(actual['tags']) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - - -class TestImagesSerializerWithExtendedSchema(test_utils.BaseTestCase): - - def setUp(self): - super(TestImagesSerializerWithExtendedSchema, self).setUp() - self.config(allow_additional_image_properties=False) - custom_image_properties = { - 'color': { - 'type': 'string', - 'enum': ['red', 'green'], - }, - } - schema = glance.api.v2.images.get_schema(custom_image_properties) - self.serializer = glance.api.v2.images.ResponseSerializer(schema) - - props = dict(color='green', mood='grouchy') - self.fixture = _domain_fixture( - UUID2, name='image-2', owner=TENANT2, - checksum='ca425b88f047ce8ec45ee90e813ada91', - created_at=DATETIME, updated_at=DATETIME, size=1024, - virtual_size=3072, extra_properties=props) - - def test_show(self): - expected = { - 'id': UUID2, - 'name': 'image-2', - 'status': 'queued', - 'visibility': 'private', - 'protected': False, - 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', - 'tags': [], - 'size': 1024, - 'virtual_size': 3072, - 'owner': '2c014f32-55eb-467d-8fcb-4bd706012f81', - 'color': 'green', - 'created_at': ISOTIME, - 
'updated_at': ISOTIME, - 'self': '/v2/images/%s' % UUID2, - 'file': '/v2/images/%s/file' % UUID2, - 'schema': '/v2/schemas/image', - 'min_ram': None, - 'min_disk': None, - 'disk_format': None, - 'container_format': None, - } - response = webob.Response() - self.serializer.show(response, self.fixture) - self.assertEqual(expected, jsonutils.loads(response.body)) - - def test_show_reports_invalid_data(self): - self.fixture.extra_properties['color'] = 'invalid' - expected = { - 'id': UUID2, - 'name': 'image-2', - 'status': 'queued', - 'visibility': 'private', - 'protected': False, - 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', - 'tags': [], - 'size': 1024, - 'virtual_size': 3072, - 'owner': '2c014f32-55eb-467d-8fcb-4bd706012f81', - 'color': 'invalid', - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': '/v2/images/%s' % UUID2, - 'file': '/v2/images/%s/file' % UUID2, - 'schema': '/v2/schemas/image', - 'min_ram': None, - 'min_disk': None, - 'disk_format': None, - 'container_format': None, - } - response = webob.Response() - self.serializer.show(response, self.fixture) - self.assertEqual(expected, jsonutils.loads(response.body)) - - -class TestImagesSerializerWithAdditionalProperties(test_utils.BaseTestCase): - - def setUp(self): - super(TestImagesSerializerWithAdditionalProperties, self).setUp() - self.config(allow_additional_image_properties=True) - self.fixture = _domain_fixture( - UUID2, name='image-2', owner=TENANT2, - checksum='ca425b88f047ce8ec45ee90e813ada91', - created_at=DATETIME, updated_at=DATETIME, size=1024, - virtual_size=3072, extra_properties={'marx': 'groucho'}) - - def test_show(self): - serializer = glance.api.v2.images.ResponseSerializer() - expected = { - 'id': UUID2, - 'name': 'image-2', - 'status': 'queued', - 'visibility': 'private', - 'protected': False, - 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', - 'marx': 'groucho', - 'tags': [], - 'size': 1024, - 'virtual_size': 3072, - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': 
'/v2/images/%s' % UUID2, - 'file': '/v2/images/%s/file' % UUID2, - 'schema': '/v2/schemas/image', - 'owner': '2c014f32-55eb-467d-8fcb-4bd706012f81', - 'min_ram': None, - 'min_disk': None, - 'disk_format': None, - 'container_format': None, - } - response = webob.Response() - serializer.show(response, self.fixture) - self.assertEqual(expected, jsonutils.loads(response.body)) - - def test_show_invalid_additional_property(self): - """Ensure that the serializer passes - through invalid additional properties. - - It must not complains with i.e. non-string. - """ - serializer = glance.api.v2.images.ResponseSerializer() - self.fixture.extra_properties['marx'] = 123 - expected = { - 'id': UUID2, - 'name': 'image-2', - 'status': 'queued', - 'visibility': 'private', - 'protected': False, - 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', - 'marx': 123, - 'tags': [], - 'size': 1024, - 'virtual_size': 3072, - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': '/v2/images/%s' % UUID2, - 'file': '/v2/images/%s/file' % UUID2, - 'schema': '/v2/schemas/image', - 'owner': '2c014f32-55eb-467d-8fcb-4bd706012f81', - 'min_ram': None, - 'min_disk': None, - 'disk_format': None, - 'container_format': None, - } - response = webob.Response() - serializer.show(response, self.fixture) - self.assertEqual(expected, jsonutils.loads(response.body)) - - def test_show_with_additional_properties_disabled(self): - self.config(allow_additional_image_properties=False) - serializer = glance.api.v2.images.ResponseSerializer() - expected = { - 'id': UUID2, - 'name': 'image-2', - 'status': 'queued', - 'visibility': 'private', - 'protected': False, - 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', - 'tags': [], - 'size': 1024, - 'virtual_size': 3072, - 'owner': '2c014f32-55eb-467d-8fcb-4bd706012f81', - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': '/v2/images/%s' % UUID2, - 'file': '/v2/images/%s/file' % UUID2, - 'schema': '/v2/schemas/image', - 'min_ram': None, - 'min_disk': None, - 
'disk_format': None, - 'container_format': None, - } - response = webob.Response() - serializer.show(response, self.fixture) - self.assertEqual(expected, jsonutils.loads(response.body)) - - -class TestImagesSerializerDirectUrl(test_utils.BaseTestCase): - def setUp(self): - super(TestImagesSerializerDirectUrl, self).setUp() - self.serializer = glance.api.v2.images.ResponseSerializer() - - self.active_image = _domain_fixture( - UUID1, name='image-1', visibility='public', - status='active', size=1024, virtual_size=3072, - created_at=DATETIME, updated_at=DATETIME, - locations=[{'id': '1', 'url': 'http://some/fake/location', - 'metadata': {}, 'status': 'active'}]) - - self.queued_image = _domain_fixture( - UUID2, name='image-2', status='active', - created_at=DATETIME, updated_at=DATETIME, - checksum='ca425b88f047ce8ec45ee90e813ada91') - - self.location_data_image_url = 'http://abc.com/somewhere' - self.location_data_image_meta = {'key': 98231} - self.location_data_image = _domain_fixture( - UUID2, name='image-2', status='active', - created_at=DATETIME, updated_at=DATETIME, - locations=[{'id': '2', - 'url': self.location_data_image_url, - 'metadata': self.location_data_image_meta, - 'status': 'active'}]) - - def _do_index(self): - request = webob.Request.blank('/v2/images') - response = webob.Response(request=request) - self.serializer.index(response, - {'images': [self.active_image, - self.queued_image]}) - return jsonutils.loads(response.body)['images'] - - def _do_show(self, image): - request = webob.Request.blank('/v2/images') - response = webob.Response(request=request) - self.serializer.show(response, image) - return jsonutils.loads(response.body) - - def test_index_store_location_enabled(self): - self.config(show_image_direct_url=True) - images = self._do_index() - - # NOTE(markwash): ordering sanity check - self.assertEqual(UUID1, images[0]['id']) - self.assertEqual(UUID2, images[1]['id']) - - self.assertEqual('http://some/fake/location', images[0]['direct_url']) 
- self.assertNotIn('direct_url', images[1]) - - def test_index_store_multiple_location_enabled(self): - self.config(show_multiple_locations=True) - request = webob.Request.blank('/v2/images') - response = webob.Response(request=request) - self.serializer.index(response, - {'images': [self.location_data_image]}), - images = jsonutils.loads(response.body)['images'] - location = images[0]['locations'][0] - self.assertEqual(location['url'], self.location_data_image_url) - self.assertEqual(location['metadata'], self.location_data_image_meta) - - def test_index_store_location_explicitly_disabled(self): - self.config(show_image_direct_url=False) - images = self._do_index() - self.assertNotIn('direct_url', images[0]) - self.assertNotIn('direct_url', images[1]) - - def test_show_location_enabled(self): - self.config(show_image_direct_url=True) - image = self._do_show(self.active_image) - self.assertEqual('http://some/fake/location', image['direct_url']) - - def test_show_location_enabled_but_not_set(self): - self.config(show_image_direct_url=True) - image = self._do_show(self.queued_image) - self.assertNotIn('direct_url', image) - - def test_show_location_explicitly_disabled(self): - self.config(show_image_direct_url=False) - image = self._do_show(self.active_image) - self.assertNotIn('direct_url', image) - - -class TestImageSchemaFormatConfiguration(test_utils.BaseTestCase): - def test_default_disk_formats(self): - schema = glance.api.v2.images.get_schema() - expected = [None, 'ami', 'ari', 'aki', 'vhd', 'vhdx', 'vmdk', - 'raw', 'qcow2', 'vdi', 'iso', 'ploop'] - actual = schema.properties['disk_format']['enum'] - self.assertEqual(expected, actual) - - def test_custom_disk_formats(self): - self.config(disk_formats=['gabe'], group="image_format") - schema = glance.api.v2.images.get_schema() - expected = [None, 'gabe'] - actual = schema.properties['disk_format']['enum'] - self.assertEqual(expected, actual) - - def test_default_container_formats(self): - schema = 
glance.api.v2.images.get_schema() - expected = [None, 'ami', 'ari', 'aki', 'bare', 'ovf', 'ova', 'docker'] - actual = schema.properties['container_format']['enum'] - self.assertEqual(expected, actual) - - def test_custom_container_formats(self): - self.config(container_formats=['mark'], group="image_format") - schema = glance.api.v2.images.get_schema() - expected = [None, 'mark'] - actual = schema.properties['container_format']['enum'] - self.assertEqual(expected, actual) - - -class TestImageSchemaDeterminePropertyBasis(test_utils.BaseTestCase): - def test_custom_property_marked_as_non_base(self): - self.config(allow_additional_image_properties=False) - custom_image_properties = { - 'pants': { - 'type': 'string', - }, - } - schema = glance.api.v2.images.get_schema(custom_image_properties) - self.assertFalse(schema.properties['pants'].get('is_base', True)) - - def test_base_property_marked_as_base(self): - schema = glance.api.v2.images.get_schema() - self.assertTrue(schema.properties['disk_format'].get('is_base', True)) diff --git a/glance/tests/unit/v2/test_metadef_resources.py b/glance/tests/unit/v2/test_metadef_resources.py deleted file mode 100644 index 2f120b64..00000000 --- a/glance/tests/unit/v2/test_metadef_resources.py +++ /dev/null @@ -1,2079 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime -import mock - -from oslo_serialization import jsonutils -import webob - -from glance.api.v2 import metadef_namespaces as namespaces -from glance.api.v2 import metadef_objects as objects -from glance.api.v2 import metadef_properties as properties -from glance.api.v2 import metadef_resource_types as resource_types -from glance.api.v2 import metadef_tags as tags -import glance.gateway -from glance.tests.unit import base -import glance.tests.unit.utils as unit_test_utils - -DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355) -ISOTIME = '2012-05-16T15:27:36Z' - -NAMESPACE1 = 'Namespace1' -NAMESPACE2 = 'Namespace2' -NAMESPACE3 = 'Namespace3' -NAMESPACE4 = 'Namespace4' -NAMESPACE5 = 'Namespace5' -NAMESPACE6 = 'Namespace6' - -PROPERTY1 = 'Property1' -PROPERTY2 = 'Property2' -PROPERTY3 = 'Property3' -PROPERTY4 = 'Property4' - -OBJECT1 = 'Object1' -OBJECT2 = 'Object2' -OBJECT3 = 'Object3' - -RESOURCE_TYPE1 = 'ResourceType1' -RESOURCE_TYPE2 = 'ResourceType2' -RESOURCE_TYPE3 = 'ResourceType3' -RESOURCE_TYPE4 = 'ResourceType4' - -TAG1 = 'Tag1' -TAG2 = 'Tag2' -TAG3 = 'Tag3' -TAG4 = 'Tag4' -TAG5 = 'Tag5' - -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' -TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' -TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' -TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' - -PREFIX1 = 'pref' - - -def _db_namespace_fixture(namespace, **kwargs): - obj = { - 'namespace': namespace, - 'display_name': None, - 'description': None, - 'visibility': 'public', - 'protected': False, - 'owner': None, - } - obj.update(kwargs) - return obj - - -def _db_property_fixture(name, **kwargs): - obj = { - 'name': name, - 'json_schema': {"type": "string", "title": "title"}, - } - obj.update(kwargs) - return obj - - -def _db_object_fixture(name, **kwargs): - obj = { - 'name': name, - 'description': None, - 'json_schema': {}, - 'required': '[]', - } - obj.update(kwargs) - return obj - - -def _db_resource_type_fixture(name, **kwargs): - obj = { 
- 'name': name, - 'protected': False, - } - obj.update(kwargs) - return obj - - -def _db_tag_fixture(name, **kwargs): - obj = { - 'name': name - } - obj.update(kwargs) - return obj - - -def _db_tags_fixture(tag_names=None): - tag_list = [] - if not tag_names: - tag_names = [TAG1, TAG2, TAG3] - - for tag_name in tag_names: - tag = tags.MetadefTag() - tag.name = tag_name - tag_list.append(tag) - return tag_list - - -def _db_namespace_resource_type_fixture(name, **kwargs): - obj = { - 'name': name, - 'properties_target': None, - 'prefix': None, - } - obj.update(kwargs) - return obj - - -class TestMetadefsControllers(base.IsolatedUnitTest): - - def setUp(self): - super(TestMetadefsControllers, self).setUp() - self.db = unit_test_utils.FakeDB(initialize=False) - self.policy = unit_test_utils.FakePolicyEnforcer() - self.notifier = unit_test_utils.FakeNotifier() - self._create_namespaces() - self._create_properties() - self._create_objects() - self._create_resource_types() - self._create_namespaces_resource_types() - self._create_tags() - self.namespace_controller = namespaces.NamespaceController( - self.db, self.policy, self.notifier) - self.property_controller = properties.NamespacePropertiesController( - self.db, self.policy, self.notifier) - self.object_controller = objects.MetadefObjectsController( - self.db, self.policy, self.notifier) - self.rt_controller = resource_types.ResourceTypeController( - self.db, self.policy, self.notifier) - self.tag_controller = tags.TagsController( - self.db, self.policy, self.notifier) - self.deserializer = objects.RequestDeserializer() - self.property_deserializer = properties.RequestDeserializer() - - def _create_namespaces(self): - req = unit_test_utils.get_fake_request() - self.namespaces = [ - _db_namespace_fixture(NAMESPACE1, owner=TENANT1, - visibility='private', protected=True), - _db_namespace_fixture(NAMESPACE2, owner=TENANT2, - visibility='private'), - _db_namespace_fixture(NAMESPACE3, owner=TENANT3), - 
_db_namespace_fixture(NAMESPACE5, owner=TENANT4), - _db_namespace_fixture(NAMESPACE6, owner=TENANT4), - ] - [self.db.metadef_namespace_create(req.context, namespace) - for namespace in self.namespaces] - - def _create_properties(self): - req = unit_test_utils.get_fake_request() - self.properties = [ - (NAMESPACE3, _db_property_fixture(PROPERTY1)), - (NAMESPACE3, _db_property_fixture(PROPERTY2)), - (NAMESPACE1, _db_property_fixture(PROPERTY1)), - (NAMESPACE6, _db_property_fixture(PROPERTY4)), - ] - [self.db.metadef_property_create(req.context, namespace, property) - for namespace, property in self.properties] - - def _create_objects(self): - req = unit_test_utils.get_fake_request() - self.objects = [ - (NAMESPACE3, _db_object_fixture(OBJECT1)), - (NAMESPACE3, _db_object_fixture(OBJECT2)), - (NAMESPACE1, _db_object_fixture(OBJECT1)), - ] - [self.db.metadef_object_create(req.context, namespace, object) - for namespace, object in self.objects] - - def _create_resource_types(self): - req = unit_test_utils.get_fake_request() - self.resource_types = [ - _db_resource_type_fixture(RESOURCE_TYPE1), - _db_resource_type_fixture(RESOURCE_TYPE2), - _db_resource_type_fixture(RESOURCE_TYPE4), - ] - [self.db.metadef_resource_type_create(req.context, resource_type) - for resource_type in self.resource_types] - - def _create_tags(self): - req = unit_test_utils.get_fake_request() - self.tags = [ - (NAMESPACE3, _db_tag_fixture(TAG1)), - (NAMESPACE3, _db_tag_fixture(TAG2)), - (NAMESPACE1, _db_tag_fixture(TAG1)), - ] - [self.db.metadef_tag_create(req.context, namespace, tag) - for namespace, tag in self.tags] - - def _create_namespaces_resource_types(self): - req = unit_test_utils.get_fake_request(is_admin=True) - self.ns_resource_types = [ - (NAMESPACE1, _db_namespace_resource_type_fixture(RESOURCE_TYPE1)), - (NAMESPACE3, _db_namespace_resource_type_fixture(RESOURCE_TYPE1)), - (NAMESPACE2, _db_namespace_resource_type_fixture(RESOURCE_TYPE1)), - (NAMESPACE2, 
_db_namespace_resource_type_fixture(RESOURCE_TYPE2)), - (NAMESPACE6, _db_namespace_resource_type_fixture(RESOURCE_TYPE4, - prefix=PREFIX1)), - ] - [self.db.metadef_resource_type_association_create(req.context, - namespace, - ns_resource_type) - for namespace, ns_resource_type in self.ns_resource_types] - - def assertNotificationLog(self, expected_event_type, expected_payloads): - events = [{'type': expected_event_type, - 'payload': payload} for payload in expected_payloads] - - self.assertNotificationsLog(events) - - def assertNotificationsLog(self, expected_events): - output_logs = self.notifier.get_logs() - expected_logs_count = len(expected_events) - self.assertEqual(expected_logs_count, len(output_logs)) - - for output_log, event in zip(output_logs, expected_events): - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual(event['type'], output_log['event_type']) - self.assertDictContainsSubset(event['payload'], - output_log['payload']) - self.notifier.log = [] - - def test_namespace_index(self): - request = unit_test_utils.get_fake_request() - output = self.namespace_controller.index(request) - output = output.to_dict() - self.assertEqual(4, len(output['namespaces'])) - actual = set([namespace.namespace for - namespace in output['namespaces']]) - expected = set([NAMESPACE1, NAMESPACE3, NAMESPACE5, NAMESPACE6]) - self.assertEqual(expected, actual) - - def test_namespace_index_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - output = self.namespace_controller.index(request) - output = output.to_dict() - self.assertEqual(5, len(output['namespaces'])) - actual = set([namespace.namespace for - namespace in output['namespaces']]) - expected = set([NAMESPACE1, NAMESPACE2, NAMESPACE3, NAMESPACE5, - NAMESPACE6]) - self.assertEqual(expected, actual) - - def test_namespace_index_visibility_public(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - filters = {'visibility': 'public'} - output = 
self.namespace_controller.index(request, filters=filters) - output = output.to_dict() - self.assertEqual(3, len(output['namespaces'])) - actual = set([namespace.namespace for namespace - in output['namespaces']]) - expected = set([NAMESPACE3, NAMESPACE5, NAMESPACE6]) - self.assertEqual(expected, actual) - - def test_namespace_index_resource_type(self): - request = unit_test_utils.get_fake_request() - filters = {'resource_types': [RESOURCE_TYPE1]} - output = self.namespace_controller.index(request, filters=filters) - output = output.to_dict() - self.assertEqual(2, len(output['namespaces'])) - actual = set([namespace.namespace for namespace - in output['namespaces']]) - expected = set([NAMESPACE1, NAMESPACE3]) - self.assertEqual(expected, actual) - - def test_namespace_show(self): - request = unit_test_utils.get_fake_request() - output = self.namespace_controller.show(request, NAMESPACE1) - output = output.to_dict() - self.assertEqual(NAMESPACE1, output['namespace']) - self.assertEqual(TENANT1, output['owner']) - self.assertTrue(output['protected']) - self.assertEqual('private', output['visibility']) - - def test_namespace_show_with_related_resources(self): - request = unit_test_utils.get_fake_request() - output = self.namespace_controller.show(request, NAMESPACE3) - output = output.to_dict() - self.assertEqual(NAMESPACE3, output['namespace']) - self.assertEqual(TENANT3, output['owner']) - self.assertFalse(output['protected']) - self.assertEqual('public', output['visibility']) - - self.assertEqual(2, len(output['properties'])) - actual = set([property for property in output['properties']]) - expected = set([PROPERTY1, PROPERTY2]) - self.assertEqual(expected, actual) - - self.assertEqual(2, len(output['objects'])) - actual = set([object.name for object in output['objects']]) - expected = set([OBJECT1, OBJECT2]) - self.assertEqual(expected, actual) - - self.assertEqual(1, len(output['resource_type_associations'])) - actual = set([rt.name for rt in 
output['resource_type_associations']]) - expected = set([RESOURCE_TYPE1]) - self.assertEqual(expected, actual) - - def test_namespace_show_with_property_prefix(self): - request = unit_test_utils.get_fake_request() - rt = resource_types.ResourceTypeAssociation() - rt.name = RESOURCE_TYPE2 - rt.prefix = 'pref' - rt = self.rt_controller.create(request, rt, NAMESPACE3) - - object = objects.MetadefObject() - object.name = OBJECT3 - object.required = [] - - property = properties.PropertyType() - property.name = PROPERTY2 - property.type = 'string' - property.title = 'title' - object.properties = {'prop1': property} - object = self.object_controller.create(request, object, NAMESPACE3) - - self.assertNotificationsLog([ - { - 'type': 'metadef_resource_type.create', - 'payload': { - 'namespace': NAMESPACE3, - 'name': RESOURCE_TYPE2, - 'prefix': 'pref', - 'properties_target': None, - } - }, - { - 'type': 'metadef_object.create', - 'payload': { - 'name': OBJECT3, - 'namespace': NAMESPACE3, - 'properties': [{ - 'name': 'prop1', - 'additionalItems': None, - 'confidential': None, - 'title': u'title', - 'default': None, - 'pattern': None, - 'enum': None, - 'maximum': None, - 'minItems': None, - 'minimum': None, - 'maxItems': None, - 'minLength': None, - 'uniqueItems': None, - 'maxLength': None, - 'items': None, - 'type': u'string', - 'description': None - }], - 'required': [], - 'description': None, - } - } - ]) - - filters = {'resource_type': RESOURCE_TYPE2} - output = self.namespace_controller.show(request, NAMESPACE3, filters) - output = output.to_dict() - - [self.assertTrue(property_name.startswith(rt.prefix)) for - property_name in output['properties'].keys()] - - for object in output['objects']: - [self.assertTrue(property_name.startswith(rt.prefix)) for - property_name in object.properties.keys()] - - @mock.patch('glance.api.v2.metadef_namespaces.LOG') - def test_cleanup_namespace_success(self, mock_log): - fake_gateway = glance.gateway.Gateway(db_api=self.db, - 
notifier=self.notifier, - policy_enforcer=self.policy) - req = unit_test_utils.get_fake_request() - ns_factory = fake_gateway.get_metadef_namespace_factory( - req.context) - ns_repo = fake_gateway.get_metadef_namespace_repo(req.context) - namespace = namespaces.Namespace() - namespace.namespace = 'FakeNamespace' - new_namespace = ns_factory.new_namespace(**namespace.to_dict()) - ns_repo.add(new_namespace) - - self.namespace_controller._cleanup_namespace(ns_repo, namespace, True) - - mock_log.debug.assert_called_with( - "Cleaned up namespace %(namespace)s ", - {'namespace': namespace.namespace}) - - @mock.patch('glance.api.v2.metadef_namespaces.LOG') - @mock.patch('glance.api.authorization.MetadefNamespaceRepoProxy.remove') - def test_cleanup_namespace_exception(self, mock_remove, mock_log): - mock_remove.side_effect = Exception(u'Mock remove was called') - - fake_gateway = glance.gateway.Gateway(db_api=self.db, - notifier=self.notifier, - policy_enforcer=self.policy) - req = unit_test_utils.get_fake_request() - ns_factory = fake_gateway.get_metadef_namespace_factory( - req.context) - ns_repo = fake_gateway.get_metadef_namespace_repo(req.context) - namespace = namespaces.Namespace() - namespace.namespace = 'FakeNamespace' - new_namespace = ns_factory.new_namespace(**namespace.to_dict()) - ns_repo.add(new_namespace) - - self.namespace_controller._cleanup_namespace(ns_repo, namespace, True) - - called_msg = 'Failed to delete namespace %(namespace)s.' 
\ - 'Exception: %(exception)s' - called_args = {'exception': u'Mock remove was called', - 'namespace': u'FakeNamespace'} - mock_log.error.assert_called_with((called_msg, called_args)) - mock_remove.assert_called_once_with(mock.ANY) - - def test_namespace_show_non_existing(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.show, request, 'FakeName') - - def test_namespace_show_non_visible(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.show, request, NAMESPACE2) - - def test_namespace_delete(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2) - self.namespace_controller.delete(request, NAMESPACE2) - self.assertNotificationLog("metadef_namespace.delete", - [{'namespace': NAMESPACE2}]) - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.show, request, NAMESPACE2) - - def test_namespace_delete_notification_disabled(self): - self.config(disabled_notifications=["metadef_namespace.delete"]) - request = unit_test_utils.get_fake_request(tenant=TENANT2) - self.namespace_controller.delete(request, NAMESPACE2) - self.assertNotificationsLog([]) - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.show, request, NAMESPACE2) - - def test_namespace_delete_notification_group_disabled(self): - self.config(disabled_notifications=["metadef_namespace"]) - request = unit_test_utils.get_fake_request(tenant=TENANT2) - self.namespace_controller.delete(request, NAMESPACE2) - self.assertNotificationsLog([]) - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.show, request, NAMESPACE2) - - def test_namespace_delete_notification_create_disabled(self): - self.config(disabled_notifications=["metadef_namespace.create"]) - request = unit_test_utils.get_fake_request(tenant=TENANT2) - self.namespace_controller.delete(request, NAMESPACE2) - 
self.assertNotificationLog("metadef_namespace.delete", - [{'namespace': NAMESPACE2}]) - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.show, request, NAMESPACE2) - - def test_namespace_delete_non_existing(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.delete, request, - 'FakeName') - self.assertNotificationsLog([]) - - def test_namespace_delete_non_visible(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.delete, request, - NAMESPACE2) - self.assertNotificationsLog([]) - - def test_namespace_delete_non_visible_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.namespace_controller.delete(request, NAMESPACE2) - self.assertNotificationLog("metadef_namespace.delete", - [{'namespace': NAMESPACE2}]) - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.show, request, NAMESPACE2) - - def test_namespace_delete_protected(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, - self.namespace_controller.delete, request, - NAMESPACE1) - self.assertNotificationsLog([]) - - def test_namespace_delete_protected_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.assertRaises(webob.exc.HTTPForbidden, - self.namespace_controller.delete, request, - NAMESPACE1) - self.assertNotificationsLog([]) - - def test_namespace_delete_with_contents(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - self.namespace_controller.delete(request, NAMESPACE3) - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.show, request, NAMESPACE3) - self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show, - request, NAMESPACE3, OBJECT1) - self.assertRaises(webob.exc.HTTPNotFound, - self.property_controller.show, request, NAMESPACE3, - OBJECT1) - - def 
test_namespace_delete_properties(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - self.namespace_controller.delete_properties(request, NAMESPACE3) - - output = self.property_controller.index(request, NAMESPACE3) - output = output.to_dict() - self.assertEqual(0, len(output['properties'])) - - self.assertNotificationLog("metadef_namespace.delete_properties", - [{'namespace': NAMESPACE3}]) - - def test_namespace_delete_properties_other_owner(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, - self.namespace_controller.delete_properties, - request, - NAMESPACE3) - self.assertNotificationsLog([]) - - def test_namespace_delete_properties_other_owner_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.namespace_controller.delete_properties(request, NAMESPACE3) - - output = self.property_controller.index(request, NAMESPACE3) - output = output.to_dict() - self.assertEqual(0, len(output['properties'])) - self.assertNotificationLog("metadef_namespace.delete_properties", - [{'namespace': NAMESPACE3}]) - - def test_namespace_non_existing_delete_properties(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.delete_properties, - request, - NAMESPACE4) - self.assertNotificationsLog([]) - - def test_namespace_delete_objects(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - self.namespace_controller.delete_objects(request, NAMESPACE3) - - output = self.object_controller.index(request, NAMESPACE3) - output = output.to_dict() - self.assertEqual(0, len(output['objects'])) - self.assertNotificationLog("metadef_namespace.delete_objects", - [{'namespace': NAMESPACE3}]) - - def test_namespace_delete_objects_other_owner(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, - self.namespace_controller.delete_objects, - request, - NAMESPACE3) - 
self.assertNotificationsLog([]) - - def test_namespace_delete_objects_other_owner_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.namespace_controller.delete_objects(request, NAMESPACE3) - - output = self.object_controller.index(request, NAMESPACE3) - output = output.to_dict() - self.assertEqual(0, len(output['objects'])) - - self.assertNotificationLog("metadef_namespace.delete_objects", - [{'namespace': NAMESPACE3}]) - - def test_namespace_non_existing_delete_objects(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.delete_objects, - request, - NAMESPACE4) - self.assertNotificationsLog([]) - - def test_namespace_delete_tags(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - self.namespace_controller.delete_tags(request, NAMESPACE3) - - output = self.tag_controller.index(request, NAMESPACE3) - output = output.to_dict() - self.assertEqual(0, len(output['tags'])) - self.assertNotificationLog("metadef_namespace.delete_tags", - [{'namespace': NAMESPACE3}]) - - def test_namespace_delete_tags_other_owner(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, - self.namespace_controller.delete_tags, - request, - NAMESPACE3) - self.assertNotificationsLog([]) - - def test_namespace_delete_tags_other_owner_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.namespace_controller.delete_tags(request, NAMESPACE3) - - output = self.tag_controller.index(request, NAMESPACE3) - output = output.to_dict() - self.assertEqual(0, len(output['tags'])) - self.assertNotificationLog("metadef_namespace.delete_tags", - [{'namespace': NAMESPACE3}]) - - def test_namespace_non_existing_delete_tags(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.delete_tags, - request, - NAMESPACE4) - self.assertNotificationsLog([]) - - def 
test_namespace_create(self): - request = unit_test_utils.get_fake_request() - - namespace = namespaces.Namespace() - namespace.namespace = NAMESPACE4 - namespace = self.namespace_controller.create(request, namespace) - self.assertEqual(NAMESPACE4, namespace.namespace) - - self.assertNotificationLog("metadef_namespace.create", - [{'namespace': NAMESPACE4}]) - namespace = self.namespace_controller.show(request, NAMESPACE4) - self.assertEqual(NAMESPACE4, namespace.namespace) - - def test_namespace_create_with_4byte_character(self): - request = unit_test_utils.get_fake_request() - - namespace = namespaces.Namespace() - namespace.namespace = u'\U0001f693' - - self.assertRaises(webob.exc.HTTPBadRequest, - self.namespace_controller.create, request, - namespace) - - def test_namespace_create_duplicate(self): - request = unit_test_utils.get_fake_request() - - namespace = namespaces.Namespace() - namespace.namespace = 'new-namespace' - new_ns = self.namespace_controller.create(request, namespace) - self.assertEqual('new-namespace', new_ns.namespace) - self.assertRaises(webob.exc.HTTPConflict, - self.namespace_controller.create, - request, namespace) - - def test_namespace_create_different_owner(self): - request = unit_test_utils.get_fake_request() - - namespace = namespaces.Namespace() - namespace.namespace = NAMESPACE4 - namespace.owner = TENANT4 - self.assertRaises(webob.exc.HTTPForbidden, - self.namespace_controller.create, request, namespace) - self.assertNotificationsLog([]) - - def test_namespace_create_different_owner_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - - namespace = namespaces.Namespace() - namespace.namespace = NAMESPACE4 - namespace.owner = TENANT4 - namespace = self.namespace_controller.create(request, namespace) - self.assertEqual(NAMESPACE4, namespace.namespace) - - self.assertNotificationLog("metadef_namespace.create", - [{'namespace': NAMESPACE4}]) - namespace = self.namespace_controller.show(request, NAMESPACE4) - 
self.assertEqual(NAMESPACE4, namespace.namespace) - - def test_namespace_create_with_related_resources(self): - request = unit_test_utils.get_fake_request() - - namespace = namespaces.Namespace() - namespace.namespace = NAMESPACE4 - - prop1 = properties.PropertyType() - prop1.type = 'string' - prop1.title = 'title' - prop2 = properties.PropertyType() - prop2.type = 'string' - prop2.title = 'title' - namespace.properties = {PROPERTY1: prop1, PROPERTY2: prop2} - - object1 = objects.MetadefObject() - object1.name = OBJECT1 - object1.required = [] - object1.properties = {} - object2 = objects.MetadefObject() - object2.name = OBJECT2 - object2.required = [] - object2.properties = {} - namespace.objects = [object1, object2] - - output = self.namespace_controller.create(request, namespace) - self.assertEqual(NAMESPACE4, namespace.namespace) - output = output.to_dict() - - self.assertEqual(2, len(output['properties'])) - actual = set([property for property in output['properties']]) - expected = set([PROPERTY1, PROPERTY2]) - self.assertEqual(expected, actual) - - self.assertEqual(2, len(output['objects'])) - actual = set([object.name for object in output['objects']]) - expected = set([OBJECT1, OBJECT2]) - self.assertEqual(expected, actual) - - output = self.namespace_controller.show(request, NAMESPACE4) - self.assertEqual(NAMESPACE4, namespace.namespace) - output = output.to_dict() - - self.assertEqual(2, len(output['properties'])) - actual = set([property for property in output['properties']]) - expected = set([PROPERTY1, PROPERTY2]) - self.assertEqual(expected, actual) - - self.assertEqual(2, len(output['objects'])) - actual = set([object.name for object in output['objects']]) - expected = set([OBJECT1, OBJECT2]) - self.assertEqual(expected, actual) - - self.assertNotificationsLog([ - { - 'type': 'metadef_namespace.create', - 'payload': { - 'namespace': NAMESPACE4, - 'owner': TENANT1, - } - }, - { - 'type': 'metadef_object.create', - 'payload': { - 'namespace': 
NAMESPACE4, - 'name': OBJECT1, - 'properties': [], - } - }, - { - 'type': 'metadef_object.create', - 'payload': { - 'namespace': NAMESPACE4, - 'name': OBJECT2, - 'properties': [], - } - }, - { - 'type': 'metadef_property.create', - 'payload': { - 'namespace': NAMESPACE4, - 'type': 'string', - 'title': 'title', - } - }, - { - 'type': 'metadef_property.create', - 'payload': { - 'namespace': NAMESPACE4, - 'type': 'string', - 'title': 'title', - } - } - ]) - - def test_namespace_create_conflict(self): - request = unit_test_utils.get_fake_request() - - namespace = namespaces.Namespace() - namespace.namespace = NAMESPACE1 - - self.assertRaises(webob.exc.HTTPConflict, - self.namespace_controller.create, request, namespace) - self.assertNotificationsLog([]) - - def test_namespace_update(self): - request = unit_test_utils.get_fake_request() - namespace = self.namespace_controller.show(request, NAMESPACE1) - - namespace.protected = False - namespace = self.namespace_controller.update(request, namespace, - NAMESPACE1) - self.assertFalse(namespace.protected) - self.assertNotificationLog("metadef_namespace.update", [ - {'namespace': NAMESPACE1, 'protected': False} - ]) - namespace = self.namespace_controller.show(request, NAMESPACE1) - self.assertFalse(namespace.protected) - - def test_namespace_update_non_existing(self): - request = unit_test_utils.get_fake_request() - - namespace = namespaces.Namespace() - namespace.namespace = NAMESPACE4 - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.update, request, namespace, - NAMESPACE4) - self.assertNotificationsLog([]) - - def test_namespace_update_non_visible(self): - request = unit_test_utils.get_fake_request() - - namespace = namespaces.Namespace() - namespace.namespace = NAMESPACE2 - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.update, request, namespace, - NAMESPACE2) - self.assertNotificationsLog([]) - - def test_namespace_update_non_visible_admin(self): - request = 
unit_test_utils.get_fake_request(is_admin=True) - namespace = self.namespace_controller.show(request, NAMESPACE2) - - namespace.protected = False - namespace = self.namespace_controller.update(request, namespace, - NAMESPACE2) - self.assertFalse(namespace.protected) - self.assertNotificationLog("metadef_namespace.update", [ - {'namespace': NAMESPACE2, 'protected': False} - ]) - namespace = self.namespace_controller.show(request, NAMESPACE2) - self.assertFalse(namespace.protected) - - def test_namespace_update_name(self): - request = unit_test_utils.get_fake_request() - namespace = self.namespace_controller.show(request, NAMESPACE1) - - namespace.namespace = NAMESPACE4 - namespace = self.namespace_controller.update(request, namespace, - NAMESPACE1) - self.assertEqual(NAMESPACE4, namespace.namespace) - self.assertNotificationLog("metadef_namespace.update", [ - {'namespace': NAMESPACE4, 'namespace_old': NAMESPACE1} - ]) - namespace = self.namespace_controller.show(request, NAMESPACE4) - self.assertEqual(NAMESPACE4, namespace.namespace) - - self.assertRaises(webob.exc.HTTPNotFound, - self.namespace_controller.show, request, NAMESPACE1) - - def test_namespace_update_with_4byte_character(self): - request = unit_test_utils.get_fake_request() - - namespace = self.namespace_controller.show(request, NAMESPACE1) - namespace.namespace = u'\U0001f693' - - self.assertRaises(webob.exc.HTTPBadRequest, - self.namespace_controller.update, request, - namespace, NAMESPACE1) - - def test_namespace_update_name_conflict(self): - request = unit_test_utils.get_fake_request() - namespace = self.namespace_controller.show(request, NAMESPACE1) - namespace.namespace = NAMESPACE2 - self.assertRaises(webob.exc.HTTPConflict, - self.namespace_controller.update, request, namespace, - NAMESPACE1) - self.assertNotificationsLog([]) - - def test_property_index(self): - request = unit_test_utils.get_fake_request() - output = self.property_controller.index(request, NAMESPACE3) - self.assertEqual(2, 
len(output.properties)) - actual = set([property for property in output.properties]) - expected = set([PROPERTY1, PROPERTY2]) - self.assertEqual(expected, actual) - - def test_property_index_empty(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2) - output = self.property_controller.index(request, NAMESPACE2) - self.assertEqual(0, len(output.properties)) - - def test_property_index_non_existing_namespace(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2) - self.assertRaises(webob.exc.HTTPNotFound, - self.property_controller.index, request, NAMESPACE4) - - def test_property_show(self): - request = unit_test_utils.get_fake_request() - output = self.property_controller.show(request, NAMESPACE3, PROPERTY1) - self.assertEqual(PROPERTY1, output.name) - - def test_property_show_specific_resource_type(self): - request = unit_test_utils.get_fake_request() - output = self.property_controller.show( - request, NAMESPACE6, ''.join([PREFIX1, PROPERTY4]), - filters={'resource_type': RESOURCE_TYPE4}) - self.assertEqual(PROPERTY4, output.name) - - def test_property_show_prefix_mismatch(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.property_controller.show, request, NAMESPACE6, - PROPERTY4, filters={'resource_type': RESOURCE_TYPE4}) - - def test_property_show_non_existing_resource_type(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.property_controller.show, request, NAMESPACE2, - PROPERTY1, filters={'resource_type': 'test'}) - - def test_property_show_non_existing(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.property_controller.show, request, NAMESPACE2, - PROPERTY1) - - def test_property_show_non_visible(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2) - self.assertRaises(webob.exc.HTTPNotFound, - self.property_controller.show, request, NAMESPACE1, - 
PROPERTY1) - - def test_property_show_non_visible_admin(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2, - is_admin=True) - output = self.property_controller.show(request, NAMESPACE1, PROPERTY1) - self.assertEqual(PROPERTY1, output.name) - - def test_property_delete(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - self.property_controller.delete(request, NAMESPACE3, PROPERTY1) - self.assertNotificationLog("metadef_property.delete", - [{'name': PROPERTY1, - 'namespace': NAMESPACE3}]) - self.assertRaises(webob.exc.HTTPNotFound, - self.property_controller.show, request, NAMESPACE3, - PROPERTY1) - - def test_property_delete_disabled_notification(self): - self.config(disabled_notifications=["metadef_property.delete"]) - request = unit_test_utils.get_fake_request(tenant=TENANT3) - self.property_controller.delete(request, NAMESPACE3, PROPERTY1) - self.assertNotificationsLog([]) - self.assertRaises(webob.exc.HTTPNotFound, - self.property_controller.show, request, NAMESPACE3, - PROPERTY1) - - def test_property_delete_other_owner(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, - self.property_controller.delete, request, NAMESPACE3, - PROPERTY1) - self.assertNotificationsLog([]) - - def test_property_delete_other_owner_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.property_controller.delete(request, NAMESPACE3, PROPERTY1) - self.assertNotificationLog("metadef_property.delete", - [{'name': PROPERTY1, - 'namespace': NAMESPACE3}]) - self.assertRaises(webob.exc.HTTPNotFound, - self.property_controller.show, request, NAMESPACE3, - PROPERTY1) - - def test_property_delete_non_existing(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.property_controller.delete, request, NAMESPACE5, - PROPERTY2) - self.assertNotificationsLog([]) - - def test_property_delete_non_existing_namespace(self): - request = 
unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.property_controller.delete, request, NAMESPACE4, - PROPERTY1) - self.assertNotificationsLog([]) - - def test_property_delete_non_visible(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2) - self.assertRaises(webob.exc.HTTPNotFound, - self.property_controller.delete, request, NAMESPACE1, - PROPERTY1) - self.assertNotificationsLog([]) - - def test_property_delete_admin_protected(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.assertRaises(webob.exc.HTTPForbidden, - self.property_controller.delete, request, NAMESPACE1, - PROPERTY1) - self.assertNotificationsLog([]) - - def test_property_create(self): - request = unit_test_utils.get_fake_request() - - property = properties.PropertyType() - property.name = PROPERTY2 - property.type = 'string' - property.title = 'title' - property = self.property_controller.create(request, NAMESPACE1, - property) - self.assertEqual(PROPERTY2, property.name) - self.assertEqual('string', property.type) - self.assertEqual('title', property.title) - self.assertNotificationLog("metadef_property.create", - [{'name': PROPERTY2, - 'namespace': NAMESPACE1}]) - property = self.property_controller.show(request, NAMESPACE1, - PROPERTY2) - self.assertEqual(PROPERTY2, property.name) - self.assertEqual('string', property.type) - self.assertEqual('title', property.title) - - def test_property_create_overlimit_name(self): - request = unit_test_utils.get_fake_request('/metadefs/namespaces/' - 'Namespace3/' - 'properties') - request.body = jsonutils.dump_as_bytes({'name': 'a' * 256}) - - self.assertRaises(webob.exc.HTTPBadRequest, - self.property_deserializer.create, - request) - - def test_property_create_with_4byte_character(self): - request = unit_test_utils.get_fake_request() - - property = properties.PropertyType() - property.name = u'\U0001f693' - property.type = 'string' - property.title = 'title' - - 
self.assertRaises(webob.exc.HTTPBadRequest, - self.property_controller.create, - request, NAMESPACE1, property) - - def test_property_create_with_operators(self): - request = unit_test_utils.get_fake_request() - - property = properties.PropertyType() - property.name = PROPERTY2 - property.type = 'string' - property.title = 'title' - property.operators = [''] - property = self.property_controller.create(request, NAMESPACE1, - property) - self.assertEqual(PROPERTY2, property.name) - self.assertEqual('string', property.type) - self.assertEqual('title', property.title) - self.assertEqual([''], property.operators) - - property = self.property_controller.show(request, NAMESPACE1, - PROPERTY2) - self.assertEqual(PROPERTY2, property.name) - self.assertEqual('string', property.type) - self.assertEqual('title', property.title) - self.assertEqual([''], property.operators) - - def test_property_create_conflict(self): - request = unit_test_utils.get_fake_request() - - property = properties.PropertyType() - property.name = PROPERTY1 - property.type = 'string' - property.title = 'title' - - self.assertRaises(webob.exc.HTTPConflict, - self.property_controller.create, request, NAMESPACE1, - property) - self.assertNotificationsLog([]) - - def test_property_create_non_visible_namespace(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2) - - property = properties.PropertyType() - property.name = PROPERTY1 - property.type = 'string' - property.title = 'title' - - self.assertRaises(webob.exc.HTTPForbidden, - self.property_controller.create, request, NAMESPACE1, - property) - self.assertNotificationsLog([]) - - def test_property_create_non_visible_namespace_admin(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2, - is_admin=True) - - property = properties.PropertyType() - property.name = PROPERTY2 - property.type = 'string' - property.title = 'title' - property = self.property_controller.create(request, NAMESPACE1, - property) - 
self.assertEqual(PROPERTY2, property.name) - self.assertEqual('string', property.type) - self.assertEqual('title', property.title) - - self.assertNotificationLog("metadef_property.create", - [{'name': PROPERTY2, - 'namespace': NAMESPACE1}]) - property = self.property_controller.show(request, NAMESPACE1, - PROPERTY2) - self.assertEqual(PROPERTY2, property.name) - self.assertEqual('string', property.type) - self.assertEqual('title', property.title) - - def test_property_create_non_existing_namespace(self): - request = unit_test_utils.get_fake_request() - - property = properties.PropertyType() - property.name = PROPERTY1 - property.type = 'string' - property.title = 'title' - - self.assertRaises(webob.exc.HTTPNotFound, - self.property_controller.create, request, NAMESPACE4, - property) - self.assertNotificationsLog([]) - - def test_property_create_duplicate(self): - request = unit_test_utils.get_fake_request() - - property = properties.PropertyType() - property.name = 'new-property' - property.type = 'string' - property.title = 'title' - new_property = self.property_controller.create(request, NAMESPACE1, - property) - self.assertEqual('new-property', new_property.name) - self.assertRaises(webob.exc.HTTPConflict, - self.property_controller.create, request, - NAMESPACE1, property) - - def test_property_update(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - - property = self.property_controller.show(request, NAMESPACE3, - PROPERTY1) - property.name = PROPERTY1 - property.type = 'string123' - property.title = 'title123' - property = self.property_controller.update(request, NAMESPACE3, - PROPERTY1, property) - self.assertEqual(PROPERTY1, property.name) - self.assertEqual('string123', property.type) - self.assertEqual('title123', property.title) - self.assertNotificationLog("metadef_property.update", [ - { - 'name': PROPERTY1, - 'namespace': NAMESPACE3, - 'type': 'string123', - 'title': 'title123', - } - ]) - property = 
self.property_controller.show(request, NAMESPACE3, - PROPERTY1) - self.assertEqual(PROPERTY1, property.name) - self.assertEqual('string123', property.type) - self.assertEqual('title123', property.title) - - def test_property_update_name(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - - property = self.property_controller.show(request, NAMESPACE3, - PROPERTY1) - property.name = PROPERTY3 - property.type = 'string' - property.title = 'title' - property = self.property_controller.update(request, NAMESPACE3, - PROPERTY1, property) - self.assertEqual(PROPERTY3, property.name) - self.assertEqual('string', property.type) - self.assertEqual('title', property.title) - - self.assertNotificationLog("metadef_property.update", [ - { - 'name': PROPERTY3, - 'name_old': PROPERTY1, - 'namespace': NAMESPACE3, - 'type': 'string', - 'title': 'title', - } - ]) - property = self.property_controller.show(request, NAMESPACE3, - PROPERTY2) - self.assertEqual(PROPERTY2, property.name) - self.assertEqual('string', property.type) - self.assertEqual('title', property.title) - - def test_property_update_conflict(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - - property = self.property_controller.show(request, NAMESPACE3, - PROPERTY1) - property.name = PROPERTY2 - property.type = 'string' - property.title = 'title' - self.assertRaises(webob.exc.HTTPConflict, - self.property_controller.update, request, NAMESPACE3, - PROPERTY1, property) - self.assertNotificationsLog([]) - - def test_property_update_with_4byte_character(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - - property = self.property_controller.show(request, NAMESPACE3, - PROPERTY1) - property.name = u'\U0001f693' - property.type = 'string' - property.title = 'title' - - self.assertRaises(webob.exc.HTTPBadRequest, - self.property_controller.update, request, - NAMESPACE3, PROPERTY1, property) - - def test_property_update_non_existing(self): - request = 
unit_test_utils.get_fake_request(tenant=TENANT3) - - property = properties.PropertyType() - property.name = PROPERTY1 - property.type = 'string' - property.title = 'title' - - self.assertRaises(webob.exc.HTTPNotFound, - self.property_controller.update, request, NAMESPACE5, - PROPERTY1, property) - self.assertNotificationsLog([]) - - def test_property_update_namespace_non_existing(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - - property = properties.PropertyType() - property.name = PROPERTY1 - property.type = 'string' - property.title = 'title' - - self.assertRaises(webob.exc.HTTPNotFound, - self.property_controller.update, request, NAMESPACE4, - PROPERTY1, property) - self.assertNotificationsLog([]) - - def test_object_index(self): - request = unit_test_utils.get_fake_request() - output = self.object_controller.index(request, NAMESPACE3) - output = output.to_dict() - self.assertEqual(2, len(output['objects'])) - actual = set([object.name for object in output['objects']]) - expected = set([OBJECT1, OBJECT2]) - self.assertEqual(expected, actual) - - def test_object_index_zero_limit(self): - request = unit_test_utils.get_fake_request('/metadefs/namespaces/' - 'Namespace3/' - 'objects?limit=0') - self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, - request) - - def test_object_index_empty(self): - request = unit_test_utils.get_fake_request() - output = self.object_controller.index(request, NAMESPACE5) - output = output.to_dict() - self.assertEqual(0, len(output['objects'])) - - def test_object_index_non_existing_namespace(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.index, - request, NAMESPACE4) - - def test_object_show(self): - request = unit_test_utils.get_fake_request() - output = self.object_controller.show(request, NAMESPACE3, OBJECT1) - self.assertEqual(OBJECT1, output.name) - - def test_object_show_non_existing(self): - request = 
unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show, - request, NAMESPACE5, OBJECT1) - - def test_object_show_non_visible(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2) - self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show, - request, NAMESPACE1, OBJECT1) - - def test_object_show_non_visible_admin(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2, - is_admin=True) - - output = self.object_controller.show(request, NAMESPACE1, OBJECT1) - self.assertEqual(OBJECT1, output.name) - - def test_object_delete(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - self.object_controller.delete(request, NAMESPACE3, OBJECT1) - self.assertNotificationLog("metadef_object.delete", - [{'name': OBJECT1, - 'namespace': NAMESPACE3}]) - self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show, - request, NAMESPACE3, OBJECT1) - - def test_object_delete_disabled_notification(self): - self.config(disabled_notifications=["metadef_object.delete"]) - request = unit_test_utils.get_fake_request(tenant=TENANT3) - self.object_controller.delete(request, NAMESPACE3, OBJECT1) - self.assertNotificationsLog([]) - self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show, - request, NAMESPACE3, OBJECT1) - - def test_object_delete_other_owner(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, - self.object_controller.delete, request, NAMESPACE3, - OBJECT1) - self.assertNotificationsLog([]) - - def test_object_delete_other_owner_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.object_controller.delete(request, NAMESPACE3, OBJECT1) - self.assertNotificationLog("metadef_object.delete", - [{'name': OBJECT1, - 'namespace': NAMESPACE3}]) - self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show, - request, NAMESPACE3, OBJECT1) - - def 
test_object_delete_non_existing(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.object_controller.delete, request, NAMESPACE5, - OBJECT1) - self.assertNotificationsLog([]) - - def test_object_delete_non_existing_namespace(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.object_controller.delete, request, NAMESPACE4, - OBJECT1) - self.assertNotificationsLog([]) - - def test_object_delete_non_visible(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2) - self.assertRaises(webob.exc.HTTPNotFound, - self.object_controller.delete, request, NAMESPACE1, - OBJECT1) - self.assertNotificationsLog([]) - - def test_object_delete_admin_protected(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.assertRaises(webob.exc.HTTPForbidden, - self.object_controller.delete, request, NAMESPACE1, - OBJECT1) - self.assertNotificationsLog([]) - - def test_object_create(self): - request = unit_test_utils.get_fake_request() - - object = objects.MetadefObject() - object.name = OBJECT2 - object.required = [] - object.properties = {} - object = self.object_controller.create(request, object, NAMESPACE1) - self.assertEqual(OBJECT2, object.name) - self.assertEqual([], object.required) - self.assertEqual({}, object.properties) - self.assertNotificationLog("metadef_object.create", - [{'name': OBJECT2, - 'namespace': NAMESPACE1, - 'properties': []}]) - object = self.object_controller.show(request, NAMESPACE1, OBJECT2) - self.assertEqual(OBJECT2, object.name) - self.assertEqual([], object.required) - self.assertEqual({}, object.properties) - - def test_object_create_overlimit_name(self): - request = unit_test_utils.get_fake_request('/metadefs/namespaces/' - 'Namespace3/' - 'objects') - request.body = jsonutils.dump_as_bytes({'name': 'a' * 256}) - - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.create, - request) - - def 
test_object_create_duplicate(self): - request = unit_test_utils.get_fake_request() - - object = objects.MetadefObject() - object.name = 'New-Object' - object.required = [] - object.properties = {} - new_obj = self.object_controller.create(request, object, NAMESPACE3) - self.assertEqual('New-Object', new_obj.name) - self.assertRaises(webob.exc.HTTPConflict, - self.object_controller.create, request, object, - NAMESPACE3) - - def test_object_create_conflict(self): - request = unit_test_utils.get_fake_request() - - object = objects.MetadefObject() - object.name = OBJECT1 - object.required = [] - object.properties = {} - - self.assertRaises(webob.exc.HTTPConflict, - self.object_controller.create, request, object, - NAMESPACE1) - self.assertNotificationsLog([]) - - def test_object_create_with_4byte_character(self): - request = unit_test_utils.get_fake_request() - - object = objects.MetadefObject() - object.name = u'\U0001f693' - object.required = [] - object.properties = {} - - self.assertRaises(webob.exc.HTTPBadRequest, - self.object_controller.create, request, - object, NAMESPACE1) - - def test_object_create_non_existing_namespace(self): - request = unit_test_utils.get_fake_request() - - object = objects.MetadefObject() - object.name = PROPERTY1 - object.required = [] - object.properties = {} - - self.assertRaises(webob.exc.HTTPNotFound, - self.object_controller.create, request, object, - NAMESPACE4) - self.assertNotificationsLog([]) - - def test_object_create_non_visible_namespace(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2) - - object = objects.MetadefObject() - object.name = OBJECT1 - object.required = [] - object.properties = {} - - self.assertRaises(webob.exc.HTTPForbidden, - self.object_controller.create, request, object, - NAMESPACE1) - self.assertNotificationsLog([]) - - def test_object_create_non_visible_namespace_admin(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2, - is_admin=True) - - object = 
objects.MetadefObject() - object.name = OBJECT2 - object.required = [] - object.properties = {} - object = self.object_controller.create(request, object, NAMESPACE1) - self.assertEqual(OBJECT2, object.name) - self.assertEqual([], object.required) - self.assertEqual({}, object.properties) - self.assertNotificationLog("metadef_object.create", - [{'name': OBJECT2, - 'namespace': NAMESPACE1}]) - object = self.object_controller.show(request, NAMESPACE1, OBJECT2) - self.assertEqual(OBJECT2, object.name) - self.assertEqual([], object.required) - self.assertEqual({}, object.properties) - - def test_object_create_missing_properties(self): - request = unit_test_utils.get_fake_request() - - object = objects.MetadefObject() - object.name = OBJECT2 - object.required = [] - object = self.object_controller.create(request, object, NAMESPACE1) - self.assertEqual(OBJECT2, object.name) - self.assertEqual([], object.required) - self.assertNotificationLog("metadef_object.create", - [{'name': OBJECT2, - 'namespace': NAMESPACE1, - 'properties': []}]) - object = self.object_controller.show(request, NAMESPACE1, OBJECT2) - self.assertEqual(OBJECT2, object.name) - self.assertEqual([], object.required) - self.assertEqual({}, object.properties) - - def test_object_update(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - - object = self.object_controller.show(request, NAMESPACE3, OBJECT1) - object.name = OBJECT1 - object.description = 'description' - object = self.object_controller.update(request, object, NAMESPACE3, - OBJECT1) - self.assertEqual(OBJECT1, object.name) - self.assertEqual('description', object.description) - self.assertNotificationLog("metadef_object.update", [ - { - 'name': OBJECT1, - 'namespace': NAMESPACE3, - 'description': 'description', - } - ]) - property = self.object_controller.show(request, NAMESPACE3, OBJECT1) - self.assertEqual(OBJECT1, property.name) - self.assertEqual('description', object.description) - - def test_object_update_name(self): - 
request = unit_test_utils.get_fake_request() - - object = self.object_controller.show(request, NAMESPACE1, OBJECT1) - object.name = OBJECT2 - object = self.object_controller.update(request, object, NAMESPACE1, - OBJECT1) - self.assertEqual(OBJECT2, object.name) - self.assertNotificationLog("metadef_object.update", [ - { - 'name': OBJECT2, - 'name_old': OBJECT1, - 'namespace': NAMESPACE1, - } - ]) - object = self.object_controller.show(request, NAMESPACE1, OBJECT2) - self.assertEqual(OBJECT2, object.name) - - def test_object_update_with_4byte_character(self): - request = unit_test_utils.get_fake_request() - - object = self.object_controller.show(request, NAMESPACE1, OBJECT1) - object.name = u'\U0001f693' - - self.assertRaises(webob.exc.HTTPBadRequest, - self.object_controller.update, request, - object, NAMESPACE1, OBJECT1) - - def test_object_update_conflict(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - - object = self.object_controller.show(request, NAMESPACE3, OBJECT1) - object.name = OBJECT2 - self.assertRaises(webob.exc.HTTPConflict, - self.object_controller.update, request, object, - NAMESPACE3, OBJECT1) - self.assertNotificationsLog([]) - - def test_object_update_non_existing(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - - object = objects.MetadefObject() - object.name = OBJECT1 - object.required = [] - object.properties = {} - - self.assertRaises(webob.exc.HTTPNotFound, - self.object_controller.update, request, object, - NAMESPACE5, OBJECT1) - self.assertNotificationsLog([]) - - def test_object_update_namespace_non_existing(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - - object = objects.MetadefObject() - object.name = OBJECT1 - object.required = [] - object.properties = {} - - self.assertRaises(webob.exc.HTTPNotFound, - self.object_controller.update, request, object, - NAMESPACE4, OBJECT1) - self.assertNotificationsLog([]) - - def test_resource_type_index(self): - request = 
unit_test_utils.get_fake_request() - output = self.rt_controller.index(request) - - self.assertEqual(3, len(output.resource_types)) - actual = set([rtype.name for rtype in output.resource_types]) - expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2, RESOURCE_TYPE4]) - self.assertEqual(expected, actual) - - def test_resource_type_show(self): - request = unit_test_utils.get_fake_request() - output = self.rt_controller.show(request, NAMESPACE3) - - self.assertEqual(1, len(output.resource_type_associations)) - actual = set([rt.name for rt in output.resource_type_associations]) - expected = set([RESOURCE_TYPE1]) - self.assertEqual(expected, actual) - - def test_resource_type_show_empty(self): - request = unit_test_utils.get_fake_request() - output = self.rt_controller.show(request, NAMESPACE5) - - self.assertEqual(0, len(output.resource_type_associations)) - - def test_resource_type_show_non_visible(self): - request = unit_test_utils.get_fake_request() - - self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.show, - request, NAMESPACE2) - - def test_resource_type_show_non_visible_admin(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2, - is_admin=True) - - output = self.rt_controller.show(request, NAMESPACE2) - self.assertEqual(2, len(output.resource_type_associations)) - actual = set([rt.name for rt in output.resource_type_associations]) - expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2]) - self.assertEqual(expected, actual) - - def test_resource_type_show_non_existing_namespace(self): - request = unit_test_utils.get_fake_request() - - self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.show, - request, NAMESPACE4) - - def test_resource_type_association_delete(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - self.rt_controller.delete(request, NAMESPACE3, RESOURCE_TYPE1) - self.assertNotificationLog("metadef_resource_type.delete", - [{'name': RESOURCE_TYPE1, - 'namespace': NAMESPACE3}]) - output = 
self.rt_controller.show(request, NAMESPACE3) - self.assertEqual(0, len(output.resource_type_associations)) - - def test_resource_type_association_delete_disabled_notification(self): - self.config(disabled_notifications=["metadef_resource_type.delete"]) - request = unit_test_utils.get_fake_request(tenant=TENANT3) - self.rt_controller.delete(request, NAMESPACE3, RESOURCE_TYPE1) - self.assertNotificationsLog([]) - output = self.rt_controller.show(request, NAMESPACE3) - self.assertEqual(0, len(output.resource_type_associations)) - - def test_resource_type_association_delete_other_owner(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.rt_controller.delete, - request, NAMESPACE3, RESOURCE_TYPE1) - self.assertNotificationsLog([]) - - def test_resource_type_association_delete_other_owner_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.rt_controller.delete(request, NAMESPACE3, RESOURCE_TYPE1) - self.assertNotificationLog("metadef_resource_type.delete", - [{'name': RESOURCE_TYPE1, - 'namespace': NAMESPACE3}]) - output = self.rt_controller.show(request, NAMESPACE3) - self.assertEqual(0, len(output.resource_type_associations)) - - def test_resource_type_association_delete_non_existing(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.delete, - request, NAMESPACE1, RESOURCE_TYPE2) - self.assertNotificationsLog([]) - - def test_resource_type_association_delete_non_existing_namespace(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.delete, - request, NAMESPACE4, RESOURCE_TYPE1) - self.assertNotificationsLog([]) - - def test_resource_type_association_delete_non_visible(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.delete, - request, NAMESPACE1, RESOURCE_TYPE1) - 
self.assertNotificationsLog([]) - - def test_resource_type_association_delete_protected_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.assertRaises(webob.exc.HTTPForbidden, self.rt_controller.delete, - request, NAMESPACE1, RESOURCE_TYPE1) - self.assertNotificationsLog([]) - - def test_resource_type_association_create(self): - request = unit_test_utils.get_fake_request() - - rt = resource_types.ResourceTypeAssociation() - rt.name = RESOURCE_TYPE2 - rt.prefix = 'pref' - rt = self.rt_controller.create(request, rt, NAMESPACE1) - self.assertEqual(RESOURCE_TYPE2, rt.name) - self.assertEqual('pref', rt.prefix) - self.assertNotificationLog("metadef_resource_type.create", - [{'name': RESOURCE_TYPE2, - 'namespace': NAMESPACE1}]) - output = self.rt_controller.show(request, NAMESPACE1) - self.assertEqual(2, len(output.resource_type_associations)) - actual = set([x.name for x in output.resource_type_associations]) - expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2]) - self.assertEqual(expected, actual) - - def test_resource_type_association_create_conflict(self): - request = unit_test_utils.get_fake_request() - - rt = resource_types.ResourceTypeAssociation() - rt.name = RESOURCE_TYPE1 - rt.prefix = 'pref' - self.assertRaises(webob.exc.HTTPConflict, self.rt_controller.create, - request, rt, NAMESPACE1) - self.assertNotificationsLog([]) - - def test_resource_type_association_create_non_existing_namespace(self): - request = unit_test_utils.get_fake_request() - - rt = resource_types.ResourceTypeAssociation() - rt.name = RESOURCE_TYPE1 - rt.prefix = 'pref' - self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.create, - request, rt, NAMESPACE4) - self.assertNotificationsLog([]) - - def test_resource_type_association_create_non_existing_resource_type(self): - request = unit_test_utils.get_fake_request() - - rt = resource_types.ResourceTypeAssociation() - rt.name = RESOURCE_TYPE3 - rt.prefix = 'pref' - self.assertRaises(webob.exc.HTTPNotFound, 
self.rt_controller.create, - request, rt, NAMESPACE1) - self.assertNotificationsLog([]) - - def test_resource_type_association_create_non_visible_namespace(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2) - - rt = resource_types.ResourceTypeAssociation() - rt.name = RESOURCE_TYPE2 - rt.prefix = 'pref' - self.assertRaises(webob.exc.HTTPForbidden, self.rt_controller.create, - request, rt, NAMESPACE1) - self.assertNotificationsLog([]) - - def test_resource_type_association_create_non_visible_namesp_admin(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2, - is_admin=True) - - rt = resource_types.ResourceTypeAssociation() - rt.name = RESOURCE_TYPE2 - rt.prefix = 'pref' - rt = self.rt_controller.create(request, rt, NAMESPACE1) - self.assertEqual(RESOURCE_TYPE2, rt.name) - self.assertEqual('pref', rt.prefix) - self.assertNotificationLog("metadef_resource_type.create", - [{'name': RESOURCE_TYPE2, - 'namespace': NAMESPACE1}]) - output = self.rt_controller.show(request, NAMESPACE1) - self.assertEqual(2, len(output.resource_type_associations)) - actual = set([x.name for x in output.resource_type_associations]) - expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2]) - self.assertEqual(expected, actual) - - def test_tag_index(self): - request = unit_test_utils.get_fake_request() - output = self.tag_controller.index(request, NAMESPACE3) - output = output.to_dict() - self.assertEqual(2, len(output['tags'])) - actual = set([tag.name for tag in output['tags']]) - expected = set([TAG1, TAG2]) - self.assertEqual(expected, actual) - - def test_tag_index_empty(self): - request = unit_test_utils.get_fake_request() - output = self.tag_controller.index(request, NAMESPACE5) - output = output.to_dict() - self.assertEqual(0, len(output['tags'])) - - def test_tag_index_non_existing_namespace(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.index, - request, NAMESPACE4) - - def 
test_tag_show(self): - request = unit_test_utils.get_fake_request() - output = self.tag_controller.show(request, NAMESPACE3, TAG1) - self.assertEqual(TAG1, output.name) - - def test_tag_show_non_existing(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.show, - request, NAMESPACE5, TAG1) - - def test_tag_show_non_visible(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2) - self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.show, - request, NAMESPACE1, TAG1) - - def test_tag_show_non_visible_admin(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2, - is_admin=True) - - output = self.tag_controller.show(request, NAMESPACE1, TAG1) - self.assertEqual(TAG1, output.name) - - def test_tag_delete(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - self.tag_controller.delete(request, NAMESPACE3, TAG1) - self.assertNotificationLog("metadef_tag.delete", - [{'name': TAG1, - 'namespace': NAMESPACE3}]) - self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.show, - request, NAMESPACE3, TAG1) - - def test_tag_delete_disabled_notification(self): - self.config(disabled_notifications=["metadef_tag.delete"]) - request = unit_test_utils.get_fake_request(tenant=TENANT3) - self.tag_controller.delete(request, NAMESPACE3, TAG1) - self.assertNotificationsLog([]) - self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.show, - request, NAMESPACE3, TAG1) - - def test_tag_delete_other_owner(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, - self.tag_controller.delete, request, NAMESPACE3, - TAG1) - self.assertNotificationsLog([]) - - def test_tag_delete_other_owner_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.tag_controller.delete(request, NAMESPACE3, TAG1) - self.assertNotificationLog("metadef_tag.delete", - [{'name': TAG1, - 'namespace': NAMESPACE3}]) - 
self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.show, - request, NAMESPACE3, TAG1) - - def test_tag_delete_non_existing(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.tag_controller.delete, request, NAMESPACE5, - TAG1) - self.assertNotificationsLog([]) - - def test_tag_delete_non_existing_namespace(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.tag_controller.delete, request, NAMESPACE4, - TAG1) - self.assertNotificationsLog([]) - - def test_tag_delete_non_visible(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2) - self.assertRaises(webob.exc.HTTPNotFound, - self.tag_controller.delete, request, NAMESPACE1, - TAG1) - self.assertNotificationsLog([]) - - def test_tag_delete_admin_protected(self): - request = unit_test_utils.get_fake_request(is_admin=True) - self.assertRaises(webob.exc.HTTPForbidden, - self.tag_controller.delete, request, NAMESPACE1, - TAG1) - self.assertNotificationsLog([]) - - def test_tag_create(self): - request = unit_test_utils.get_fake_request() - tag = self.tag_controller.create(request, NAMESPACE1, TAG2) - self.assertEqual(TAG2, tag.name) - self.assertNotificationLog("metadef_tag.create", - [{'name': TAG2, - 'namespace': NAMESPACE1}]) - - tag = self.tag_controller.show(request, NAMESPACE1, TAG2) - self.assertEqual(TAG2, tag.name) - - def test_tag_create_overlimit_name(self): - request = unit_test_utils.get_fake_request() - - self.assertRaises(webob.exc.HTTPBadRequest, - self.tag_controller.create, - request, NAMESPACE1, 'a' * 256) - - def test_tag_create_with_4byte_character(self): - request = unit_test_utils.get_fake_request() - - self.assertRaises(webob.exc.HTTPBadRequest, - self.tag_controller.create, - request, NAMESPACE1, u'\U0001f693') - - def test_tag_create_tags(self): - request = unit_test_utils.get_fake_request() - - metadef_tags = tags.MetadefTags() - metadef_tags.tags = 
_db_tags_fixture() - output = self.tag_controller.create_tags( - request, metadef_tags, NAMESPACE1) - output = output.to_dict() - self.assertEqual(3, len(output['tags'])) - actual = set([tag.name for tag in output['tags']]) - expected = set([TAG1, TAG2, TAG3]) - self.assertEqual(expected, actual) - self.assertNotificationLog( - "metadef_tag.create", [ - {'name': TAG1, 'namespace': NAMESPACE1}, - {'name': TAG2, 'namespace': NAMESPACE1}, - {'name': TAG3, 'namespace': NAMESPACE1}, - ] - ) - - def test_tag_create_duplicate_tags(self): - request = unit_test_utils.get_fake_request() - - metadef_tags = tags.MetadefTags() - metadef_tags.tags = _db_tags_fixture([TAG4, TAG5, TAG4]) - self.assertRaises( - webob.exc.HTTPConflict, - self.tag_controller.create_tags, - request, metadef_tags, NAMESPACE1) - self.assertNotificationsLog([]) - - def test_tag_create_duplicate_with_pre_existing_tags(self): - request = unit_test_utils.get_fake_request() - - metadef_tags = tags.MetadefTags() - metadef_tags.tags = _db_tags_fixture([TAG1, TAG2, TAG3]) - output = self.tag_controller.create_tags( - request, metadef_tags, NAMESPACE1) - output = output.to_dict() - self.assertEqual(3, len(output['tags'])) - actual = set([tag.name for tag in output['tags']]) - expected = set([TAG1, TAG2, TAG3]) - self.assertEqual(expected, actual) - self.assertNotificationLog( - "metadef_tag.create", [ - {'name': TAG1, 'namespace': NAMESPACE1}, - {'name': TAG2, 'namespace': NAMESPACE1}, - {'name': TAG3, 'namespace': NAMESPACE1}, - ] - ) - - metadef_tags = tags.MetadefTags() - metadef_tags.tags = _db_tags_fixture([TAG4, TAG5, TAG4]) - self.assertRaises( - webob.exc.HTTPConflict, - self.tag_controller.create_tags, - request, metadef_tags, NAMESPACE1) - self.assertNotificationsLog([]) - - output = self.tag_controller.index(request, NAMESPACE1) - output = output.to_dict() - self.assertEqual(3, len(output['tags'])) - actual = set([tag.name for tag in output['tags']]) - expected = set([TAG1, TAG2, TAG3]) - 
self.assertEqual(expected, actual) - - def test_tag_create_conflict(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPConflict, - self.tag_controller.create, request, - NAMESPACE1, TAG1) - self.assertNotificationsLog([]) - - def test_tag_create_non_existing_namespace(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPNotFound, - self.tag_controller.create, request, - NAMESPACE4, TAG1) - self.assertNotificationsLog([]) - - def test_tag_create_non_visible_namespace(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2) - self.assertRaises(webob.exc.HTTPForbidden, - self.tag_controller.create, request, - NAMESPACE1, TAG1) - self.assertNotificationsLog([]) - - def test_tag_create_non_visible_namespace_admin(self): - request = unit_test_utils.get_fake_request(tenant=TENANT2, - is_admin=True) - tag = self.tag_controller.create(request, NAMESPACE1, TAG2) - self.assertEqual(TAG2, tag.name) - self.assertNotificationLog("metadef_tag.create", - [{'name': TAG2, - 'namespace': NAMESPACE1}]) - - tag = self.tag_controller.show(request, NAMESPACE1, TAG2) - self.assertEqual(TAG2, tag.name) - - def test_tag_update(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - - tag = self.tag_controller.show(request, NAMESPACE3, TAG1) - tag.name = TAG3 - tag = self.tag_controller.update(request, tag, NAMESPACE3, TAG1) - self.assertEqual(TAG3, tag.name) - self.assertNotificationLog("metadef_tag.update", [ - {'name': TAG3, 'namespace': NAMESPACE3} - ]) - - property = self.tag_controller.show(request, NAMESPACE3, TAG3) - self.assertEqual(TAG3, property.name) - - def test_tag_update_name(self): - request = unit_test_utils.get_fake_request() - - tag = self.tag_controller.show(request, NAMESPACE1, TAG1) - tag.name = TAG2 - tag = self.tag_controller.update(request, tag, NAMESPACE1, TAG1) - self.assertEqual(TAG2, tag.name) - self.assertNotificationLog("metadef_tag.update", [ - {'name': TAG2, 
'name_old': TAG1, 'namespace': NAMESPACE1} - ]) - - tag = self.tag_controller.show(request, NAMESPACE1, TAG2) - self.assertEqual(TAG2, tag.name) - - def test_tag_update_with_4byte_character(self): - request = unit_test_utils.get_fake_request() - - tag = self.tag_controller.show(request, NAMESPACE1, TAG1) - tag.name = u'\U0001f693' - - self.assertRaises(webob.exc.HTTPBadRequest, - self.tag_controller.update, request, tag, - NAMESPACE1, TAG1) - - def test_tag_update_conflict(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - - tag = self.tag_controller.show(request, NAMESPACE3, TAG1) - tag.name = TAG2 - self.assertRaises(webob.exc.HTTPConflict, - self.tag_controller.update, request, tag, - NAMESPACE3, TAG1) - self.assertNotificationsLog([]) - - def test_tag_update_non_existing(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - - tag = tags.MetadefTag() - tag.name = TAG1 - - self.assertRaises(webob.exc.HTTPNotFound, - self.tag_controller.update, request, tag, - NAMESPACE5, TAG1) - self.assertNotificationsLog([]) - - def test_tag_update_namespace_non_existing(self): - request = unit_test_utils.get_fake_request(tenant=TENANT3) - - tag = tags.MetadefTag() - tag.name = TAG1 - - self.assertRaises(webob.exc.HTTPNotFound, - self.tag_controller.update, request, tag, - NAMESPACE4, TAG1) - self.assertNotificationsLog([]) - - -class TestMetadefNamespaceResponseSerializers(base.IsolatedUnitTest): - - def setUp(self): - super(TestMetadefNamespaceResponseSerializers, self).setUp() - self.serializer = namespaces.ResponseSerializer(schema={}) - self.response = mock.Mock() - self.result = mock.Mock() - - def test_delete_tags(self): - self.serializer.delete_tags(self.response, self.result) - self.assertEqual(204, self.response.status_int) diff --git a/glance/tests/unit/v2/test_registry_api.py b/glance/tests/unit/v2/test_registry_api.py deleted file mode 100644 index 9994671c..00000000 --- a/glance/tests/unit/v2/test_registry_api.py +++ /dev/null @@ 
-1,1620 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import uuid - -from oslo_serialization import jsonutils -import routes -import six -from six.moves import http_client as http -import webob - -import glance.api.common -import glance.common.config -from glance.common import timeutils -import glance.context -from glance.db.sqlalchemy import api as db_api -from glance.db.sqlalchemy import models as db_models -from glance.registry.api import v2 as rserver -from glance.tests.unit import base -from glance.tests import utils as test_utils - -_gen_uuid = lambda: str(uuid.uuid4()) - -UUID1 = _gen_uuid() -UUID2 = _gen_uuid() - - -class TestRegistryRPC(base.IsolatedUnitTest): - - def setUp(self): - super(TestRegistryRPC, self).setUp() - self.mapper = routes.Mapper() - self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), - is_admin=True) - - uuid1_time = timeutils.utcnow() - uuid2_time = uuid1_time + datetime.timedelta(seconds=5) - - self.FIXTURES = [ - {'id': UUID1, - 'name': 'fake image #1', - 'status': 'active', - 'disk_format': 'ami', - 'container_format': 'ami', - 'visibility': 'shared', - 'created_at': uuid1_time, - 'updated_at': uuid1_time, - 'deleted_at': None, - 'deleted': False, - 'checksum': None, - 'min_disk': 0, - 'min_ram': 0, - 'size': 13, - 'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID1), - 'metadata': {}, 'status': 
'active'}], - 'properties': {'type': 'kernel'}}, - {'id': UUID2, - 'name': 'fake image #2', - 'status': 'active', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'visibility': 'public', - 'created_at': uuid2_time, - 'updated_at': uuid2_time, - 'deleted_at': None, - 'deleted': False, - 'checksum': None, - 'min_disk': 5, - 'min_ram': 256, - 'size': 19, - 'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID2), - 'metadata': {}, 'status': 'active'}], - 'properties': {}}] - - self.context = glance.context.RequestContext(is_admin=True) - db_api.get_engine() - self.destroy_fixtures() - self.create_fixtures() - - def tearDown(self): - """Clear the test environment""" - super(TestRegistryRPC, self).tearDown() - self.destroy_fixtures() - - def create_fixtures(self): - for fixture in self.FIXTURES: - db_api.image_create(self.context, fixture) - # We write a fake image file to the filesystem - with open("%s/%s" % (self.test_dir, fixture['id']), 'wb') as image: - image.write(b"chunk00000remainder") - image.flush() - - def destroy_fixtures(self): - # Easiest to just drop the models and re-create them... 
- db_models.unregister_models(db_api.get_engine()) - db_models.register_models(db_api.get_engine()) - - def _compare_images_and_uuids(self, uuids, images): - self.assertListEqual(uuids, [image['id'] for image in images]) - - def test_show(self): - """Tests that registry API endpoint returns the expected image.""" - fixture = {'id': UUID2, - 'name': 'fake image #2', - 'size': 19, - 'min_ram': 256, - 'min_disk': 5, - 'checksum': None} - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get', - 'kwargs': {'image_id': UUID2}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body)[0] - image = res_dict - for k, v in six.iteritems(fixture): - self.assertEqual(v, image[k]) - - def test_show_unknown(self): - """Tests the registry API endpoint returns 404 for an unknown id.""" - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get', - 'kwargs': {'image_id': _gen_uuid()}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - res_dict = jsonutils.loads(res.body)[0] - self.assertEqual('glance.common.exception.ImageNotFound', - res_dict["_error"]["cls"]) - - def test_get_index(self): - """Tests that the image_get_all command returns list of images.""" - fixture = {'id': UUID2, - 'name': 'fake image #2', - 'size': 19, - 'checksum': None} - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'filters': fixture}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - - images = jsonutils.loads(res.body)[0] - self.assertEqual(1, len(images)) - - for k, v in six.iteritems(fixture): - self.assertEqual(v, images[0][k]) - - def test_get_index_marker(self): - """Tests that the registry API returns list of public images. 
- - Must conforms to a marker query param. - """ - uuid5_time = timeutils.utcnow() + datetime.timedelta(seconds=10) - uuid4_time = uuid5_time + datetime.timedelta(seconds=5) - uuid3_time = uuid4_time + datetime.timedelta(seconds=5) - - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! #123', - 'size': 19, - 'checksum': None, - 'created_at': uuid3_time, - 'updated_at': uuid3_time} - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = {'id': UUID4, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! #123', - 'size': 20, - 'checksum': None, - 'created_at': uuid4_time, - 'updated_at': uuid4_time} - - db_api.image_create(self.context, extra_fixture) - - UUID5 = _gen_uuid() - extra_fixture = {'id': UUID5, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! 
#123', - 'size': 20, - 'checksum': None, - 'created_at': uuid5_time, - 'updated_at': uuid5_time} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'marker': UUID4, "is_public": True}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - - images = jsonutils.loads(res.body)[0] - # should be sorted by created_at desc, id desc - # page should start after marker 4 - uuid_list = [UUID5, UUID2] - self._compare_images_and_uuids(uuid_list, images) - - def test_get_index_marker_and_name_asc(self): - """Test marker and null name ascending - - Tests that the registry API returns 200 - when a marker and a null name are combined - ascending order - """ - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': None, - 'size': 19, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'marker': UUID3, 'sort_key': ['name'], - 'sort_dir': ['asc']}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - images = jsonutils.loads(res.body)[0] - self.assertEqual(2, len(images)) - - def test_get_index_marker_and_name_desc(self): - """Test marker and null name descending - - Tests that the registry API returns 200 - when a marker and a null name are combined - descending order - """ - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': None, - 'size': 19, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = 
"POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'marker': UUID3, 'sort_key': ['name'], - 'sort_dir': ['desc']}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - images = jsonutils.loads(res.body)[0] - self.assertEqual(0, len(images)) - - def test_get_index_marker_and_disk_format_asc(self): - """Test marker and null disk format ascending - - Tests that the registry API returns 200 - when a marker and a null disk_format are combined - ascending order - """ - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': None, - 'container_format': 'ovf', - 'name': 'Fake image', - 'size': 19, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'marker': UUID3, 'sort_key': ['disk_format'], - 'sort_dir': ['asc']}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - images = jsonutils.loads(res.body)[0] - self.assertEqual(2, len(images)) - - def test_get_index_marker_and_disk_format_desc(self): - """Test marker and null disk format descending - - Tests that the registry API returns 200 - when a marker and a null disk_format are combined - descending order - """ - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': None, - 'container_format': 'ovf', - 'name': 'Fake image', - 'size': 19, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'marker': UUID3, 'sort_key': ['disk_format'], - 'sort_dir': ['desc']}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - - res = req.get_response(self.api) - self.assertEqual(http.OK, 
res.status_int) - images = jsonutils.loads(res.body)[0] - self.assertEqual(0, len(images)) - - def test_get_index_marker_and_container_format_asc(self): - """Test marker and null container format ascending - - Tests that the registry API returns 200 - when a marker and a null container_format are combined - ascending order - """ - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': None, - 'name': 'Fake image', - 'size': 19, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'marker': UUID3, 'sort_key': ['container_format'], - 'sort_dir': ['asc']}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - images = jsonutils.loads(res.body)[0] - self.assertEqual(2, len(images)) - - def test_get_index_marker_and_container_format_desc(self): - """Test marker and null container format descending - - Tests that the registry API returns 200 - when a marker and a null container_format are combined - descending order - """ - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': None, - 'name': 'Fake image', - 'size': 19, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'marker': UUID3, 'sort_key': ['container_format'], - 'sort_dir': ['desc']}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - images = jsonutils.loads(res.body)[0] - self.assertEqual(0, len(images)) - - def test_get_index_unknown_marker(self): - """Tests the registry API returns a NotFound with unknown 
marker.""" - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'marker': _gen_uuid()}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - result = jsonutils.loads(res.body)[0] - - self.assertIn("_error", result) - self.assertIn("NotFound", result["_error"]["cls"]) - - def test_get_index_limit(self): - """Tests that the registry API returns list of public images. - - Must conforms to a limit query param. - """ - uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=10) - uuid4_time = uuid3_time + datetime.timedelta(seconds=5) - - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! #123', - 'size': 19, - 'checksum': None, - 'created_at': uuid3_time, - 'updated_at': uuid3_time} - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = {'id': UUID4, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! #123', - 'size': 20, - 'checksum': None, - 'created_at': uuid4_time, - 'updated_at': uuid4_time} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'limit': 1}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - images = jsonutils.loads(res.body)[0] - self.assertEqual(http.OK, res.status_int) - - self._compare_images_and_uuids([UUID4], images) - - def test_get_index_limit_marker(self): - """Tests that the registry API returns list of public images. - - Must conforms to limit and marker query params. 
- """ - uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=10) - uuid4_time = uuid3_time + datetime.timedelta(seconds=5) - - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! #123', - 'size': 19, - 'checksum': None, - 'created_at': uuid3_time, - 'updated_at': uuid3_time} - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = {'id': _gen_uuid(), - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! #123', - 'size': 20, - 'checksum': None, - 'created_at': uuid4_time, - 'updated_at': uuid4_time} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'marker': UUID3, 'limit': 1}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - res_dict = jsonutils.loads(res.body)[0] - self.assertEqual(http.OK, res.status_int) - - images = res_dict - self._compare_images_and_uuids([UUID2], images) - - def test_get_index_filter_name(self): - """Tests that the registry API returns list of public images. - - Use a specific name. This is really a sanity check, filtering is - tested more in-depth using /images/detail - - """ - extra_fixture = {'id': _gen_uuid(), - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! #123', - 'size': 19, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = {'id': _gen_uuid(), - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! 
#123', - 'size': 20, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'filters': {'name': 'new name! #123'}}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - res_dict = jsonutils.loads(res.body)[0] - self.assertEqual(http.OK, res.status_int) - - images = res_dict - self.assertEqual(2, len(images)) - - for image in images: - self.assertEqual('new name! #123', image['name']) - - def test_get_index_filter_on_user_defined_properties(self): - """Tests that the registry API returns list of public images. - - Use a specific user-defined properties. - """ - properties = {'distro': 'ubuntu', 'arch': 'i386', 'type': 'kernel'} - extra_id = _gen_uuid() - extra_fixture = {'id': extra_id, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'image-extra-1', - 'size': 19, 'properties': properties, - 'checksum': None} - db_api.image_create(self.context, extra_fixture) - - # testing with a common property. - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'filters': {'type': 'kernel'}}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - images = jsonutils.loads(res.body)[0] - self.assertEqual(2, len(images)) - self.assertEqual(extra_id, images[0]['id']) - self.assertEqual(UUID1, images[1]['id']) - - # testing with a non-existent value for a common property. - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'filters': {'type': 'random'}}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - images = jsonutils.loads(res.body)[0] - self.assertEqual(0, len(images)) - - # testing with a non-existent value for a common property. 
- cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'filters': {'type': 'random'}}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - images = jsonutils.loads(res.body)[0] - self.assertEqual(0, len(images)) - - # testing with a non-existent property. - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'filters': {'poo': 'random'}}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - images = jsonutils.loads(res.body)[0] - self.assertEqual(0, len(images)) - - # testing with multiple existing properties. - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'filters': {'type': 'kernel', 'distro': 'ubuntu'}}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - images = jsonutils.loads(res.body)[0] - self.assertEqual(1, len(images)) - self.assertEqual(extra_id, images[0]['id']) - - # testing with multiple existing properties but non-existent values. - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'filters': {'type': 'random', 'distro': 'random'}}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - images = jsonutils.loads(res.body)[0] - self.assertEqual(0, len(images)) - - # testing with multiple non-existing properties. - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'filters': {'typo': 'random', 'poo': 'random'}}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - images = jsonutils.loads(res.body)[0] - self.assertEqual(0, len(images)) - - # testing with one existing property and the other non-existing. 
- cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'filters': {'type': 'kernel', 'poo': 'random'}}, - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - images = jsonutils.loads(res.body)[0] - self.assertEqual(0, len(images)) - - def test_get_index_sort_default_created_at_desc(self): - """Tests that the registry API returns list of public images. - - Must conforms to a default sort key/dir. - """ - uuid5_time = timeutils.utcnow() + datetime.timedelta(seconds=10) - uuid4_time = uuid5_time + datetime.timedelta(seconds=5) - uuid3_time = uuid4_time + datetime.timedelta(seconds=5) - - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! #123', - 'size': 19, - 'checksum': None, - 'created_at': uuid3_time, - 'updated_at': uuid3_time} - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = {'id': UUID4, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! #123', - 'size': 20, - 'checksum': None, - 'created_at': uuid4_time, - 'updated_at': uuid4_time} - - db_api.image_create(self.context, extra_fixture) - - UUID5 = _gen_uuid() - extra_fixture = {'id': UUID5, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! 
#123', - 'size': 20, - 'checksum': None, - 'created_at': uuid5_time, - 'updated_at': uuid5_time} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - res_dict = jsonutils.loads(res.body)[0] - self.assertEqual(http.OK, res.status_int) - - images = res_dict - # (flaper87)registry's v1 forced is_public to True - # when no value was specified. This is not - # the default behaviour anymore. - uuid_list = [UUID3, UUID4, UUID5, UUID2, UUID1] - self._compare_images_and_uuids(uuid_list, images) - - def test_get_index_sort_name_asc(self): - """Tests that the registry API returns list of public images. - - Must be sorted alphabetically by name in ascending order. - """ - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'asdf', - 'size': 19, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = {'id': UUID4, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'xyz', - 'size': 20, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - UUID5 = _gen_uuid() - extra_fixture = {'id': UUID5, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': None, - 'size': 20, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'sort_key': ['name'], 'sort_dir': ['asc']} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body)[0] - - images = res_dict - uuid_list = [UUID5, 
UUID3, UUID1, UUID2, UUID4] - - self._compare_images_and_uuids(uuid_list, images) - - def test_get_index_sort_status_desc(self): - """Tests that the registry API returns list of public images. - - Must be sorted alphabetically by status in descending order. - """ - uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) - - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'queued', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'asdf', - 'size': 19, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = {'id': UUID4, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'xyz', - 'size': 20, - 'checksum': None, - 'created_at': uuid4_time, - 'updated_at': uuid4_time} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'sort_key': ['status'], 'sort_dir': ['asc']} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body)[0] - - images = res_dict - uuid_list = [UUID1, UUID2, UUID4, UUID3] - self._compare_images_and_uuids(uuid_list, images) - - def test_get_index_sort_disk_format_asc(self): - """Tests that the registry API returns list of public images. - - Must be sorted alphabetically by disk_format in ascending order. 
- """ - uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=5) - - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'ami', - 'container_format': 'ami', - 'name': 'asdf', - 'size': 19, - 'checksum': None, - 'created_at': uuid3_time, - 'updated_at': uuid3_time} - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = {'id': UUID4, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vdi', - 'container_format': 'ovf', - 'name': 'xyz', - 'size': 20, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'sort_key': ['disk_format'], 'sort_dir': ['asc']} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body)[0] - - images = res_dict - uuid_list = [UUID1, UUID3, UUID4, UUID2] - self._compare_images_and_uuids(uuid_list, images) - - def test_get_index_sort_container_format_desc(self): - """Tests that the registry API returns list of public images. - - Must be sorted alphabetically by container_format in descending order. 
- """ - uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=5) - - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'ami', - 'container_format': 'ami', - 'name': 'asdf', - 'size': 19, - 'checksum': None, - 'created_at': uuid3_time, - 'updated_at': uuid3_time} - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = {'id': UUID4, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'iso', - 'container_format': 'bare', - 'name': 'xyz', - 'size': 20, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'sort_key': ['container_format'], - 'sort_dir': ['desc']} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body)[0] - - images = res_dict - uuid_list = [UUID2, UUID4, UUID3, UUID1] - self._compare_images_and_uuids(uuid_list, images) - - def test_get_index_sort_size_asc(self): - """Tests that the registry API returns list of public images. - - Must be sorted by size in ascending order. 
- """ - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'ami', - 'container_format': 'ami', - 'name': 'asdf', - 'size': 100, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = {'id': UUID4, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'iso', - 'container_format': 'bare', - 'name': 'xyz', - 'size': 2, - 'checksum': None} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'sort_key': ['size'], - 'sort_dir': ['asc']} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body)[0] - - images = res_dict - uuid_list = [UUID4, UUID1, UUID2, UUID3] - self._compare_images_and_uuids(uuid_list, images) - - def test_get_index_sort_created_at_asc(self): - """Tests that the registry API returns list of public images. - - Must be sorted by created_at in ascending order. - """ - uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) - uuid3_time = uuid4_time + datetime.timedelta(seconds=5) - - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! #123', - 'size': 19, - 'checksum': None, - 'created_at': uuid3_time, - 'updated_at': uuid3_time} - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = {'id': UUID4, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! 
#123', - 'size': 20, - 'checksum': None, - 'created_at': uuid4_time, - 'updated_at': uuid4_time} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'sort_key': ['created_at'], - 'sort_dir': ['asc']} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body)[0] - - images = res_dict - uuid_list = [UUID1, UUID2, UUID4, UUID3] - self._compare_images_and_uuids(uuid_list, images) - - def test_get_index_sort_updated_at_desc(self): - """Tests that the registry API returns list of public images. - - Must be sorted by updated_at in descending order. - """ - uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) - uuid3_time = uuid4_time + datetime.timedelta(seconds=5) - - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! #123', - 'size': 19, - 'checksum': None, - 'created_at': None, - 'updated_at': uuid3_time} - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = {'id': UUID4, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'new name! 
#123', - 'size': 20, - 'checksum': None, - 'created_at': None, - 'updated_at': uuid4_time} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'sort_key': ['updated_at'], - 'sort_dir': ['desc']} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body)[0] - - images = res_dict - uuid_list = [UUID3, UUID4, UUID2, UUID1] - self._compare_images_and_uuids(uuid_list, images) - - def test_get_index_sort_multiple_keys_one_sort_dir(self): - """ - Tests that the registry API returns list of - public images sorted by name-size and size-name with ascending - sort direction. - """ - uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) - uuid3_time = uuid4_time + datetime.timedelta(seconds=5) - - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'asdf', - 'size': 19, - 'checksum': None, - 'created_at': None, - 'updated_at': uuid3_time} - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = {'id': UUID4, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'xyz', - 'size': 20, - 'checksum': None, - 'created_at': None, - 'updated_at': uuid4_time} - - db_api.image_create(self.context, extra_fixture) - - UUID5 = _gen_uuid() - extra_fixture = {'id': UUID5, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'asdf', - 'size': 20, - 'checksum': None, - 'created_at': None, - 'updated_at': uuid4_time} - - db_api.image_create(self.context, extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'sort_key': ['name', 
'size'], - 'sort_dir': ['asc']} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body)[0] - - images = res_dict - uuid_list = [UUID3, UUID5, UUID1, UUID2, UUID4] - self._compare_images_and_uuids(uuid_list, images) - - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'sort_key': ['size', 'name'], - 'sort_dir': ['asc']} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body)[0] - - images = res_dict - uuid_list = [UUID1, UUID3, UUID2, UUID5, UUID4] - self._compare_images_and_uuids(uuid_list, images) - - def test_get_index_sort_multiple_keys_multiple_sort_dirs(self): - """ - Tests that the registry API returns list of - public images sorted by name-size and size-name - with ascending and descending directions. - """ - uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) - uuid3_time = uuid4_time + datetime.timedelta(seconds=5) - - UUID3 = _gen_uuid() - extra_fixture = {'id': UUID3, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'asdf', - 'size': 19, - 'checksum': None, - 'created_at': None, - 'updated_at': uuid3_time} - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = {'id': UUID4, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'xyz', - 'size': 20, - 'checksum': None, - 'created_at': None, - 'updated_at': uuid4_time} - - db_api.image_create(self.context, extra_fixture) - - UUID5 = _gen_uuid() - extra_fixture = {'id': UUID5, - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'name': 'asdf', - 'size': 20, - 'checksum': None, - 'created_at': None, - 'updated_at': uuid4_time} - - db_api.image_create(self.context, 
extra_fixture) - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'sort_key': ['name', 'size'], - 'sort_dir': ['desc', 'asc']} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body)[0] - - images = res_dict - uuid_list = [UUID4, UUID2, UUID1, UUID3, UUID5] - self._compare_images_and_uuids(uuid_list, images) - - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'sort_key': ['size', 'name'], - 'sort_dir': ['desc', 'asc']} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body)[0] - - images = res_dict - uuid_list = [UUID5, UUID4, UUID3, UUID2, UUID1] - self._compare_images_and_uuids(uuid_list, images) - - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'sort_key': ['name', 'size'], - 'sort_dir': ['asc', 'desc']} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body)[0] - - images = res_dict - uuid_list = [UUID5, UUID3, UUID1, UUID2, UUID4] - self._compare_images_and_uuids(uuid_list, images) - - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'sort_key': ['size', 'name'], - 'sort_dir': ['asc', 'desc']} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - res_dict = jsonutils.loads(res.body)[0] - - images = res_dict - uuid_list = [UUID1, UUID2, UUID3, UUID4, UUID5] - self._compare_images_and_uuids(uuid_list, images) - - def test_create_image(self): - """Tests that the registry API creates the image""" - fixture = {'name': 'fake public image', - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf'} - - req = webob.Request.blank('/rpc') - req.method = "POST" 
- cmd = [{ - 'command': 'image_create', - 'kwargs': {'values': fixture} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - - self.assertEqual(http.OK, res.status_int) - - res_dict = jsonutils.loads(res.body)[0] - - for k, v in six.iteritems(fixture): - self.assertEqual(v, res_dict[k]) - - # Test status was updated properly - self.assertEqual('active', res_dict['status']) - - def test_create_image_with_min_disk(self): - """Tests that the registry API creates the image""" - fixture = {'name': 'fake public image', - 'visibility': 'public', - 'status': 'active', - 'min_disk': 5, - 'disk_format': 'vhd', - 'container_format': 'ovf'} - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_create', - 'kwargs': {'values': fixture} - }] - req.body = jsonutils.dump_as_bytes(cmd) - - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - - res_dict = jsonutils.loads(res.body)[0] - - self.assertEqual(fixture['min_disk'], res_dict['min_disk']) - - def test_create_image_with_min_ram(self): - """Tests that the registry API creates the image""" - fixture = {'name': 'fake public image', - 'visibility': 'public', - 'status': 'active', - 'min_ram': 256, - 'disk_format': 'vhd', - 'container_format': 'ovf'} - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_create', - 'kwargs': {'values': fixture} - }] - req.body = jsonutils.dump_as_bytes(cmd) - - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - - res_dict = jsonutils.loads(res.body)[0] - - self.assertEqual(fixture['min_ram'], res_dict['min_ram']) - - def test_create_image_with_min_ram_default(self): - """Tests that the registry API creates the image""" - fixture = {'name': 'fake public image', - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf'} - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 
'image_create', - 'kwargs': {'values': fixture} - }] - req.body = jsonutils.dump_as_bytes(cmd) - - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - - res_dict = jsonutils.loads(res.body)[0] - - self.assertEqual(0, res_dict['min_ram']) - - def test_create_image_with_min_disk_default(self): - """Tests that the registry API creates the image""" - fixture = {'name': 'fake public image', - 'status': 'active', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf'} - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_create', - 'kwargs': {'values': fixture} - }] - req.body = jsonutils.dump_as_bytes(cmd) - - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - - res_dict = jsonutils.loads(res.body)[0] - - self.assertEqual(0, res_dict['min_disk']) - - def test_update_image(self): - """Tests that the registry API updates the image""" - fixture = {'name': 'fake public image #2', - 'min_disk': 5, - 'min_ram': 256, - 'disk_format': 'raw'} - - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_update', - 'kwargs': {'values': fixture, - 'image_id': UUID2} - }] - req.body = jsonutils.dump_as_bytes(cmd) - - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - - res_dict = jsonutils.loads(res.body)[0] - - self.assertNotEqual(res_dict['created_at'], - res_dict['updated_at']) - - for k, v in six.iteritems(fixture): - self.assertEqual(v, res_dict[k]) - - def _send_request(self, command, kwargs, method): - req = webob.Request.blank('/rpc') - req.method = method - cmd = [{'command': command, 'kwargs': kwargs}] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - res_dict = jsonutils.loads(res.body)[0] - return res.status_int, res_dict - - def _expect_fail(self, command, kwargs, error_cls, method='POST'): - # on any exception status_int is always 200, so have to check _error - # dict - 
code, res_dict = self._send_request(command, kwargs, method) - self.assertIn('_error', res_dict) - self.assertEqual(error_cls, res_dict['_error']['cls']) - return res_dict - - def _expect_ok(self, command, kwargs, method, expected_status=http.OK): - code, res_dict = self._send_request(command, kwargs) - self.assertEqual(expected_status, code) - return res_dict - - def test_create_image_bad_name(self): - fixture = {'name': u'A bad name \U0001fff2', 'status': 'queued'} - self._expect_fail('image_create', - {'values': fixture}, - 'glance.common.exception.Invalid') - - def test_create_image_bad_location(self): - fixture = {'status': 'queued', - 'locations': [{'url': u'file:///tmp/tests/\U0001fee2', - 'metadata': {}, - 'status': 'active'}]} - self._expect_fail('image_create', - {'values': fixture}, - 'glance.common.exception.Invalid') - - def test_create_image_bad_property(self): - fixture = {'status': 'queued', - 'properties': {'ok key': u' bad value \U0001f2aa'}} - self._expect_fail('image_create', - {'values': fixture}, - 'glance.common.exception.Invalid') - fixture = {'status': 'queued', - 'properties': {u'invalid key \U00010020': 'ok value'}} - self._expect_fail('image_create', - {'values': fixture}, - 'glance.common.exception.Invalid') - - def test_update_image_bad_tag(self): - self._expect_fail('image_tag_create', - {'value': u'\U0001fff2', 'image_id': UUID2}, - 'glance.common.exception.Invalid') - - def test_update_image_bad_name(self): - fixture = {'name': u'A bad name \U0001fff2'} - self._expect_fail('image_update', - {'values': fixture, 'image_id': UUID1}, - 'glance.common.exception.Invalid') - - def test_update_image_bad_location(self): - fixture = {'locations': - [{'url': u'file:///tmp/glance-tests/\U0001fee2', - 'metadata': {}, - 'status': 'active'}]} - self._expect_fail('image_update', - {'values': fixture, 'image_id': UUID1}, - 'glance.common.exception.Invalid') - - def test_update_bad_property(self): - fixture = {'properties': {'ok key': u' bad value 
\U0001f2aa'}} - self._expect_fail('image_update', - {'values': fixture, 'image_id': UUID2}, - 'glance.common.exception.Invalid') - fixture = {'properties': {u'invalid key \U00010020': 'ok value'}} - self._expect_fail('image_update', - {'values': fixture, 'image_id': UUID2}, - 'glance.common.exception.Invalid') - - def test_delete_image(self): - """Tests that the registry API deletes the image""" - - # Grab the original number of images - req = webob.Request.blank('/rpc') - req.method = "POST" - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'filters': {'deleted': False}} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - res_dict = jsonutils.loads(res.body)[0] - self.assertEqual(http.OK, res.status_int) - - orig_num_images = len(res_dict) - - # Delete image #2 - cmd = [{ - 'command': 'image_destroy', - 'kwargs': {'image_id': UUID2} - }] - req.body = jsonutils.dump_as_bytes(cmd) - - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - - # Verify one less image - cmd = [{ - 'command': 'image_get_all', - 'kwargs': {'filters': {'deleted': False}} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - res_dict = jsonutils.loads(res.body)[0] - self.assertEqual(http.OK, res.status_int) - - new_num_images = len(res_dict) - self.assertEqual(new_num_images, orig_num_images - 1) - - def test_delete_image_response(self): - """Tests that the registry API delete returns the image metadata""" - - image = self.FIXTURES[0] - req = webob.Request.blank('/rpc') - req.method = 'POST' - cmd = [{ - 'command': 'image_destroy', - 'kwargs': {'image_id': image['id']} - }] - req.body = jsonutils.dump_as_bytes(cmd) - res = req.get_response(self.api) - - self.assertEqual(http.OK, res.status_int) - deleted_image = jsonutils.loads(res.body)[0] - - self.assertEqual(image['id'], deleted_image['id']) - self.assertTrue(deleted_image['deleted']) - self.assertTrue(deleted_image['deleted_at']) - - def 
test_get_image_members(self): - """Tests members listing for existing images.""" - req = webob.Request.blank('/rpc') - req.method = 'POST' - cmd = [{ - 'command': 'image_member_find', - 'kwargs': {'image_id': UUID2} - }] - req.body = jsonutils.dump_as_bytes(cmd) - - res = req.get_response(self.api) - self.assertEqual(http.OK, res.status_int) - - memb_list = jsonutils.loads(res.body)[0] - self.assertEqual(0, len(memb_list)) diff --git a/glance/tests/unit/v2/test_registry_client.py b/glance/tests/unit/v2/test_registry_client.py deleted file mode 100644 index 6d5b11bc..00000000 --- a/glance/tests/unit/v2/test_registry_client.py +++ /dev/null @@ -1,779 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for Glance Registry's client. - -This tests are temporary and will be removed once -the registry's driver tests will be added. 
-""" - -import copy -import datetime -import os -import uuid - -from mock import patch -from six.moves import reload_module - -from glance.common import config -from glance.common import exception -from glance.common import timeutils -from glance import context -from glance.db.sqlalchemy import api as db_api -from glance.i18n import _ -from glance.registry.api import v2 as rserver -import glance.registry.client.v2.api as rapi -from glance.registry.client.v2.api import client as rclient -from glance.tests.unit import base -from glance.tests import utils as test_utils - -_gen_uuid = lambda: str(uuid.uuid4()) - -UUID1 = str(uuid.uuid4()) -UUID2 = str(uuid.uuid4()) - -# NOTE(bcwaldon): needed to init config_dir cli opt -config.parse_args(args=[]) - - -class TestRegistryV2Client(base.IsolatedUnitTest, - test_utils.RegistryAPIMixIn): - """Test proper actions made against a registry service. - - Test for both valid and invalid requests. - """ - - # Registry server to user - # in the stub. - registry = rserver - - def setUp(self): - """Establish a clean test environment""" - super(TestRegistryV2Client, self).setUp() - db_api.get_engine() - self.context = context.RequestContext(is_admin=True) - uuid1_time = timeutils.utcnow() - uuid2_time = uuid1_time + datetime.timedelta(seconds=5) - self.FIXTURES = [ - self.get_extra_fixture( - id=UUID1, name='fake image #1', visibility='shared', - disk_format='ami', container_format='ami', size=13, - virtual_size=26, properties={'type': 'kernel'}, - location="swift://user:passwd@acct/container/obj.tar.0", - created_at=uuid1_time), - self.get_extra_fixture(id=UUID2, name='fake image #2', - properties={}, size=19, virtual_size=38, - location="file:///tmp/glance-tests/2", - created_at=uuid2_time)] - self.destroy_fixtures() - self.create_fixtures() - self.client = rclient.RegistryClient("0.0.0.0") - - def tearDown(self): - """Clear the test environment""" - super(TestRegistryV2Client, self).tearDown() - self.destroy_fixtures() - - def 
test_image_get_index(self): - """Test correct set of public image returned""" - images = self.client.image_get_all() - self.assertEqual(2, len(images)) - - def test_create_image_with_null_min_disk_min_ram(self): - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf', min_disk=None, - min_ram=None) - db_api.image_create(self.context, extra_fixture) - image = self.client.image_get(image_id=UUID3) - self.assertEqual(0, image["min_ram"]) - self.assertEqual(0, image["min_disk"]) - - def test_get_index_sort_name_asc(self): - """Tests that the registry API returns list of public images. - - Must be sorted alphabetically by name in ascending order. - """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf') - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='xyz') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.image_get_all(sort_key=['name'], - sort_dir=['asc']) - - self.assertEqualImages(images, (UUID3, UUID1, UUID2, UUID4), - unjsonify=False) - - def test_get_index_sort_status_desc(self): - """Tests that the registry API returns list of public images. - - Must be sorted alphabetically by status in descending order. - """ - uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) - - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf', - status='queued') - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='xyz', - created_at=uuid4_time) - - db_api.image_create(self.context, extra_fixture) - - images = self.client.image_get_all(sort_key=['status'], - sort_dir=['desc']) - - self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1), - unjsonify=False) - - def test_get_index_sort_disk_format_asc(self): - """Tests that the registry API returns list of public images. 
- - Must besorted alphabetically by disk_format in ascending order. - """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf', - disk_format='ami', - container_format='ami') - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='xyz', - disk_format='vdi') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.image_get_all(sort_key=['disk_format'], - sort_dir=['asc']) - - self.assertEqualImages(images, (UUID1, UUID3, UUID4, UUID2), - unjsonify=False) - - def test_get_index_sort_container_format_desc(self): - """Tests that the registry API returns list of public images. - - Must be sorted alphabetically by container_format in descending order. - """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf', - disk_format='ami', - container_format='ami') - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='xyz', - disk_format='iso', - container_format='bare') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.image_get_all(sort_key=['container_format'], - sort_dir=['desc']) - - self.assertEqualImages(images, (UUID2, UUID4, UUID3, UUID1), - unjsonify=False) - - def test_get_index_sort_size_asc(self): - """Tests that the registry API returns list of public images. - - Must be sorted by size in ascending order. 
- """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf', - disk_format='ami', - container_format='ami', - size=100, virtual_size=200) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='asdf', - disk_format='iso', - container_format='bare', - size=2, virtual_size=4) - - db_api.image_create(self.context, extra_fixture) - - images = self.client.image_get_all(sort_key=['size'], sort_dir=['asc']) - - self.assertEqualImages(images, (UUID4, UUID1, UUID2, UUID3), - unjsonify=False) - - def test_get_index_sort_created_at_asc(self): - """Tests that the registry API returns list of public images. - - Must be sorted by created_at in ascending order. - """ - uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) - uuid3_time = uuid4_time + datetime.timedelta(seconds=5) - - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, created_at=uuid3_time) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, created_at=uuid4_time) - - db_api.image_create(self.context, extra_fixture) - - images = self.client.image_get_all(sort_key=['created_at'], - sort_dir=['asc']) - - self.assertEqualImages(images, (UUID1, UUID2, UUID4, UUID3), - unjsonify=False) - - def test_get_index_sort_updated_at_desc(self): - """Tests that the registry API returns list of public images. - - Must be sorted by updated_at in descending order. 
- """ - uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) - uuid3_time = uuid4_time + datetime.timedelta(seconds=5) - - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, created_at=None, - updated_at=uuid3_time) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, created_at=None, - updated_at=uuid4_time) - - db_api.image_create(self.context, extra_fixture) - - images = self.client.image_get_all(sort_key=['updated_at'], - sort_dir=['desc']) - - self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1), - unjsonify=False) - - def test_get_image_details_sort_multiple_keys(self): - """ - Tests that a detailed call returns list of - public images sorted by name-size and - size-name in ascending order. - """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf', - size=19) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name=u'xyz', - size=20) - - db_api.image_create(self.context, extra_fixture) - - UUID5 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID5, name=u'asdf', - size=20) - - db_api.image_create(self.context, extra_fixture) - - images = self.client.image_get_all(sort_key=['name', 'size'], - sort_dir=['asc']) - - self.assertEqualImages(images, (UUID3, UUID5, UUID1, UUID2, UUID4), - unjsonify=False) - - images = self.client.image_get_all(sort_key=['size', 'name'], - sort_dir=['asc']) - - self.assertEqualImages(images, (UUID1, UUID3, UUID2, UUID5, UUID4), - unjsonify=False) - - def test_get_image_details_sort_multiple_dirs(self): - """ - Tests that a detailed call returns list of - public images sorted by name-size and - size-name in ascending and descending orders. 
- """ - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='asdf', - size=19) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='xyz', - size=20) - - db_api.image_create(self.context, extra_fixture) - - UUID5 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID5, name='asdf', - size=20) - - db_api.image_create(self.context, extra_fixture) - - images = self.client.image_get_all(sort_key=['name', 'size'], - sort_dir=['asc', 'desc']) - - self.assertEqualImages(images, (UUID5, UUID3, UUID1, UUID2, UUID4), - unjsonify=False) - - images = self.client.image_get_all(sort_key=['name', 'size'], - sort_dir=['desc', 'asc']) - - self.assertEqualImages(images, (UUID4, UUID2, UUID1, UUID3, UUID5), - unjsonify=False) - - images = self.client.image_get_all(sort_key=['size', 'name'], - sort_dir=['asc', 'desc']) - - self.assertEqualImages(images, (UUID1, UUID2, UUID3, UUID4, UUID5), - unjsonify=False) - - images = self.client.image_get_all(sort_key=['size', 'name'], - sort_dir=['desc', 'asc']) - - self.assertEqualImages(images, (UUID5, UUID4, UUID3, UUID2, UUID1), - unjsonify=False) - - def test_image_get_index_marker(self): - """Test correct set of images returned with marker param.""" - uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) - uuid3_time = uuid4_time + datetime.timedelta(seconds=5) - - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='new name! #123', - status='saving', - created_at=uuid3_time) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='new name! 
#125', - status='saving', - created_at=uuid4_time) - - db_api.image_create(self.context, extra_fixture) - - images = self.client.image_get_all(marker=UUID3) - - self.assertEqualImages(images, (UUID4, UUID2, UUID1), unjsonify=False) - - def test_image_get_index_limit(self): - """Test correct number of images returned with limit param.""" - extra_fixture = self.get_fixture(id=_gen_uuid(), - name='new name! #123', - status='saving') - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), - name='new name! #125', - status='saving') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.image_get_all(limit=2) - self.assertEqual(2, len(images)) - - def test_image_get_index_marker_limit(self): - """Test correct set of images returned with marker/limit params.""" - uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) - uuid3_time = uuid4_time + datetime.timedelta(seconds=5) - - UUID3 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID3, name='new name! #123', - status='saving', - created_at=uuid3_time) - - db_api.image_create(self.context, extra_fixture) - - UUID4 = _gen_uuid() - extra_fixture = self.get_fixture(id=UUID4, name='new name! #125', - status='saving', - created_at=uuid4_time) - - db_api.image_create(self.context, extra_fixture) - - images = self.client.image_get_all(marker=UUID4, limit=1) - - self.assertEqualImages(images, (UUID2,), unjsonify=False) - - def test_image_get_index_limit_None(self): - """Test correct set of images returned with limit param == None.""" - extra_fixture = self.get_fixture(id=_gen_uuid(), - name='new name! #123', - status='saving') - - db_api.image_create(self.context, extra_fixture) - - extra_fixture = self.get_fixture(id=_gen_uuid(), - name='new name! 
#125', - status='saving') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.image_get_all(limit=None) - self.assertEqual(4, len(images)) - - def test_image_get_index_by_name(self): - """Test correct set of public, name-filtered image returned. - - This is just a sanity check, we test the details call more in-depth. - """ - extra_fixture = self.get_fixture(id=_gen_uuid(), - name='new name! #123') - - db_api.image_create(self.context, extra_fixture) - - images = self.client.image_get_all(filters={'name': 'new name! #123'}) - self.assertEqual(1, len(images)) - - for image in images: - self.assertEqual('new name! #123', image['name']) - - def test_image_get_is_public_v2(self): - """Tests that a detailed call can be filtered by a property""" - extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving', - properties={'is_public': 'avalue'}) - - context = copy.copy(self.context) - db_api.image_create(context, extra_fixture) - - filters = {'is_public': 'avalue'} - images = self.client.image_get_all(filters=filters) - self.assertEqual(1, len(images)) - - for image in images: - self.assertEqual('avalue', image['properties'][0]['value']) - - def test_image_get(self): - """Tests that the detailed info about an image returned""" - fixture = self.get_fixture(id=UUID1, name='fake image #1', - visibility='shared', - size=13, virtual_size=26, - disk_format='ami', container_format='ami') - - data = self.client.image_get(image_id=UUID1) - - for k, v in fixture.items(): - el = data[k] - self.assertEqual(v, data[k], - "Failed v != data[k] where v = %(v)s and " - "k = %(k)s and data[k] = %(el)s" % - dict(v=v, k=k, el=el)) - - def test_image_get_non_existing(self): - """Tests that NotFound is raised when getting a non-existing image""" - self.assertRaises(exception.NotFound, - self.client.image_get, - image_id=_gen_uuid()) - - def test_image_create_basic(self): - """Tests that we can add image metadata and returns the new id""" - fixture = 
self.get_fixture() - - new_image = self.client.image_create(values=fixture) - - # Test all other attributes set - data = self.client.image_get(image_id=new_image['id']) - - for k, v in fixture.items(): - self.assertEqual(v, data[k]) - - # Test status was updated properly - self.assertIn('status', data) - self.assertEqual('active', data['status']) - - def test_image_create_with_properties(self): - """Tests that we can add image metadata with properties""" - fixture = self.get_fixture(location="file:///tmp/glance-tests/2", - properties={'distro': 'Ubuntu 10.04 LTS'}) - - new_image = self.client.image_create(values=fixture) - - self.assertIn('properties', new_image) - self.assertEqual(new_image['properties'][0]['value'], - fixture['properties']['distro']) - - del fixture['location'] - del fixture['properties'] - - for k, v in fixture.items(): - self.assertEqual(v, new_image[k]) - - # Test status was updated properly - self.assertIn('status', new_image.keys()) - self.assertEqual('active', new_image['status']) - - def test_image_create_already_exists(self): - """Tests proper exception is raised if image with ID already exists""" - fixture = self.get_fixture(id=UUID2, - location="file:///tmp/glance-tests/2") - - self.assertRaises(exception.Duplicate, - self.client.image_create, - values=fixture) - - def test_image_create_with_bad_status(self): - """Tests proper exception is raised if a bad status is set""" - fixture = self.get_fixture(status='bad status', - location="file:///tmp/glance-tests/2") - - self.assertRaises(exception.Invalid, - self.client.image_create, - values=fixture) - - def test_image_update(self): - """Tests that the registry API updates the image""" - fixture = {'name': 'fake public image #2', - 'disk_format': 'vmdk', - 'status': 'saving'} - - self.assertTrue(self.client.image_update(image_id=UUID2, - values=fixture)) - - # Test all other attributes set - data = self.client.image_get(image_id=UUID2) - - for k, v in fixture.items(): - self.assertEqual(v, 
data[k]) - - def test_image_update_conflict(self): - """Tests that the registry API updates the image""" - next_state = 'saving' - fixture = {'name': 'fake public image #2', - 'disk_format': 'vmdk', - 'status': next_state} - - image = self.client.image_get(image_id=UUID2) - current = image['status'] - self.assertEqual('active', current) - - # image is in 'active' state so this should cause a failure. - from_state = 'saving' - - self.assertRaises(exception.Conflict, self.client.image_update, - image_id=UUID2, values=fixture, - from_state=from_state) - - try: - self.client.image_update(image_id=UUID2, values=fixture, - from_state=from_state) - except exception.Conflict as exc: - msg = (_('cannot transition from %(current)s to ' - '%(next)s in update (wanted ' - 'from_state=%(from)s)') % - {'current': current, 'next': next_state, - 'from': from_state}) - self.assertEqual(str(exc), msg) - - def test_image_update_with_invalid_min_disk(self): - """Tests that the registry API updates the image""" - next_state = 'saving' - fixture = {'name': 'fake image', - 'disk_format': 'vmdk', - 'min_disk': 2 ** 31 + 1, - 'status': next_state} - - image = self.client.image_get(image_id=UUID2) - current = image['status'] - self.assertEqual('active', current) - - # image is in 'active' state so this should cause a failure. - from_state = 'saving' - - self.assertRaises(exception.Invalid, self.client.image_update, - image_id=UUID2, values=fixture, - from_state=from_state) - - def test_image_update_with_invalid_min_ram(self): - """Tests that the registry API updates the image""" - next_state = 'saving' - fixture = {'name': 'fake image', - 'disk_format': 'vmdk', - 'min_ram': 2 ** 31 + 1, - 'status': next_state} - - image = self.client.image_get(image_id=UUID2) - current = image['status'] - self.assertEqual('active', current) - - # image is in 'active' state so this should cause a failure. 
- from_state = 'saving' - - self.assertRaises(exception.Invalid, self.client.image_update, - image_id=UUID2, values=fixture, - from_state=from_state) - - def _test_image_update_not_existing(self): - """Tests non existing image update doesn't work""" - fixture = self.get_fixture(status='bad status') - - self.assertRaises(exception.NotFound, - self.client.image_update, - image_id=_gen_uuid(), - values=fixture) - - def test_image_destroy(self): - """Tests that image metadata is deleted properly""" - # Grab the original number of images - orig_num_images = len(self.client.image_get_all()) - - # Delete image #2 - image = self.FIXTURES[1] - deleted_image = self.client.image_destroy(image_id=image['id']) - self.assertTrue(deleted_image) - self.assertEqual(image['id'], deleted_image['id']) - self.assertTrue(deleted_image['deleted']) - self.assertTrue(deleted_image['deleted_at']) - - # Verify one less image - filters = {'deleted': False} - new_num_images = len(self.client.image_get_all(filters=filters)) - - self.assertEqual(new_num_images, orig_num_images - 1) - - def test_image_destroy_not_existing(self): - """Tests cannot delete non-existing image""" - self.assertRaises(exception.NotFound, - self.client.image_destroy, - image_id=_gen_uuid()) - - def test_image_get_members(self): - """Tests getting image members""" - memb_list = self.client.image_member_find(image_id=UUID2) - num_members = len(memb_list) - self.assertEqual(0, num_members) - - def test_image_get_members_not_existing(self): - """Tests getting non-existent image members""" - self.assertRaises(exception.NotFound, - self.client.image_get_members, - image_id=_gen_uuid()) - - def test_image_member_find(self): - """Tests getting member images""" - memb_list = self.client.image_member_find(member='pattieblack') - num_members = len(memb_list) - self.assertEqual(0, num_members) - - def test_image_member_find_include_deleted(self): - """Tests getting image members including the deleted member""" - values = 
dict(image_id=UUID2, member='pattieblack') - # create a member - member = self.client.image_member_create(values=values) - memb_list = self.client.image_member_find(member='pattieblack') - memb_list2 = self.client.image_member_find(member='pattieblack', - include_deleted=True) - self.assertEqual(1, len(memb_list)) - self.assertEqual(1, len(memb_list2)) - # delete the member - self.client.image_member_delete(memb_id=member['id']) - memb_list = self.client.image_member_find(member='pattieblack') - memb_list2 = self.client.image_member_find(member='pattieblack', - include_deleted=True) - self.assertEqual(0, len(memb_list)) - self.assertEqual(1, len(memb_list2)) - # create it again - member = self.client.image_member_create(values=values) - memb_list = self.client.image_member_find(member='pattieblack') - memb_list2 = self.client.image_member_find(member='pattieblack', - include_deleted=True) - self.assertEqual(1, len(memb_list)) - self.assertEqual(2, len(memb_list2)) - - def test_add_update_members(self): - """Tests updating image members""" - values = dict(image_id=UUID2, member='pattieblack') - member = self.client.image_member_create(values=values) - self.assertTrue(member) - - values['member'] = 'pattieblack2' - self.assertTrue(self.client.image_member_update(memb_id=member['id'], - values=values)) - - def test_add_delete_member(self): - """Tests deleting image members""" - values = dict(image_id=UUID2, member='pattieblack') - member = self.client.image_member_create(values=values) - - self.client.image_member_delete(memb_id=member['id']) - memb_list = self.client.image_member_find(member='pattieblack') - self.assertEqual(0, len(memb_list)) - - -class TestRegistryV2ClientApi(base.IsolatedUnitTest): - """Test proper actions made against a registry service. - - Test for both valid and invalid requests. 
- """ - - def setUp(self): - """Establish a clean test environment""" - super(TestRegistryV2ClientApi, self).setUp() - reload_module(rapi) - - def test_configure_registry_client_not_using_use_user_token(self): - self.config(use_user_token=False) - with patch.object(rapi, - 'configure_registry_admin_creds') as mock_rapi: - rapi.configure_registry_client() - mock_rapi.assert_called_once_with() - - def _get_fake_config_creds(self, auth_url='auth_url', strategy='keystone'): - return { - 'user': 'user', - 'password': 'password', - 'username': 'user', - 'tenant': 'tenant', - 'auth_url': auth_url, - 'strategy': strategy, - 'region': 'region' - } - - def test_configure_registry_admin_creds(self): - expected = self._get_fake_config_creds(auth_url=None, - strategy='configured_strategy') - self.config(admin_user=expected['user']) - self.config(admin_password=expected['password']) - self.config(admin_tenant_name=expected['tenant']) - self.config(auth_strategy=expected['strategy']) - self.config(auth_region=expected['region']) - self.stubs.Set(os, 'getenv', lambda x: None) - - self.assertIsNone(rapi._CLIENT_CREDS) - rapi.configure_registry_admin_creds() - self.assertEqual(expected, rapi._CLIENT_CREDS) - - def test_configure_registry_admin_creds_with_auth_url(self): - expected = self._get_fake_config_creds() - self.config(admin_user=expected['user']) - self.config(admin_password=expected['password']) - self.config(admin_tenant_name=expected['tenant']) - self.config(auth_url=expected['auth_url']) - self.config(auth_strategy='test_strategy') - self.config(auth_region=expected['region']) - - self.assertIsNone(rapi._CLIENT_CREDS) - rapi.configure_registry_admin_creds() - self.assertEqual(expected, rapi._CLIENT_CREDS) diff --git a/glance/tests/unit/v2/test_schemas_resource.py b/glance/tests/unit/v2/test_schemas_resource.py deleted file mode 100644 index 36046f99..00000000 --- a/glance/tests/unit/v2/test_schemas_resource.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2012 OpenStack 
Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import glance.api.v2.schemas -import glance.tests.unit.utils as unit_test_utils -import glance.tests.utils as test_utils - - -class TestSchemasController(test_utils.BaseTestCase): - - def setUp(self): - super(TestSchemasController, self).setUp() - self.controller = glance.api.v2.schemas.Controller() - - def test_image(self): - req = unit_test_utils.get_fake_request() - output = self.controller.image(req) - self.assertEqual('image', output['name']) - expected = set(['status', 'name', 'tags', 'checksum', 'created_at', - 'disk_format', 'updated_at', 'visibility', 'self', - 'file', 'container_format', 'schema', 'id', 'size', - 'direct_url', 'min_ram', 'min_disk', 'protected', - 'locations', 'owner', 'virtual_size']) - self.assertEqual(expected, set(output['properties'].keys())) - - def test_images(self): - req = unit_test_utils.get_fake_request() - output = self.controller.images(req) - self.assertEqual('images', output['name']) - expected = set(['images', 'schema', 'first', 'next']) - self.assertEqual(expected, set(output['properties'].keys())) - expected = set(['{schema}', '{first}', '{next}']) - actual = set([link['href'] for link in output['links']]) - self.assertEqual(expected, actual) - - def test_member(self): - req = unit_test_utils.get_fake_request() - output = self.controller.member(req) - self.assertEqual('member', output['name']) - expected = set(['status', 'created_at', 
'updated_at', 'image_id', - 'member_id', 'schema']) - self.assertEqual(expected, set(output['properties'].keys())) - - def test_members(self): - req = unit_test_utils.get_fake_request() - output = self.controller.members(req) - self.assertEqual('members', output['name']) - expected = set(['schema', 'members']) - self.assertEqual(expected, set(output['properties'].keys())) diff --git a/glance/tests/unit/v2/test_tasks_resource.py b/glance/tests/unit/v2/test_tasks_resource.py deleted file mode 100644 index eb267fe1..00000000 --- a/glance/tests/unit/v2/test_tasks_resource.py +++ /dev/null @@ -1,848 +0,0 @@ -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime -import uuid - -import mock -from oslo_config import cfg -from oslo_serialization import jsonutils -from six.moves import http_client as http -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range -import webob - -import glance.api.v2.tasks -from glance.common import timeutils -import glance.domain -import glance.gateway -from glance.tests.unit import base -import glance.tests.unit.utils as unit_test_utils -import glance.tests.utils as test_utils - -UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' -UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc' -UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' -UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86' - -TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' -TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' -TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' -TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' - -DATETIME = datetime.datetime(2013, 9, 28, 15, 27, 36, 325355) -ISOTIME = '2013-09-28T15:27:36Z' - - -def _db_fixture(task_id, **kwargs): - default_datetime = timeutils.utcnow() - obj = { - 'id': task_id, - 'status': 'pending', - 'type': 'import', - 'input': {}, - 'result': None, - 'owner': None, - 'message': None, - 'expires_at': default_datetime + datetime.timedelta(days=365), - 'created_at': default_datetime, - 'updated_at': default_datetime, - 'deleted_at': None, - 'deleted': False - } - obj.update(kwargs) - return obj - - -def _domain_fixture(task_id, **kwargs): - default_datetime = timeutils.utcnow() - task_properties = { - 'task_id': task_id, - 'status': kwargs.get('status', 'pending'), - 'task_type': kwargs.get('type', 'import'), - 'owner': kwargs.get('owner'), - 'expires_at': kwargs.get('expires_at'), - 'created_at': kwargs.get('created_at', default_datetime), - 'updated_at': kwargs.get('updated_at', default_datetime), - 'task_input': kwargs.get('task_input', {}), - 'message': kwargs.get('message'), - 'result': kwargs.get('result') - } - task = 
glance.domain.Task(**task_properties) - return task - -CONF = cfg.CONF -CONF.import_opt('task_time_to_live', 'glance.common.config', group='task') - - -class TestTasksController(test_utils.BaseTestCase): - - def setUp(self): - super(TestTasksController, self).setUp() - self.db = unit_test_utils.FakeDB(initialize=False) - self.policy = unit_test_utils.FakePolicyEnforcer() - self.notifier = unit_test_utils.FakeNotifier() - self.store = unit_test_utils.FakeStoreAPI() - self._create_tasks() - self.controller = glance.api.v2.tasks.TasksController(self.db, - self.policy, - self.notifier, - self.store) - self.gateway = glance.gateway.Gateway(self.db, self.store, - self.notifier, self.policy) - - def _create_tasks(self): - now = timeutils.utcnow() - times = [now + datetime.timedelta(seconds=5 * i) for i in range(4)] - self.tasks = [ - _db_fixture(UUID1, owner=TENANT1, - created_at=times[0], updated_at=times[0]), - # FIXME(venkatesh): change the type to include clone and export - # once they are included as a valid types under Task domain model. 
- _db_fixture(UUID2, owner=TENANT2, type='import', - created_at=times[1], updated_at=times[1]), - _db_fixture(UUID3, owner=TENANT3, type='import', - created_at=times[2], updated_at=times[2]), - _db_fixture(UUID4, owner=TENANT4, type='import', - created_at=times[3], updated_at=times[3])] - [self.db.task_create(None, task) for task in self.tasks] - - def test_index(self): - self.config(limit_param_default=1, api_limit_max=3) - request = unit_test_utils.get_fake_request() - output = self.controller.index(request) - self.assertEqual(1, len(output['tasks'])) - actual = set([task.task_id for task in output['tasks']]) - expected = set([UUID1]) - self.assertEqual(expected, actual) - - def test_index_admin(self): - request = unit_test_utils.get_fake_request(is_admin=True) - output = self.controller.index(request) - self.assertEqual(4, len(output['tasks'])) - - def test_index_return_parameters(self): - self.config(limit_param_default=1, api_limit_max=4) - request = unit_test_utils.get_fake_request(is_admin=True) - output = self.controller.index(request, marker=UUID3, limit=1, - sort_key='created_at', sort_dir='desc') - self.assertEqual(1, len(output['tasks'])) - actual = set([task.task_id for task in output['tasks']]) - expected = set([UUID2]) - self.assertEqual(expected, actual) - self.assertEqual(UUID2, output['next_marker']) - - def test_index_next_marker(self): - self.config(limit_param_default=1, api_limit_max=3) - request = unit_test_utils.get_fake_request(is_admin=True) - output = self.controller.index(request, marker=UUID3, limit=2) - self.assertEqual(2, len(output['tasks'])) - actual = set([task.task_id for task in output['tasks']]) - expected = set([UUID2, UUID1]) - self.assertEqual(expected, actual) - self.assertEqual(UUID1, output['next_marker']) - - def test_index_no_next_marker(self): - self.config(limit_param_default=1, api_limit_max=3) - request = unit_test_utils.get_fake_request(is_admin=True) - output = self.controller.index(request, marker=UUID1, limit=2) 
- self.assertEqual(0, len(output['tasks'])) - actual = set([task.task_id for task in output['tasks']]) - expected = set([]) - self.assertEqual(expected, actual) - self.assertNotIn('next_marker', output) - - def test_index_with_id_filter(self): - request = unit_test_utils.get_fake_request('/tasks?id=%s' % UUID1) - output = self.controller.index(request, filters={'id': UUID1}) - self.assertEqual(1, len(output['tasks'])) - actual = set([task.task_id for task in output['tasks']]) - expected = set([UUID1]) - self.assertEqual(expected, actual) - - def test_index_with_filters_return_many(self): - path = '/tasks?status=pending' - request = unit_test_utils.get_fake_request(path, is_admin=True) - output = self.controller.index(request, filters={'status': 'pending'}) - self.assertEqual(4, len(output['tasks'])) - actual = set([task.task_id for task in output['tasks']]) - expected = set([UUID1, UUID2, UUID3, UUID4]) - self.assertEqual(sorted(expected), sorted(actual)) - - def test_index_with_many_filters(self): - url = '/tasks?status=pending&type=import' - request = unit_test_utils.get_fake_request(url, is_admin=True) - output = self.controller.index(request, - filters={ - 'status': 'pending', - 'type': 'import', - 'owner': TENANT1, - }) - self.assertEqual(1, len(output['tasks'])) - actual = set([task.task_id for task in output['tasks']]) - expected = set([UUID1]) - self.assertEqual(expected, actual) - - def test_index_with_marker(self): - self.config(limit_param_default=1, api_limit_max=3) - path = '/tasks' - request = unit_test_utils.get_fake_request(path, is_admin=True) - output = self.controller.index(request, marker=UUID3) - actual = set([task.task_id for task in output['tasks']]) - self.assertEqual(1, len(actual)) - self.assertIn(UUID2, actual) - - def test_index_with_limit(self): - path = '/tasks' - limit = 2 - request = unit_test_utils.get_fake_request(path, is_admin=True) - output = self.controller.index(request, limit=limit) - actual = set([task.task_id for task in 
output['tasks']]) - self.assertEqual(limit, len(actual)) - - def test_index_greater_than_limit_max(self): - self.config(limit_param_default=1, api_limit_max=3) - path = '/tasks' - request = unit_test_utils.get_fake_request(path, is_admin=True) - output = self.controller.index(request, limit=4) - actual = set([task.task_id for task in output['tasks']]) - self.assertEqual(3, len(actual)) - self.assertNotIn(output['next_marker'], output) - - def test_index_default_limit(self): - self.config(limit_param_default=1, api_limit_max=3) - path = '/tasks' - request = unit_test_utils.get_fake_request(path) - output = self.controller.index(request) - actual = set([task.task_id for task in output['tasks']]) - self.assertEqual(1, len(actual)) - - def test_index_with_sort_dir(self): - path = '/tasks' - request = unit_test_utils.get_fake_request(path, is_admin=True) - output = self.controller.index(request, sort_dir='asc', limit=3) - actual = [task.task_id for task in output['tasks']] - self.assertEqual(3, len(actual)) - self.assertEqual([UUID1, UUID2, UUID3], actual) - - def test_index_with_sort_key(self): - path = '/tasks' - request = unit_test_utils.get_fake_request(path, is_admin=True) - output = self.controller.index(request, sort_key='created_at', limit=3) - actual = [task.task_id for task in output['tasks']] - self.assertEqual(3, len(actual)) - self.assertEqual(UUID4, actual[0]) - self.assertEqual(UUID3, actual[1]) - self.assertEqual(UUID2, actual[2]) - - def test_index_with_marker_not_found(self): - fake_uuid = str(uuid.uuid4()) - path = '/tasks' - request = unit_test_utils.get_fake_request(path) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, request, marker=fake_uuid) - - def test_index_with_marker_is_not_like_uuid(self): - marker = 'INVALID_UUID' - path = '/tasks' - request = unit_test_utils.get_fake_request(path) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, request, marker=marker) - - def 
test_index_invalid_sort_key(self): - path = '/tasks' - request = unit_test_utils.get_fake_request(path) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, request, sort_key='foo') - - def test_index_zero_tasks(self): - self.db.reset() - request = unit_test_utils.get_fake_request() - output = self.controller.index(request) - self.assertEqual([], output['tasks']) - - def test_get(self): - request = unit_test_utils.get_fake_request() - task = self.controller.get(request, task_id=UUID1) - self.assertEqual(UUID1, task.task_id) - self.assertEqual('import', task.type) - - def test_get_non_existent(self): - request = unit_test_utils.get_fake_request() - task_id = str(uuid.uuid4()) - self.assertRaises(webob.exc.HTTPNotFound, - self.controller.get, request, task_id) - - def test_get_not_allowed(self): - request = unit_test_utils.get_fake_request() - self.assertEqual(TENANT1, request.context.tenant) - self.assertRaises(webob.exc.HTTPNotFound, - self.controller.get, request, UUID4) - - @mock.patch.object(glance.gateway.Gateway, 'get_task_factory') - @mock.patch.object(glance.gateway.Gateway, 'get_task_executor_factory') - @mock.patch.object(glance.gateway.Gateway, 'get_task_repo') - def test_create(self, mock_get_task_repo, mock_get_task_executor_factory, - mock_get_task_factory): - # setup - request = unit_test_utils.get_fake_request() - task = { - "type": "import", - "input": { - "import_from": "swift://cloud.foo/myaccount/mycontainer/path", - "import_from_format": "qcow2", - "image_properties": {} - } - } - get_task_factory = mock.Mock() - mock_get_task_factory.return_value = get_task_factory - - new_task = mock.Mock() - get_task_factory.new_task.return_value = new_task - - new_task.run.return_value = mock.ANY - - get_task_executor_factory = mock.Mock() - mock_get_task_executor_factory.return_value = get_task_executor_factory - get_task_executor_factory.new_task_executor.return_value = mock.Mock() - - get_task_repo = mock.Mock() - 
mock_get_task_repo.return_value = get_task_repo - get_task_repo.add.return_value = mock.Mock() - - # call - self.controller.create(request, task=task) - - # assert - self.assertEqual(1, get_task_factory.new_task.call_count) - self.assertEqual(1, get_task_repo.add.call_count) - self.assertEqual( - 1, get_task_executor_factory.new_task_executor.call_count) - - @mock.patch('glance.common.scripts.utils.get_image_data_iter') - @mock.patch('glance.common.scripts.utils.validate_location_uri') - def test_create_with_live_time(self, mock_validate_location_uri, - mock_get_image_data_iter): - request = unit_test_utils.get_fake_request() - task = { - "type": "import", - "input": { - "import_from": "http://download.cirros-cloud.net/0.3.4/" - "cirros-0.3.4-x86_64-disk.img", - "import_from_format": "qcow2", - "image_properties": { - "disk_format": "qcow2", - "container_format": "bare", - "name": "test-task" - } - } - } - - new_task = self.controller.create(request, task=task) - executor_factory = self.gateway.get_task_executor_factory( - request.context) - task_executor = executor_factory.new_task_executor(request.context) - task_executor.begin_processing(new_task.task_id) - success_task = self.controller.get(request, new_task.task_id) - - # ignore second and microsecond to avoid flaky runs - task_live_time = (success_task.expires_at.replace(second=0, - microsecond=0) - - success_task.updated_at.replace(second=0, - microsecond=0)) - task_live_time_hour = (task_live_time.days * 24 + - task_live_time.seconds / 3600) - self.assertEqual(CONF.task.task_time_to_live, task_live_time_hour) - - def test_create_with_wrong_import_form(self): - request = unit_test_utils.get_fake_request() - wrong_import_from = [ - "swift://cloud.foo/myaccount/mycontainer/path", - "file:///path", - "cinder://volume-id" - ] - executor_factory = self.gateway.get_task_executor_factory( - request.context) - task_repo = self.gateway.get_task_repo(request.context) - - for import_from in wrong_import_from: - task = 
{ - "type": "import", - "input": { - "import_from": import_from, - "import_from_format": "qcow2", - "image_properties": { - "disk_format": "qcow2", - "container_format": "bare", - "name": "test-task" - } - } - } - new_task = self.controller.create(request, task=task) - task_executor = executor_factory.new_task_executor(request.context) - task_executor.begin_processing(new_task.task_id) - final_task = task_repo.get(new_task.task_id) - - self.assertEqual('failure', final_task.status) - if import_from.startswith("file:///"): - msg = ("File based imports are not allowed. Please use a " - "non-local source of image data.") - else: - supported = ['http', ] - msg = ("The given uri is not valid. Please specify a " - "valid uri from the following list of supported uri " - "%(supported)s") % {'supported': supported} - self.assertEqual(msg, final_task.message) - - def test_create_with_properties_missed(self): - request = unit_test_utils.get_fake_request() - executor_factory = self.gateway.get_task_executor_factory( - request.context) - task_repo = self.gateway.get_task_repo(request.context) - - task = { - "type": "import", - "input": { - "import_from": "swift://cloud.foo/myaccount/mycontainer/path", - "import_from_format": "qcow2", - } - } - new_task = self.controller.create(request, task=task) - task_executor = executor_factory.new_task_executor(request.context) - task_executor.begin_processing(new_task.task_id) - final_task = task_repo.get(new_task.task_id) - - self.assertEqual('failure', final_task.status) - msg = "Input does not contain 'image_properties' field" - self.assertEqual(msg, final_task.message) - - @mock.patch.object(glance.gateway.Gateway, 'get_task_factory') - def test_notifications_on_create(self, mock_get_task_factory): - request = unit_test_utils.get_fake_request() - - new_task = mock.MagicMock(type='import') - mock_get_task_factory.new_task.return_value = new_task - new_task.run.return_value = mock.ANY - - task = {"type": "import", "input": { - 
"import_from": "http://cloud.foo/myaccount/mycontainer/path", - "import_from_format": "qcow2", - "image_properties": {} - } - } - task = self.controller.create(request, task=task) - output_logs = [nlog for nlog in self.notifier.get_logs() - if nlog['event_type'] == 'task.create'] - self.assertEqual(1, len(output_logs)) - output_log = output_logs[0] - self.assertEqual('INFO', output_log['notification_type']) - self.assertEqual('task.create', output_log['event_type']) - - -class TestTasksControllerPolicies(base.IsolatedUnitTest): - - def setUp(self): - super(TestTasksControllerPolicies, self).setUp() - self.db = unit_test_utils.FakeDB() - self.policy = unit_test_utils.FakePolicyEnforcer() - self.controller = glance.api.v2.tasks.TasksController(self.db, - self.policy) - - def test_index_unauthorized(self): - rules = {"get_tasks": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.index, - request) - - def test_get_unauthorized(self): - rules = {"get_task": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPForbidden, self.controller.get, - request, task_id=UUID2) - - def test_create_task_unauthorized(self): - rules = {"add_task": False} - self.policy.set_rules(rules) - request = unit_test_utils.get_fake_request() - task = {'type': 'import', 'input': {"import_from": "fake"}} - self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, - request, task) - - def test_delete(self): - request = unit_test_utils.get_fake_request() - self.assertRaises(webob.exc.HTTPMethodNotAllowed, - self.controller.delete, - request, - 'fake_id') - - -class TestTasksDeserializer(test_utils.BaseTestCase): - - def setUp(self): - super(TestTasksDeserializer, self).setUp() - self.deserializer = glance.api.v2.tasks.RequestDeserializer() - - def test_create_no_body(self): - request = unit_test_utils.get_fake_request() - 
self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.create, request) - - def test_create(self): - request = unit_test_utils.get_fake_request() - request.body = jsonutils.dump_as_bytes({ - 'type': 'import', - 'input': {'import_from': - 'swift://cloud.foo/myaccount/mycontainer/path', - 'import_from_format': 'qcow2', - 'image_properties': {'name': 'fake1'}}, - }) - output = self.deserializer.create(request) - properties = { - 'type': 'import', - 'input': {'import_from': - 'swift://cloud.foo/myaccount/mycontainer/path', - 'import_from_format': 'qcow2', - 'image_properties': {'name': 'fake1'}}, - } - self.maxDiff = None - expected = {'task': properties} - self.assertEqual(expected, output) - - def test_index(self): - marker = str(uuid.uuid4()) - path = '/tasks?limit=1&marker=%s' % marker - request = unit_test_utils.get_fake_request(path) - expected = {'limit': 1, - 'marker': marker, - 'sort_key': 'created_at', - 'sort_dir': 'desc', - 'filters': {}} - output = self.deserializer.index(request) - self.assertEqual(expected, output) - - def test_index_strip_params_from_filters(self): - type = 'import' - path = '/tasks?type=%s' % type - request = unit_test_utils.get_fake_request(path) - output = self.deserializer.index(request) - self.assertEqual(type, output['filters']['type']) - - def test_index_with_many_filter(self): - status = 'success' - type = 'import' - path = '/tasks?status=%(status)s&type=%(type)s' % {'status': status, - 'type': type} - request = unit_test_utils.get_fake_request(path) - output = self.deserializer.index(request) - self.assertEqual(status, output['filters']['status']) - self.assertEqual(type, output['filters']['type']) - - def test_index_with_filter_and_limit(self): - status = 'success' - path = '/tasks?status=%s&limit=1' % status - request = unit_test_utils.get_fake_request(path) - output = self.deserializer.index(request) - self.assertEqual(status, output['filters']['status']) - self.assertEqual(1, output['limit']) - - def 
test_index_non_integer_limit(self): - request = unit_test_utils.get_fake_request('/tasks?limit=blah') - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_zero_limit(self): - request = unit_test_utils.get_fake_request('/tasks?limit=0') - expected = {'limit': 0, - 'sort_key': 'created_at', - 'sort_dir': 'desc', - 'filters': {}} - output = self.deserializer.index(request) - self.assertEqual(expected, output) - - def test_index_negative_limit(self): - path = '/tasks?limit=-1' - request = unit_test_utils.get_fake_request(path) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_fraction(self): - request = unit_test_utils.get_fake_request('/tasks?limit=1.1') - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_invalid_status(self): - path = '/tasks?status=blah' - request = unit_test_utils.get_fake_request(path) - self.assertRaises(webob.exc.HTTPBadRequest, - self.deserializer.index, request) - - def test_index_marker(self): - marker = str(uuid.uuid4()) - path = '/tasks?marker=%s' % marker - request = unit_test_utils.get_fake_request(path) - output = self.deserializer.index(request) - self.assertEqual(marker, output.get('marker')) - - def test_index_marker_not_specified(self): - request = unit_test_utils.get_fake_request('/tasks') - output = self.deserializer.index(request) - self.assertNotIn('marker', output) - - def test_index_limit_not_specified(self): - request = unit_test_utils.get_fake_request('/tasks') - output = self.deserializer.index(request) - self.assertNotIn('limit', output) - - def test_index_sort_key_id(self): - request = unit_test_utils.get_fake_request('/tasks?sort_key=id') - output = self.deserializer.index(request) - expected = { - 'sort_key': 'id', - 'sort_dir': 'desc', - 'filters': {} - } - self.assertEqual(expected, output) - - def test_index_sort_dir_asc(self): - request = 
unit_test_utils.get_fake_request('/tasks?sort_dir=asc') - output = self.deserializer.index(request) - expected = { - 'sort_key': 'created_at', - 'sort_dir': 'asc', - 'filters': {}} - self.assertEqual(expected, output) - - def test_index_sort_dir_bad_value(self): - request = unit_test_utils.get_fake_request('/tasks?sort_dir=invalid') - self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, - request) - - -class TestTasksSerializer(test_utils.BaseTestCase): - - def setUp(self): - super(TestTasksSerializer, self).setUp() - self.serializer = glance.api.v2.tasks.ResponseSerializer() - self.fixtures = [ - _domain_fixture(UUID1, type='import', status='pending', - task_input={'loc': 'fake'}, result={}, - owner=TENANT1, message='', created_at=DATETIME, - updated_at=DATETIME), - _domain_fixture(UUID2, type='import', status='processing', - task_input={'loc': 'bake'}, owner=TENANT2, - message='', created_at=DATETIME, - updated_at=DATETIME, result={}), - _domain_fixture(UUID3, type='import', status='success', - task_input={'loc': 'foo'}, owner=TENANT3, - message='', created_at=DATETIME, - updated_at=DATETIME, result={}, - expires_at=DATETIME), - _domain_fixture(UUID4, type='import', status='failure', - task_input={'loc': 'boo'}, owner=TENANT4, - message='', created_at=DATETIME, - updated_at=DATETIME, result={}, - expires_at=DATETIME), - ] - - def test_index(self): - expected = { - 'tasks': [ - { - 'id': UUID1, - 'type': 'import', - 'status': 'pending', - 'owner': TENANT1, - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': '/v2/tasks/%s' % UUID1, - 'schema': '/v2/schemas/task', - }, - { - 'id': UUID2, - 'type': 'import', - 'status': 'processing', - 'owner': TENANT2, - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': '/v2/tasks/%s' % UUID2, - 'schema': '/v2/schemas/task', - }, - { - 'id': UUID3, - 'type': 'import', - 'status': 'success', - 'owner': TENANT3, - 'expires_at': ISOTIME, - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': 
'/v2/tasks/%s' % UUID3, - 'schema': '/v2/schemas/task', - }, - { - 'id': UUID4, - 'type': 'import', - 'status': 'failure', - 'owner': TENANT4, - 'expires_at': ISOTIME, - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': '/v2/tasks/%s' % UUID4, - 'schema': '/v2/schemas/task', - }, - ], - 'first': '/v2/tasks', - 'schema': '/v2/schemas/tasks', - } - request = webob.Request.blank('/v2/tasks') - response = webob.Response(request=request) - task_fixtures = [f for f in self.fixtures] - result = {'tasks': task_fixtures} - self.serializer.index(response, result) - actual = jsonutils.loads(response.body) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - - def test_index_next_marker(self): - request = webob.Request.blank('/v2/tasks') - response = webob.Response(request=request) - task_fixtures = [f for f in self.fixtures] - result = {'tasks': task_fixtures, 'next_marker': UUID2} - self.serializer.index(response, result) - output = jsonutils.loads(response.body) - self.assertEqual('/v2/tasks?marker=%s' % UUID2, output['next']) - - def test_index_carries_query_parameters(self): - url = '/v2/tasks?limit=10&sort_key=id&sort_dir=asc' - request = webob.Request.blank(url) - response = webob.Response(request=request) - task_fixtures = [f for f in self.fixtures] - result = {'tasks': task_fixtures, 'next_marker': UUID2} - self.serializer.index(response, result) - output = jsonutils.loads(response.body) - - expected_url = '/v2/tasks?limit=10&sort_dir=asc&sort_key=id' - self.assertEqual(unit_test_utils.sort_url_by_qs_keys(expected_url), - unit_test_utils.sort_url_by_qs_keys(output['first'])) - - expect_next = '/v2/tasks?limit=10&marker=%s&sort_dir=asc&sort_key=id' - self.assertEqual(unit_test_utils.sort_url_by_qs_keys( - expect_next % UUID2), - unit_test_utils.sort_url_by_qs_keys(output['next'])) - - def test_get(self): - expected = { - 'id': UUID4, - 'type': 'import', - 'status': 'failure', - 'input': {'loc': 'boo'}, - 'result': 
{}, - 'owner': TENANT4, - 'message': '', - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'expires_at': ISOTIME, - 'self': '/v2/tasks/%s' % UUID4, - 'schema': '/v2/schemas/task', - } - response = webob.Response() - self.serializer.get(response, self.fixtures[3]) - actual = jsonutils.loads(response.body) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - - def test_get_ensure_expires_at_not_returned(self): - expected = { - 'id': UUID1, - 'type': 'import', - 'status': 'pending', - 'input': {'loc': 'fake'}, - 'result': {}, - 'owner': TENANT1, - 'message': '', - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': '/v2/tasks/%s' % UUID1, - 'schema': '/v2/schemas/task', - } - response = webob.Response() - self.serializer.get(response, self.fixtures[0]) - actual = jsonutils.loads(response.body) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - - expected = { - 'id': UUID2, - 'type': 'import', - 'status': 'processing', - 'input': {'loc': 'bake'}, - 'result': {}, - 'owner': TENANT2, - 'message': '', - 'created_at': ISOTIME, - 'updated_at': ISOTIME, - 'self': '/v2/tasks/%s' % UUID2, - 'schema': '/v2/schemas/task', - } - response = webob.Response() - - self.serializer.get(response, self.fixtures[1]) - - actual = jsonutils.loads(response.body) - self.assertEqual(expected, actual) - self.assertEqual('application/json', response.content_type) - - def test_create(self): - response = webob.Response() - - self.serializer.create(response, self.fixtures[3]) - - serialized_task = jsonutils.loads(response.body) - self.assertEqual(http.CREATED, response.status_int) - self.assertEqual(self.fixtures[3].task_id, - serialized_task['id']) - self.assertEqual(self.fixtures[3].task_input, - serialized_task['input']) - self.assertIn('expires_at', serialized_task) - self.assertEqual('application/json', response.content_type) - - def test_create_ensure_expires_at_is_not_returned(self): - 
response = webob.Response() - - self.serializer.create(response, self.fixtures[0]) - - serialized_task = jsonutils.loads(response.body) - self.assertEqual(http.CREATED, response.status_int) - self.assertEqual(self.fixtures[0].task_id, - serialized_task['id']) - self.assertEqual(self.fixtures[0].task_input, - serialized_task['input']) - self.assertNotIn('expires_at', serialized_task) - self.assertEqual('application/json', response.content_type) - - response = webob.Response() - - self.serializer.create(response, self.fixtures[1]) - - serialized_task = jsonutils.loads(response.body) - self.assertEqual(http.CREATED, response.status_int) - self.assertEqual(self.fixtures[1].task_id, - serialized_task['id']) - self.assertEqual(self.fixtures[1].task_input, - serialized_task['input']) - self.assertNotIn('expires_at', serialized_task) - self.assertEqual('application/json', response.content_type) diff --git a/glance/tests/utils.py b/glance/tests/utils.py deleted file mode 100644 index 19899c2f..00000000 --- a/glance/tests/utils.py +++ /dev/null @@ -1,695 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Common utilities used in testing""" - -import errno -import functools -import os -import shlex -import shutil -import socket -import subprocess - -from alembic import command as alembic_command -import fixtures -from oslo_config import cfg -from oslo_config import fixture as cfg_fixture -from oslo_log import log -from oslo_serialization import jsonutils -from oslotest import moxstubout -import six -from six.moves import BaseHTTPServer -from six.moves import http_client as http -import testtools -import webob - -from glance.common import config -from glance.common import exception -from glance.common import property_utils -from glance.common import timeutils -from glance.common import utils -from glance.common import wsgi -from glance import context -from glance.db import migration as db_migration -from glance.db.sqlalchemy import alembic_migrations -from glance.db.sqlalchemy import api as db_api -from glance.db.sqlalchemy import models as db_models - -CONF = cfg.CONF -try: - CONF.debug -except cfg.NoSuchOptError: - # NOTE(sigmavirus24): If we run the entire test suite, the logging options - # will be registered appropriately and we do not need to re-register them. - # However, when we run a test in isolation (or use --debug), those options - # will not be registered for us. In order for a test in a class that - # inherits from BaseTestCase to even run, we will need to register them - # ourselves. BaseTestCase.config will set the debug level if something - # calls self.config(debug=True) so we need these options registered - # appropriately. - # See bug 1433785 for more details. 
- log.register_options(CONF) - - -class BaseTestCase(testtools.TestCase): - - def setUp(self): - super(BaseTestCase, self).setUp() - - self._config_fixture = self.useFixture(cfg_fixture.Config()) - - # NOTE(bcwaldon): parse_args has to be called to register certain - # command-line options - specifically we need config_dir for - # the following policy tests - config.parse_args(args=[]) - self.addCleanup(CONF.reset) - mox_fixture = self.useFixture(moxstubout.MoxStubout()) - self.stubs = mox_fixture.stubs - self.stubs.Set(exception, '_FATAL_EXCEPTION_FORMAT_ERRORS', True) - self.test_dir = self.useFixture(fixtures.TempDir()).path - self.conf_dir = os.path.join(self.test_dir, 'etc') - utils.safe_mkdirs(self.conf_dir) - self.set_policy() - - def set_policy(self): - conf_file = "policy.json" - self.policy_file = self._copy_data_file(conf_file, self.conf_dir) - self.config(policy_file=self.policy_file, group='oslo_policy') - - def set_property_protections(self, use_policies=False): - self.unset_property_protections() - conf_file = "property-protections.conf" - if use_policies: - conf_file = "property-protections-policies.conf" - self.config(property_protection_rule_format="policies") - self.property_file = self._copy_data_file(conf_file, self.test_dir) - self.config(property_protection_file=self.property_file) - - def unset_property_protections(self): - for section in property_utils.CONFIG.sections(): - property_utils.CONFIG.remove_section(section) - - def _copy_data_file(self, file_name, dst_dir): - src_file_name = os.path.join('glance/tests/etc', file_name) - shutil.copy(src_file_name, dst_dir) - dst_file_name = os.path.join(dst_dir, file_name) - return dst_file_name - - def set_property_protection_rules(self, rules): - with open(self.property_file, 'w') as f: - for rule_key in rules.keys(): - f.write('[%s]\n' % rule_key) - for operation in rules[rule_key].keys(): - roles_str = ','.join(rules[rule_key][operation]) - f.write('%s = %s\n' % (operation, roles_str)) - - def 
config(self, **kw): - """ - Override some configuration values. - - The keyword arguments are the names of configuration options to - override and their values. - - If a group argument is supplied, the overrides are applied to - the specified configuration option group. - - All overrides are automatically cleared at the end of the current - test by the fixtures cleanup process. - """ - self._config_fixture.config(**kw) - - -class requires(object): - """Decorator that initiates additional test setup/teardown.""" - def __init__(self, setup=None, teardown=None): - self.setup = setup - self.teardown = teardown - - def __call__(self, func): - def _runner(*args, **kw): - if self.setup: - self.setup(args[0]) - func(*args, **kw) - if self.teardown: - self.teardown(args[0]) - _runner.__name__ = func.__name__ - _runner.__doc__ = func.__doc__ - return _runner - - -class depends_on_exe(object): - """Decorator to skip test if an executable is unavailable""" - def __init__(self, exe): - self.exe = exe - - def __call__(self, func): - def _runner(*args, **kw): - cmd = 'which %s' % self.exe - exitcode, out, err = execute(cmd, raise_error=False) - if exitcode != 0: - args[0].disabled_message = 'test requires exe: %s' % self.exe - args[0].disabled = True - func(*args, **kw) - _runner.__name__ = func.__name__ - _runner.__doc__ = func.__doc__ - return _runner - - -def skip_if_disabled(func): - """Decorator that skips a test if test case is disabled.""" - @functools.wraps(func) - def wrapped(*a, **kwargs): - func.__test__ = False - test_obj = a[0] - message = getattr(test_obj, 'disabled_message', - 'Test disabled') - if getattr(test_obj, 'disabled', False): - test_obj.skipTest(message) - func(*a, **kwargs) - return wrapped - - -def fork_exec(cmd, - exec_env=None, - logfile=None, - pass_fds=None): - """ - Execute a command using fork/exec. 
- - This is needed for programs system executions that need path - searching but cannot have a shell as their parent process, for - example: glance-api. When glance-api starts it sets itself as - the parent process for its own process group. Thus the pid that - a Popen process would have is not the right pid to use for killing - the process group. This patch gives the test env direct access - to the actual pid. - - :param cmd: Command to execute as an array of arguments. - :param exec_env: A dictionary representing the environment with - which to run the command. - :param logfile: A path to a file which will hold the stdout/err of - the child process. - :param pass_fds: Sequence of file descriptors passed to the child. - """ - env = os.environ.copy() - if exec_env is not None: - for env_name, env_val in exec_env.items(): - if callable(env_val): - env[env_name] = env_val(env.get(env_name)) - else: - env[env_name] = env_val - - pid = os.fork() - if pid == 0: - if logfile: - fds = [1, 2] - with open(logfile, 'r+b') as fptr: - for desc in fds: # close fds - try: - os.dup2(fptr.fileno(), desc) - except OSError: - pass - if pass_fds and hasattr(os, 'set_inheritable'): - # os.set_inheritable() is only available and needed - # since Python 3.4. On Python 3.3 and older, file descriptors are - # inheritable by default. - for fd in pass_fds: - os.set_inheritable(fd, True) - - args = shlex.split(cmd) - os.execvpe(args[0], args, env) - else: - return pid - - -def wait_for_fork(pid, - raise_error=True, - expected_exitcode=0): - """ - Wait for a process to complete - - This function will wait for the given pid to complete. If the - exit code does not match that of the expected_exitcode an error - is raised. 
- """ - - rc = 0 - try: - (pid, rc) = os.waitpid(pid, 0) - rc = os.WEXITSTATUS(rc) - if rc != expected_exitcode: - raise RuntimeError('The exit code %d is not %d' - % (rc, expected_exitcode)) - except Exception: - if raise_error: - raise - - return rc - - -def execute(cmd, - raise_error=True, - no_venv=False, - exec_env=None, - expect_exit=True, - expected_exitcode=0, - context=None): - """ - Executes a command in a subprocess. Returns a tuple - of (exitcode, out, err), where out is the string output - from stdout and err is the string output from stderr when - executing the command. - - :param cmd: Command string to execute - :param raise_error: If returncode is not 0 (success), then - raise a RuntimeError? Default: True) - :param no_venv: Disable the virtual environment - :param exec_env: Optional dictionary of additional environment - variables; values may be callables, which will - be passed the current value of the named - environment variable - :param expect_exit: Optional flag true iff timely exit is expected - :param expected_exitcode: expected exitcode from the launcher - :param context: additional context for error message - """ - - env = os.environ.copy() - if exec_env is not None: - for env_name, env_val in exec_env.items(): - if callable(env_val): - env[env_name] = env_val(env.get(env_name)) - else: - env[env_name] = env_val - - # If we're asked to omit the virtualenv, and if one is set up, - # restore the various environment variables - if no_venv and 'VIRTUAL_ENV' in env: - # Clip off the first element of PATH - env['PATH'] = env['PATH'].split(os.pathsep, 1)[-1] - del env['VIRTUAL_ENV'] - - # Make sure that we use the programs in the - # current source directory's bin/ directory. 
- path_ext = [os.path.join(os.getcwd(), 'bin')] - - # Also jack in the path cmd comes from, if it's absolute - args = shlex.split(cmd) - executable = args[0] - if os.path.isabs(executable): - path_ext.append(os.path.dirname(executable)) - - env['PATH'] = ':'.join(path_ext) + ':' + env['PATH'] - process = subprocess.Popen(args, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env=env) - if expect_exit: - result = process.communicate() - (out, err) = result - exitcode = process.returncode - else: - out = '' - err = '' - exitcode = 0 - - if exitcode != expected_exitcode and raise_error: - msg = ("Command %(cmd)s did not succeed. Returned an exit " - "code of %(exitcode)d." - "\n\nSTDOUT: %(out)s" - "\n\nSTDERR: %(err)s" % {'cmd': cmd, 'exitcode': exitcode, - 'out': out, 'err': err}) - if context: - msg += "\n\nCONTEXT: %s" % context - raise RuntimeError(msg) - return exitcode, out, err - - -def find_executable(cmdname): - """ - Searches the path for a given cmdname. Returns an absolute - filename if an executable with the given name exists in the path, - or None if one does not. - - :param cmdname: The bare name of the executable to search for - """ - - # Keep an eye out for the possibility of an absolute pathname - if os.path.isabs(cmdname): - return cmdname - - # Get a list of the directories to search - path = ([os.path.join(os.getcwd(), 'bin')] + - os.environ['PATH'].split(os.pathsep)) - - # Search through each in turn - for elem in path: - full_path = os.path.join(elem, cmdname) - if os.access(full_path, os.X_OK): - return full_path - - # No dice... - return None - - -def get_unused_port(): - """ - Returns an unused port on localhost. - """ - port, s = get_unused_port_and_socket() - s.close() - return port - - -def get_unused_port_and_socket(): - """ - Returns an unused port on localhost and the open socket - from which it was created. 
- """ - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.bind(('localhost', 0)) - addr, port = s.getsockname() - return (port, s) - - -def get_unused_port_ipv6(): - """ - Returns an unused port on localhost on IPv6 (uses ::1). - """ - port, s = get_unused_port_and_socket_ipv6() - s.close() - return port - - -def get_unused_port_and_socket_ipv6(): - """ - Returns an unused port on localhost and the open socket - from which it was created, but uses IPv6 (::1). - """ - s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) - s.bind(('::1', 0)) - # Ignoring flowinfo and scopeid... - addr, port, flowinfo, scopeid = s.getsockname() - return (port, s) - - -def xattr_writes_supported(path): - """ - Returns True if the we can write a file to the supplied - path and subsequently write a xattr to that file. - """ - try: - import xattr - except ImportError: - return False - - def set_xattr(path, key, value): - xattr.setxattr(path, "user.%s" % key, value) - - # We do a quick attempt to write a user xattr to a temporary file - # to check that the filesystem is even enabled to support xattrs - fake_filepath = os.path.join(path, 'testing-checkme') - result = True - with open(fake_filepath, 'wb') as fake_file: - fake_file.write(b"XXX") - fake_file.flush() - try: - set_xattr(fake_filepath, 'hits', b'1') - except IOError as e: - if e.errno == errno.EOPNOTSUPP: - result = False - else: - # Cleanup after ourselves... 
- if os.path.exists(fake_filepath): - os.unlink(fake_filepath) - - return result - - -def minimal_headers(name, public=True): - headers = { - 'Content-Type': 'application/octet-stream', - 'X-Image-Meta-Name': name, - 'X-Image-Meta-disk_format': 'raw', - 'X-Image-Meta-container_format': 'ovf', - } - if public: - headers['X-Image-Meta-Is-Public'] = 'True' - return headers - - -def minimal_add_command(port, name, suffix='', public=True): - visibility = 'is_public=True' if public else '' - return ("bin/glance --port=%d add %s" - " disk_format=raw container_format=ovf" - " name=%s %s" % (port, visibility, name, suffix)) - - -def start_http_server(image_id, image_data): - def _get_http_handler_class(fixture): - class StaticHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): - def do_GET(self): - self.send_response(http.OK) - self.send_header('Content-Length', str(len(fixture))) - self.end_headers() - self.wfile.write(fixture) - return - - def do_HEAD(self): - # reserve non_existing_image_path for the cases where we expect - # 404 from the server - if 'non_existing_image_path' in self.path: - self.send_response(http.NOT_FOUND) - else: - self.send_response(http.OK) - self.send_header('Content-Length', str(len(fixture))) - self.end_headers() - return - - def log_message(self, *args, **kwargs): - # Override this method to prevent debug output from going - # to stderr during testing - return - - return StaticHTTPRequestHandler - - server_address = ('127.0.0.1', 0) - handler_class = _get_http_handler_class(image_data) - httpd = BaseHTTPServer.HTTPServer(server_address, handler_class) - port = httpd.socket.getsockname()[1] - - pid = os.fork() - if pid == 0: - httpd.serve_forever() - else: - return pid, port - - -class RegistryAPIMixIn(object): - - def create_fixtures(self): - for fixture in self.FIXTURES: - db_api.image_create(self.context, fixture) - with open(os.path.join(self.test_dir, fixture['id']), - 'wb') as image: - image.write(b"chunk00000remainder") - - def 
destroy_fixtures(self): - db_models.unregister_models(db_api.get_engine()) - db_models.register_models(db_api.get_engine()) - - def get_fixture(self, **kwargs): - fixture = {'name': 'fake public image', - 'status': 'active', - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'visibility': 'public', - 'size': 20, - 'checksum': None} - if 'is_public' in kwargs: - fixture.pop('visibility') - fixture.update(kwargs) - return fixture - - def get_minimal_fixture(self, **kwargs): - fixture = {'name': 'fake public image', - 'visibility': 'public', - 'disk_format': 'vhd', - 'container_format': 'ovf'} - if 'is_public' in kwargs: - fixture.pop('visibility') - fixture.update(kwargs) - return fixture - - def get_extra_fixture(self, id, name, **kwargs): - created_at = kwargs.pop('created_at', timeutils.utcnow()) - updated_at = kwargs.pop('updated_at', created_at) - return self.get_fixture( - id=id, name=name, deleted=False, deleted_at=None, - created_at=created_at, updated_at=updated_at, - **kwargs) - - def get_api_response_ext(self, http_resp, url='/images', headers=None, - body=None, method=None, api=None, - content_type=None): - if api is None: - api = self.api - if headers is None: - headers = {} - req = webob.Request.blank(url) - for k, v in six.iteritems(headers): - req.headers[k] = v - if method: - req.method = method - if body: - req.body = body - if content_type == 'json': - req.content_type = 'application/json' - elif content_type == 'octet': - req.content_type = 'application/octet-stream' - res = req.get_response(api) - self.assertEqual(res.status_int, http_resp) - return res - - def assertEqualImages(self, res, uuids, key='images', unjsonify=True): - images = jsonutils.loads(res.body)[key] if unjsonify else res - self.assertEqual(len(images), len(uuids)) - for i, value in enumerate(uuids): - self.assertEqual(images[i]['id'], value) - - -class FakeAuthMiddleware(wsgi.Middleware): - - def __init__(self, app, is_admin=False): - super(FakeAuthMiddleware, 
self).__init__(app) - self.is_admin = is_admin - - def process_request(self, req): - auth_token = req.headers.get('X-Auth-Token') - user = None - tenant = None - roles = [] - if auth_token: - user, tenant, role = auth_token.split(':') - if tenant.lower() == 'none': - tenant = None - roles = [role] - req.headers['X-User-Id'] = user - req.headers['X-Tenant-Id'] = tenant - req.headers['X-Roles'] = role - req.headers['X-Identity-Status'] = 'Confirmed' - kwargs = { - 'user': user, - 'tenant': tenant, - 'roles': roles, - 'is_admin': self.is_admin, - 'auth_token': auth_token, - } - - req.context = context.RequestContext(**kwargs) - - -class FakeHTTPResponse(object): - def __init__(self, status=http.OK, headers=None, data=None, - *args, **kwargs): - data = data or b'I am a teapot, short and stout\n' - self.data = six.BytesIO(data) - self.read = self.data.read - self.status = status - self.headers = headers or {'content-length': len(data)} - - def getheader(self, name, default=None): - return self.headers.get(name.lower(), default) - - def getheaders(self): - return self.headers or {} - - def read(self, amt): - self.data.read(amt) - - -class Httplib2WsgiAdapter(object): - def __init__(self, app): - self.app = app - - def request(self, uri, method="GET", body=None, headers=None): - req = webob.Request.blank(uri, method=method, headers=headers) - if isinstance(body, str): - req.body = body.encode('utf-8') - else: - req.body = body - resp = req.get_response(self.app) - return Httplib2WebobResponse(resp), resp.body.decode('utf-8') - - -class Httplib2WebobResponse(object): - def __init__(self, webob_resp): - self.webob_resp = webob_resp - - @property - def status(self): - return self.webob_resp.status_code - - def __getitem__(self, key): - return self.webob_resp.headers[key] - - def get(self, key): - return self.webob_resp.headers[key] - - @property - def allow(self): - return self.webob_resp.allow - - @allow.setter - def allow(self, allowed): - if type(allowed) is not str: - 
raise TypeError('Allow header should be a str') - - self.webob_resp.allow = allowed - - -class HttplibWsgiAdapter(object): - def __init__(self, app): - self.app = app - self.req = None - - def request(self, method, url, body=None, headers=None): - if headers is None: - headers = {} - self.req = webob.Request.blank(url, method=method, headers=headers) - self.req.body = body - - def getresponse(self): - response = self.req.get_response(self.app) - return FakeHTTPResponse(response.status_code, response.headers, - response.body) - - -def db_sync(version=None, engine=None): - """Migrate the database to `version` or the most recent version.""" - if version is None: - version = db_migration.LATEST_REVISION - if engine is None: - engine = db_api.get_engine() - - alembic_config = alembic_migrations.get_alembic_config(engine=engine) - alembic_command.upgrade(alembic_config, version) - - -def is_sqlite_version_prior_to(major, minor): - import sqlite3 - tup = sqlite3.sqlite_version_info - return tup[0] < major or (tup[0] == major and tup[1] < minor) diff --git a/glance/tests/var/ca.crt b/glance/tests/var/ca.crt deleted file mode 100644 index c8252d01..00000000 --- a/glance/tests/var/ca.crt +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDiTCCAnGgAwIBAgIJAMj+Lfpqc9lLMA0GCSqGSIb3DQEBCwUAMFsxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMRIwEAYDVQQKDAlPcGVuU3RhY2sx -DzANBgNVBAsMBkdsYW5jZTESMBAGA1UEAwwJR2xhbmNlIENBMB4XDTE1MDEzMTA1 -MzAyNloXDTI1MDEyODA1MzAyNlowWzELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNv -bWUtU3RhdGUxEjAQBgNVBAoMCU9wZW5TdGFjazEPMA0GA1UECwwGR2xhbmNlMRIw -EAYDVQQDDAlHbGFuY2UgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB -AQDcW4cRtw96/ZYsx3UB1jWWT0pAlsMQ03En7dueh9o4UZYChY2NMqTJ3gVqy1vf -4wyRU1ROb/N5L4KdQiJARH/ARbV+qrWoRvkcWBfg9w/4uZ9ZFhCBbaa2cAtTIGzV -ta6HP9UPeyfXrS+jgjqU2QN3bcc0ZCMAiQbtW7Vpw8RNr0NvTJDaSCzmpGQ7TQtB -0jXm1nSG7FZUbojUCYB6TBGd01Cg8GzAai3ngXDq6foVJEwfmaV2Zapb0A4FLquX -OzebskY5EL/okQGPofSRCu/ar+HV4HN3+PgIIrfa8RhDDdlv6qE1iEuS6isSH1s+ 
-7BA2ZKfzT5t8G/8lSjKa/r2pAgMBAAGjUDBOMB0GA1UdDgQWBBT3M/WuigtS7JYZ -QD0XJEDD8JSZrTAfBgNVHSMEGDAWgBT3M/WuigtS7JYZQD0XJEDD8JSZrTAMBgNV -HRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCWOhC9kBZAJalQhAeNGIiiJ2bV -HpvzSCEXSEAdh3A0XDK1KxoMHy1LhNGYrMmN2a+2O3SoX0FLB4p9zOifq4ACwaMD -CjQeB/whsfPt5s0gV3mGMCR+V2b8r5H/30KRbIzQGXmy+/r6Wfe012jcVVXsQawW -Omd4d+Bduf5iiL1OCKEMepqjQLu7Yg41ucRpUewBA+A9hoKp7jpwSnzSALX7FWEQ -TBJtJ9jEnZl36S81eZJvOXSzeptHyomSAt8eGFCVuPB0dZCXuBNLu4Gsn+dIhfyj -NwK4noYZXMndPwGy92KDhjxVnHzd9HwImgr6atmWhPPz5hm50BrA7sv06Nto ------END CERTIFICATE----- diff --git a/glance/tests/var/ca.key b/glance/tests/var/ca.key deleted file mode 100644 index 1730e1db..00000000 --- a/glance/tests/var/ca.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDcW4cRtw96/ZYs -x3UB1jWWT0pAlsMQ03En7dueh9o4UZYChY2NMqTJ3gVqy1vf4wyRU1ROb/N5L4Kd -QiJARH/ARbV+qrWoRvkcWBfg9w/4uZ9ZFhCBbaa2cAtTIGzVta6HP9UPeyfXrS+j -gjqU2QN3bcc0ZCMAiQbtW7Vpw8RNr0NvTJDaSCzmpGQ7TQtB0jXm1nSG7FZUbojU -CYB6TBGd01Cg8GzAai3ngXDq6foVJEwfmaV2Zapb0A4FLquXOzebskY5EL/okQGP -ofSRCu/ar+HV4HN3+PgIIrfa8RhDDdlv6qE1iEuS6isSH1s+7BA2ZKfzT5t8G/8l -SjKa/r2pAgMBAAECggEABeoS+v+906BAypzj4BO+xnUEWi1xuN7j951juqKM0dwm -uZSaEwMb9ysVXCNvKNgwOypQZfaNQ2BqEgx3XOA5yZBVabvtOkIFZ6RZp7kZ3aQl -yb9U3BR0WAsz0pxZL3c74vdsoYi9rgVA9ROGvP4CIM96fEZ/xgDnhbFjch5GA4u2 -8XQ/kJUwLl0Uzxyo10sqGu3hgMwpM8lpaRW6d5EQ628rJEtA/Wmy5GpyCUhTD/5B -jE1IzhjT4T5LqiPjA/Dsmz4Sa0+MyKRmA+zfSH6uS4szSaj53GVMHh4K+Xg2/EeD -6I3hGOtzZuYp5HBHE6J8VgeuErBQf32CCglHqN/dLQKBgQD4XaXa+AZtB10cRUV4 -LZDB1AePJLloBhKikeTboZyhZEwbNuvw3JSQBAfUdpx3+8Na3Po1Tfy3DlZaVCU2 -0PWh2UYrtwA3dymp8GCuSvnsLz1kNGv0Q7WEYaepyKRO8qHCjrTDUFuGVztU+H6O -OWPHRd4DnyF3pKN7K4j6pU76HwKBgQDjIXylwPb6TD9ln13ijJ06t9l1E13dSS0B -+9QU3f4abjMmW0K7icrNdmsjHafWLGXP2dxB0k4sx448buH+L8uLjC8G80wLQMSJ -NAKpxIsmkOMpPUl80ks8bmzsqztmtql6kAgSwSW84vftJyNrFnp2kC2O4ZYGwz1+ -8rj3nBrfNwKBgQDrCJxCyoIyPUy0yy0BnIUnmAILSSKXuV97LvtXiOnTpTmMa339 
-8pA4dUf/nLtXpA3r98BkH0gu50d6tbR92mMI5bdM+SIgWwk3g33KkrNN+iproFwk -zMqC23Mx7ejnuR6xIiEXz/y89eH0+C+zYcX1tz1xSe7+7PO0RK+dGkDR2wKBgHGR -L+MtPhDfCSAF9IqvpnpSrR+2BEv+J8wDIAMjEMgka9z06sQc3NOpL17KmD4lyu6H -z3L19fK8ASnEg6l2On9XI7iE9HP3+Y1k/SPny3AIKB1ZsKICAG6CBGK+J6BvGwTW -ecLu4rC0iCUDWdlUzvzzkGQN9dcBzoDoWoYsft83AoGAAh4MyrM32gwlUgQD8/jX -8rsJlKnme0qMjX4A66caBomjztsH2Qt6cH7DIHx+hU75pnDAuEmR9xqnX7wFTR9Y -0j/XqTVsTjDINRLgMkrg7wIqKtWdicibBx1ER9LzwfNwht/ZFeMLdeUUUYMNv3cg -cMSLxlxgFaUggYj/dsF6ypQ= ------END PRIVATE KEY----- diff --git a/glance/tests/var/certificate.crt b/glance/tests/var/certificate.crt deleted file mode 100644 index c8e9d3f6..00000000 --- a/glance/tests/var/certificate.crt +++ /dev/null @@ -1,92 +0,0 @@ -# > openssl x509 -in glance/tests/var/certificate.crt -noout -text -# Certificate: -# Data: -# Version: 1 (0x0) -# Serial Number: 1 (0x1) -# Signature Algorithm: sha1WithRSAEncryption -# Issuer: C=AU, ST=Some-State, O=OpenStack, OU=Glance, CN=Glance CA -# Validity -# Not Before: Feb 2 20:22:13 2015 GMT -# Not After : Jan 31 20:22:13 2024 GMT -# Subject: C=AU, ST=Some-State, O=OpenStack, OU=Glance, CN=127.0.0.1 -# Subject Public Key Info: -# Public Key Algorithm: rsaEncryption -# RSA Public Key: (4096 bit) -# Modulus (4096 bit): -# 00:9f:44:13:51:de:e9:5a:f7:ac:33:2a:1a:4c:91: -# a1:73:bc:f3:a6:d3:e6:59:ae:e8:e2:34:68:3e:f4: -# 40:c1:a1:1a:65:9a:a3:67:e9:2c:b9:79:9c:00:b1: -# 7c:c1:e6:9e:de:47:bf:f1:cb:f2:73:d4:c3:62:fe: -# 82:90:6f:b4:75:ca:7e:56:8f:99:3d:06:51:3c:40: -# f4:ff:74:97:4f:0d:d2:e6:66:76:8d:97:bf:89:ce: -# fe:b2:d7:89:71:f2:a0:d9:f5:26:7c:1a:7a:bf:2b: -# 8f:72:80:e7:1f:4d:4a:40:a3:b9:9e:33:f6:55:e0: -# 40:2b:1e:49:e4:8c:71:9d:11:32:cf:21:41:e1:13: -# 28:c6:d6:f6:e0:b3:26:10:6d:5b:63:1d:c3:ee:d0: -# c4:66:63:38:89:6b:8f:2a:c2:bd:4f:e4:bc:03:8f: -# a2:f2:5c:1d:73:11:9c:7b:93:3d:d6:a3:d1:2d:cd: -# 64:23:24:bc:65:3c:71:20:28:60:a0:ea:fe:77:0e: -# 1d:95:36:76:ad:e7:2f:1c:27:62:55:e3:9d:11:c1: -# fb:43:3e:e5:21:ac:fd:0e:7e:3d:c9:44:d2:bd:6f: -# 
89:7e:0f:cb:88:54:57:fd:8d:21:c8:34:e1:47:01: -# 28:0f:45:a1:7e:60:1a:9c:4c:0c:b8:c1:37:2d:46: -# ab:18:9e:ca:49:d3:77:b7:92:3a:d2:7f:ca:d5:02: -# f1:75:81:66:39:51:aa:bc:d7:f0:91:23:69:e8:71: -# ae:44:76:5e:87:54:eb:72:fc:ac:fd:60:22:e0:6a: -# e4:ad:37:b7:f6:e5:24:b4:95:2c:26:0e:75:a0:e9: -# ed:57:be:37:42:64:1f:02:49:0c:bd:5d:74:6d:e6: -# f2:da:5c:54:82:fa:fc:ff:3a:e4:1a:7a:a9:3c:3d: -# ee:b5:df:09:0c:69:c3:51:92:67:80:71:9b:10:8b: -# 20:ff:a2:5e:c5:f2:86:a0:06:65:1c:42:f9:91:24: -# 54:29:ed:7e:ec:db:4c:7b:54:ee:b1:25:1b:38:53: -# ae:01:b6:c5:93:1e:a3:4d:1b:e8:73:47:50:57:e8: -# ec:a0:80:53:b1:34:74:37:9a:c1:8c:14:64:2e:16: -# dd:a1:2e:d3:45:3e:2c:46:62:20:2a:93:7a:92:4c: -# b2:cc:64:47:ad:63:32:0b:68:0c:24:98:20:83:08: -# 35:74:a7:68:7a:ef:d6:84:07:d1:5e:d7:c0:6c:3f: -# a7:4a:78:62:a8:70:75:37:fb:ce:1f:09:1e:7c:11: -# 35:cc:b3:5a:a3:cc:3f:35:c9:ee:24:6f:63:f8:54: -# 6f:7c:5b:b4:76:3d:f2:81:6d:ad:64:66:10:d0:c4: -# 0b:2c:2f -# Exponent: 65537 (0x10001) -# Signature Algorithm: sha1WithRSAEncryption -# 5f:e8:a8:93:20:6c:0f:12:90:a6:e2:64:21:ed:63:0e:8c:e0: -# 0f:d5:04:13:4d:2a:e9:a5:91:b7:e4:51:94:bd:0a:70:4b:94: -# c7:1c:94:ed:d7:64:95:07:6b:a1:4a:bc:0b:53:b5:1a:7e:f1: -# 9c:12:59:24:5f:36:72:34:ca:33:ee:28:46:fd:21:e6:52:19: -# 0c:3d:94:6b:bd:cb:76:a1:45:7f:30:7b:71:f1:84:b6:3c:e0: -# ac:af:13:81:9c:0e:6e:3c:9b:89:19:95:de:8e:9c:ef:70:ac: -# 07:ae:74:42:47:35:50:88:36:ec:32:1a:55:24:08:f2:44:57: -# 67:fe:0a:bb:6b:a7:bd:bc:af:bf:2a:e4:dd:53:84:6b:de:1d: -# 2a:28:21:38:06:7a:5b:d8:83:15:65:31:6d:61:67:00:9e:1a: -# 61:85:15:a2:4c:9a:eb:6d:59:8e:34:ac:2c:d5:24:4e:00:ff: -# 30:4d:a3:d5:80:63:17:52:65:ac:7f:f4:0a:8e:56:a4:97:51: -# 39:81:ae:e8:cb:52:09:b3:47:b4:fd:1b:e2:04:f9:f2:76:e3: -# 63:ef:90:aa:54:98:96:05:05:a9:91:76:18:ed:5d:9e:6e:88: -# 50:9a:f7:2c:ce:5e:54:ba:15:ec:62:ff:5d:be:af:35:03:b1: -# 3f:32:3e:0e ------BEGIN CERTIFICATE----- -MIIEKjCCAxICAQEwDQYJKoZIhvcNAQEFBQAwWzELMAkGA1UEBhMCQVUxEzARBgNV 
-BAgMClNvbWUtU3RhdGUxEjAQBgNVBAoMCU9wZW5TdGFjazEPMA0GA1UECwwGR2xh -bmNlMRIwEAYDVQQDDAlHbGFuY2UgQ0EwHhcNMTUwMjAyMjAyMjEzWhcNMjQwMTMx -MjAyMjEzWjBbMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTESMBAG -A1UEChMJT3BlblN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEjAQBgNVBAMTCTEyNy4w -LjAuMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ9EE1He6Vr3rDMq -GkyRoXO886bT5lmu6OI0aD70QMGhGmWao2fpLLl5nACxfMHmnt5Hv/HL8nPUw2L+ -gpBvtHXKflaPmT0GUTxA9P90l08N0uZmdo2Xv4nO/rLXiXHyoNn1Jnwaer8rj3KA -5x9NSkCjuZ4z9lXgQCseSeSMcZ0RMs8hQeETKMbW9uCzJhBtW2Mdw+7QxGZjOIlr -jyrCvU/kvAOPovJcHXMRnHuTPdaj0S3NZCMkvGU8cSAoYKDq/ncOHZU2dq3nLxwn -YlXjnRHB+0M+5SGs/Q5+PclE0r1viX4Py4hUV/2NIcg04UcBKA9FoX5gGpxMDLjB -Ny1GqxieyknTd7eSOtJ/ytUC8XWBZjlRqrzX8JEjaehxrkR2XodU63L8rP1gIuBq -5K03t/blJLSVLCYOdaDp7Ve+N0JkHwJJDL1ddG3m8tpcVIL6/P865Bp6qTw97rXf -CQxpw1GSZ4BxmxCLIP+iXsXyhqAGZRxC+ZEkVCntfuzbTHtU7rElGzhTrgG2xZMe -o00b6HNHUFfo7KCAU7E0dDeawYwUZC4W3aEu00U+LEZiICqTepJMssxkR61jMgto -DCSYIIMINXSnaHrv1oQH0V7XwGw/p0p4YqhwdTf7zh8JHnwRNcyzWqPMPzXJ7iRv -Y/hUb3xbtHY98oFtrWRmENDECywvAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAF/o -qJMgbA8SkKbiZCHtYw6M4A/VBBNNKumlkbfkUZS9CnBLlMcclO3XZJUHa6FKvAtT -tRp+8ZwSWSRfNnI0yjPuKEb9IeZSGQw9lGu9y3ahRX8we3HxhLY84KyvE4GcDm48 -m4kZld6OnO9wrAeudEJHNVCINuwyGlUkCPJEV2f+Crtrp728r78q5N1ThGveHSoo -ITgGelvYgxVlMW1hZwCeGmGFFaJMmuttWY40rCzVJE4A/zBNo9WAYxdSZax/9AqO -VqSXUTmBrujLUgmzR7T9G+IE+fJ242PvkKpUmJYFBamRdhjtXZ5uiFCa9yzOXlS6 -Fexi/12+rzUDsT8yPg4= ------END CERTIFICATE----- diff --git a/glance/tests/var/privatekey.key b/glance/tests/var/privatekey.key deleted file mode 100644 index c7e4cd1b..00000000 --- a/glance/tests/var/privatekey.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKAIBAAKCAgEAn0QTUd7pWvesMyoaTJGhc7zzptPmWa7o4jRoPvRAwaEaZZqj -Z+ksuXmcALF8weae3ke/8cvyc9TDYv6CkG+0dcp+Vo+ZPQZRPED0/3SXTw3S5mZ2 -jZe/ic7+steJcfKg2fUmfBp6vyuPcoDnH01KQKO5njP2VeBAKx5J5IxxnREyzyFB -4RMoxtb24LMmEG1bYx3D7tDEZmM4iWuPKsK9T+S8A4+i8lwdcxGce5M91qPRLc1k -IyS8ZTxxIChgoOr+dw4dlTZ2recvHCdiVeOdEcH7Qz7lIaz9Dn49yUTSvW+Jfg/L 
-iFRX/Y0hyDThRwEoD0WhfmAanEwMuME3LUarGJ7KSdN3t5I60n/K1QLxdYFmOVGq -vNfwkSNp6HGuRHZeh1Trcvys/WAi4GrkrTe39uUktJUsJg51oOntV743QmQfAkkM -vV10beby2lxUgvr8/zrkGnqpPD3utd8JDGnDUZJngHGbEIsg/6JexfKGoAZlHEL5 -kSRUKe1+7NtMe1TusSUbOFOuAbbFkx6jTRvoc0dQV+jsoIBTsTR0N5rBjBRkLhbd -oS7TRT4sRmIgKpN6kkyyzGRHrWMyC2gMJJgggwg1dKdoeu/WhAfRXtfAbD+nSnhi -qHB1N/vOHwkefBE1zLNao8w/NcnuJG9j+FRvfFu0dj3ygW2tZGYQ0MQLLC8CAwEA -AQKCAgBL4IvvymqUu0CgE6P57LvlvxS522R4P7uV4W/05jtfxJgl5fmJzO5Q4x4u -umB8pJn1vms1EHxPMQNxS1364C0ynSl5pepUx4i2UyAmAG8B680ZlaFPrgdD6Ykw -vT0vO2/kx0XxhFAMef1aiQ0TvaftidMqCwmGOlN393Mu3rZWJVZ2lhqj15Pqv4lY -3iD5XJBYdVrekTmwqf7KgaLwtVyqDoiAjdMM8lPZeX965FhmxR8oWh0mHR9gf95J -etMmdy6Km//+EbeS/HxWRnE0CD/RsQA7NmDFnXvmhsB6/j4EoHn5xB6ssbpGAxIg -JwlY4bUrKXpaEgE7i4PYFb1q5asnTDdUZYAGAGXSBbDiUZM2YOe1aaFB/SA3Y3K2 -47brnx7UXhAXSPJ16EZHejSeFbzZfWgj2J1t3DLk18Fpi/5AxxIy/N5J38kcP7xZ -RIcSV1QEasYUrHI9buhuJ87tikDBDFEIIeLZxlyeIdwmKrQ7Vzny5Ls94Wg+2UtI -XFLDak5SEugdp3LmmTJaugF+s/OiglBVhcaosoKRXb4K29M7mQv2huEAerFA14Bd -dp2KByd8ue+fJrAiSxhAyMDAe/uv0ixnmBBtMH0YYHbfUIgl+kR1Ns/bxrJu7T7F -kBQWZV4NRbSRB+RGOG2/Ai5jxu0uLu3gtHMO4XzzElWqzHEDoQKCAQEAzfaSRA/v -0831TDL8dmOCO61TQ9GtAa8Ouj+SdyTwk9f9B7NqQWg7qdkbQESpaDLvWYiftoDw -mBFHLZe/8RHBaQpEAfbC/+DO6c7O+g1/0Cls33D5VaZOzFnnbHktT3r5xwkZfVBS -aPPWl/IZOU8TtNqujQA+mmSnrJ7IuXSsBVq71xgBQT9JBZpUcjZ4eQducmtC43CP -GqcSjq559ZKc/sa3PkAtNlKzSUS1abiMcJ86C9PgQ9gOu7y8SSqQ3ivZkVM99rxm -wo8KehCcHOPOcIUQKmx4Bs4V3chm8rvygf3aanUHi83xaMeFtIIuOgAJmE9wGQeo -k0UGvKBUDIenfwKCAQEAxfVFVxMBfI4mHrgTj/HOq7GMts8iykJK1PuELU6FZhex -XOqXRbQ5dCLsyehrKlVPFqUENhXNHaOQrCOZxiVoRje2PfU/1fSqRaPxI7+W1Fsh -Fq4PkdJ66NJZJkK5NHwE8SyQf+wpLdL3YhY5LM3tWdX5U9Rr6N8qelE3sLPssAak -1km4/428+rkp1BlCffr3FyL0KJmOYfMiAr8m6hRZWbhkvm5YqX1monxUrKdFJ218 -dxzyniqoS1yU5RClY6783dql1UO4AvxpzpCPYDFIwbEb9zkUo0przhmi4KzyxknB -/n/viMWzSnsM9YbakH6KunDTUteme1Dri3Drrq9TUQKCAQAVdvL7YOXPnxFHZbDl -7azu5ztcQAfVuxa/1kw/WnwwDDx0hwA13NUK+HNcmUtGbrh/DjwG2x032+UdHUmF -qCIN/mHkCoF8BUPLHiB38tw1J3wPNUjm4jQoG96AcYiFVf2d/pbHdo2AHplosHRs 
-go89M+UpELN1h7Ppy4qDuWMME86rtfa7hArqKJFQbdjUVC/wgLkx1tMzJeJLOGfB -bgwqiS8jr7CGjsvcgOqfH/qS6iU0glpG98dhTWQaA/OhE9TSzmgQxMW41Qt0eTKr -2Bn1pAhxQ2im3Odue6ou9eNqJLiUi6nDqizUjKakj0SeCs71LqIyGZg58OGo2tSn -kaOlAoIBAQCE/fO4vQcJpAJOLwLNePmM9bqAcoZ/9auKjPNO8OrEHPTGZMB+Tscu -k+wa9a9RgICiyPgcUec8m0+tpjlAGo+EZRdlZqedWUMviCWQC74MKrD/KK9DG3IB -ipfkEX2VmiBD2tm1Z3Z+17XlSuLci/iCmzNnM1XP3GYQSRIt/6Lq23vQjzTfU1z7 -4HwOh23Zb0qjW5NG12sFuS9HQx6kskkY8r2UBlRAggP686Z7W+EkzPSKnYMN6cCo -6KkLf3RtlPlDHwq8TUOJlgSLhykbyeCEaDVOkSWhUnU8wJJheS+dMZ5IGbFWZOPA -DQ02woOCAdG30ebXSBQL0uB8DL/52sYRAoIBAHtW3NomlxIMqWX8ZYRJIoGharx4 -ikTOR/jeETb9t//n6kV19c4ICiXOQp062lwEqFvHkKzxKECFhJZuwFc09hVxUXxC -LJjvDfauHWFHcrDTWWbd25CNeZ4Sq79GKf+HJ+Ov87WYcjuBFlCh8ES+2N4WZGCn -B5oBq1g6E4p1k6xA5eE6VRiHPuFH8N9t1x6IlCZvZBhuVWdDrDd4qMSDEUTlcxSY -mtcAIXTPaPcdb3CjdE5a38r59x7dZ/Te2K7FKETffjSmku7BrJITz3iXEk+sn8ex -o3mdnFgeQ6/hxvMGgdK2qNb5ER/s0teFjnfnwHuTSXngMDIDb3kLL0ecWlQ= ------END RSA PRIVATE KEY----- diff --git a/glance/tests/var/testserver-bad-ovf.ova b/glance/tests/var/testserver-bad-ovf.ova deleted file mode 100644 index bb89a2ea9773cfdb5a4ebb01d0333e54cd3da17a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10240 zcmeIx!H$A35C-6$eTv4rq*H3Suo!jkUiJlubXj7d8`>rN_}kvpnAMn=X!7?mDKn7r z!QWYH+S)FDdz-oGlRaXIxU^C%7AltFxTcm6g=QqRmQw0G&5kv`Y|2&q@_aI*@7(Y#+agWx2J^ zcSD<+)$DcN6DPea5R4DrjPVCpG@U#67tE+SFmE|LMM8hbg!95L5{QK3rZ}O*sKc}tMyMHq8 z|N7Iv@v~*NPv5cjMx$vo8=ZFs8Vtke7_e&?t!AtJPHTLVXbMi_Fd~7bz5Df764K9) zh2HF^<3-dZuA8;|A^y+$@p0>Fpg(kc@4RN5G-^Lyd}ockd+Pc-sv#-Pa=jYt&s^fI z&ui4H-_2{R!3)o8n<(0y>3X=_&@Bm1>}|9<@q@Jv4>}d@bVHb*q4l*x0utKKk9kiV38I*| z^QSPP+XKEILnn>>;o;%rp~=B98x2GMH2E;+sMQ@0+2}248qvak%niFwra^Ox?UpPz zjz?=3-)Wk4LsvAQ@DQ?}Xt;9Rgv3oifq0abd);=z&&J99*8W`MEIR;q1#6?Gb60xa zmo$7Hy~|qSKh`P>VxqYmUczc4YaPtf{5*F-d65A)k3k z(CtJ|=e2IT+v}nBdu9?7b2LU=h!X!N8d;EtO-9RIto&e0qC8aF2NKYerN7l#?FK6X zRP=C1m(I#bav;>%@=rF=*8OuB1(a;3yU5u(f2Vd$y9dZ;am-+8o-URxYA)(#r?afL 
zR%B6Mbb8A=ZFOk7Z7)fyk)p;{rNCre)9Aqu)F!Z!Am^_UhpTys#c{xdE52ZlZ0ULJ zx_`@g=Tr}AxC|UD00e!=`%bu6Jb?=*)W~k5c}dMsF?ahNoBRv)9U2$055#l6`hOnl*)rkPPy3o5t>_zb}r>z6)hbd=%I=_7q`(HqRi=BY7`XFwIN>j!wwxTNZ!;2EV z0=)MmuvvO4nZh~bs}wfcMi^-#fGzc+P(!L~HDsh94`z3kp6kov;>~JuUB}x64OoVX*fb0e z#dy`Q;6r8S(9~*wVW>iYAtq9q3dZtL$Z=^3^IT(LfZ*omn1H_M=|WCXY!|~&cM7Nn z5zd~%cb*f47ZbWBmrqCv5lykb5RfFOZJ65Rk|DjNTR%YZmgQ)|?X@{}dNHUZG0#j{ zjvJ&W53nRR*qylfYq&vtV$>I~W7`1@Lss5={=WGA3=57;2fh~tKK9~3j*%1<#;7Qd zha)W;DCSZP3S(7_56-C?6viru%Zyi=3dR!edA#5?G8>OS6}gao&kW_`+Ar`|rbqMyvExZ~B^v&(ZaXXe@fhhH(KK7 zrW_NU(oD$wre5fK&TmGuBey%rinJ(TAiFu0O<8NXRxzUdBO#?s7StQ4jDlDmeO|%j z(hmHsT)CvTuws?1!LWWaooK_rxtFu4-Gz}VX}Ouyj=~tFh`LAF6cr$!J>qAD02sPio%p(pBo>13dEN!#K0-P#M z$7WP*3vLuv47r;qZ3SzIt=#KI2K0LoYO(K`p`4ahWrZEIT>SnV`m&7E5cDujV zv_xymG>e*+h(qVKR0yhRp+|P%#*eOi*QUXFZ7zOeHkX9aJR%VtY@nM%wlf<>yWYS& zom-=MX134T?Z0J87n9t%4m9w$JC{aO6ChmrkD5@?w59K@oOR3;FXR_gR>dX}LoKGf zCyoRc*61+`2vfWj$s=uJ=P$9;pYF!`2LwHE{gC##CI>x~xG`d%`42Rhu2zXk%yc2> zO)ok5iK%n!P5V7$*wjtOZ2Zt_G)jB9W&*GKS5Dx}4;{Ab%vsm`!GuHZv@N9ABcV+k z&$;z|_mTvC*I9c^8A}~5{0OI?Qo+s1J)Pjh5>dM!-QaI$3;AAv^+dyRPB#97642rC zmY%{a`LkqQWQlI3i3DDtJRwsM6mE8E8nSce#<;iX8McRa_Y0 zoS-gY&-5DXQhP?$P;p-|?Bi@ae1-Ylrx%T9{ZZDRtd3t|Bss7b^9K^FDMF@UHtNVi zuYBAdW!8IhSURwlv|vJ^Fx~VgEcMT85N2u`oAN47Zrb)eyaysPA4LKrv9-)EZy5(j z%D>R3g^%Mfif?YpA1W!<``PIBrQik|XBXm;6hShD7YG{7-1tl1kFwwq{xrZQrtL(b z@$9kby|cvVE8Gk2p48z7?6FvBKm0g6V%*G(SsJoa%#jhTY{a#XyDx=oOV2Nc-CONk z@}zxCxnNXmag2}rV2~CESuQCAEbP+Fp{x)sYcz)+Av*c8N1c#&uoGJxP__LAB4b45 z^l3#U#A_suHtey7OPttsYsqiy9e#{rZR5F`Th!%vI?oZ*AI!!X;|G#=JnhfMgBMM3 zN#tuQV$(5hez^<&Ysw->e&TS)u)9TE=e1l!;GE$+pId z=jMHexU#D6{2;=2276TU5cNXbmM+Lt7h~lK#v`#-=UXsbh*;n<{Q6iU zo4zjEa9G5fH9ebFWynAg9VeqmSjoZA#sy z@K`=x8H4`F=v-A~$PFs_`-`_JI7mJs%uc)3UH>L`lDB3!KWqE1Hsa_$o?hYuXuP#N zHdo!6Hop1!yoM(aJI%8fl=B)+cMu(!Mvbo{bLV;0^MNyU=Gja=A8@ser!9we8~V>;;WeRT7S?(ec0?C4DUokd`-BF_&ujmG{u*A z>*jWV7bJWlIt}h_2atqr2Wt4%e1jWZI}w~ig%NAuIy{aWneXts26GYz@k zlpnNbXN|V83(dxc6p$Ouu8(ghw2h#+E2%)~2Zvi+vKAkeT@rjUj?cyU0&}88Vm1SQ 
zG>Dx1D}1i+NxZzv%M9rFI$H@7`SnC=_tr7AlkqTBeyR5h`?j8`$@nCEyMqMRYBI^8 z*)yAX2Wm8%kchOEOV)TNR&lp<9{zq@mM%qJiIg>m1crGt5k=7<_B+Kg@ZpwJHu$Ge z&^58M2qg`t9*6*ZbyJ=xQWRloks^}F5ZDH2^`nR-m7P$|rG{vOlv!`&>%LG@n3f1t zGD!4nsW9eQ(f1aW&Gqb>*w;65QS9@UV`~zdW?@kpplM8SaYBP6v2aPju;E$|>zli{ zEhuzh$3&NLa2U{JeTRlMb8=DaGQ z%-rLn+hUr@&I4WYA1>(p%W84C$L!O)K=}zT?nA^sE4ldY5Akntszp=?R0vcER0vcE wR0vcER0vcER0vcER0vcER0vcER0vcER0vcER0vcER0vcER0vcEd?^C|0eOO8#{d8T diff --git a/glance/tests/var/testserver-no-ovf.ova b/glance/tests/var/testserver-no-ovf.ova deleted file mode 100644 index e251d16da7df7bbf20dabc748b598cb2bd70dbc1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10240 zcmeIuy$!-J5QgEanIa4TvcXQz&mxxMBC(-}O_Y+A7=?{+L>sy^5%1Ued^&x4vQIhr ze(?RSX;aYq;noCMHmamHMxN8z*Y;hW=3r}?=HFP+WuUcD>S#o1YgH|kTDHdgwMWXX zuPW&}mwXuKm&&xgXCGaRKNr^daXMeFn{`&FH3A4AfB*srAb`=7laY~8LDaxH zWMmh*NWU+h2mT~RvcU`ZI{!jL6-;(a`ju3Zfg~flO$Gr!(DjU48ucz1-;5vM?w`x} z#dq!|Bb+ZBk_Jys;=1XrUP1>rs~^Yw#hcQ1xx z8N3o~|26Cojv!{O>JIIxFWcnG$?L5+_#PnV>8n`x@GoUURnGw{Ts>U@f{-b*k2k3B z@2tEPR8b7(1q!1}?{aHJu0GnB;{_7s$sK&+}?AF1TvCf;_TU*vqvzB1C4H^9UF6Yf`-wYiIP6HDx z4R2{}M%}$FclYj~dwiZh3n-il~`c`?fB6&upW*s#?Qn0;;O1UIoO&HaDyv)$J6R>hF^v`@KB(5UPl=mWd)FSqm~e2e@w`(`Y`tGkAXP9C)G zde_qmaqQ4HV5$A!1slUs>{F?(9)_kdp?QIEGqv&i@i&hahgpx;#%5%JBseb6lhz9m z>0^<|t9$mU-8hDrmbMexpWeT=9cmPs_|k4%X%nXGAXLYlqFXcm zk<~Rb!M}#j%p}Bol4Fbz7*9;6j+olo|&_VQ4Qj(Sx9hdm-1ijQ)TmgWe@@6A`WrZm4u_OTZDA2>@BVIs8Z}bGcmLl zelc$fh}zoZO8x zl-W8+MK~EAzBOO}{S;F;*BFztlWUX+Ur=rJQ3RH`20FnO4~%OWK`*wKaxMG5XX@DtY#_wAmyQ;$hapMo~B+Ef#ts?)|y4`%o{pTYqh#egldp( zQ^MOGY|Hpj_a~NPcXTJ|FiNL~VceRC6yT!~VE%qEpe`#GYwcnsmRK^FI;+#HH^s

JL>U%8+FVv{*!~ga z!hmSjsbR>{GFn*me?Hf93^#z1Run#6QESv`YL8N{uPqH?X80k|(+8>@ezm!7=VbB0Jrz@;!M3p;NZG}(Z%nnW{&Jc*uhimcu8dzlW zv+5T)ej#;|p<5?QNGks?6x<4$x_^+XKbfaJ*V_X;ebqO!?4F*CLYTx&U9MxJ%>zRQ z(4KkC0zy4Dx3@#RBd_KA=NN6?IU3X)Z|Q4-l((&yk7j-{G^lVrqV~H@zk=B;{TVW{ zR!v3VL;*bma!e8lv0_u#A&I#$ZRrbc8f$GC!m*JR{DMt?Ts@u{`bO(9HCfNSx|@>t z9Sm)qGS3f#ek91vo8!MEv~4A@=@+>Z-1=UC^RJj|WuOnH>I!Z8ms@@0$;h7FyhH%d z=6D(N(s^|@JSsIpqdz@j1N%}um%)4oXVa5Co7*5|4x`pIVnJ^a>7xi5w1;(SMMZ|J zY~Fob6@Lfl|HX<6wj2kL%eD^})=ayok{5aftCAl< zP|iQJGU)gAPR|g$!FN*!x~a-C?JvONgHJ5Xbu?y!6If8j zqIz(L(kgxHq2haG*kixpCs1Es^vP+!C#P98+V)>q*a-SnaX`hlX8s=-2<++70V~Mr zz7Bf1i3>^S?e}&#h9JbO2Bo$dLrkrzyqm-zMiq}r^SL%^7O;gw7n;>qVO7n#>G)4q z?AB$>;0u_1fw^4(YDiTQ;wDNSpY|{aThyB!$wbZ9J=XVw*lNUUtjE-uJNRKTN0e9HNAJV^;<|)j^=QLr`t8;-7zMPnzJC8SI!zo0) zw9^=t(=v;{ISef{V{%q&%C}X$Nhuv@{wE^-w_axwPtBkGjn-GqNJiHP{f$;`Hv_-H#$#ZFvS@D1H;e3lNVIse z+cmJ^iKwtz_#J(;q~w51Wx3^jd^qebcUZ^YKL})LJCf9f9E0}$KXL}Zw+5e+eg;KG zzA`0DC>-U|WR01w=ESr}WUsICNM#_{A|TM2b@hbB+$Lh2gq>Fz%tTI?71M%Jo>7Dz zdnlLQLjJ0mGaIY4=|8S+n*)+FMnQ^P{XUor=`G<; z!Jl+%dimzApg5PSIUzH#hn`@6d(9?#5FFOQ#@kua8ywuS?LDJhV|^SA_sh|v#Qq5Z zxZrQ?iK>bF`9IUIBfFtDNv42L24GQGzLAcRzJ>E%NK7lQ`#L?679yYOtish9o*Seq zc!Rf-bMuSot|?4hGuuy?Tj(R^7H{x=w7=W%%INX4lO9wZMxyHgWG2ELU^tigsqWJG zse{;@2F9pKtWQn@`509&1MhxN!bVeMLdz}$VOlrbb5z4Q$L(dGsfjd3Pgf-4yRzi( zEf0Q)G+Tht=$B@v!-O`c8`*1wbY*nRkMc=8w;i=+3xujljZ{H9=SBfJa*!kirzvzKn~=Q$}@kJ@O4btzRxUgMyL>bSGUIexUgQiGrRNA6jl|y zSQk05++G!BJQC$@x4P*F&12==hZlKR;u+wk%c$dh4H=JPXseejP_CmVg;X6XV{h#L zqa~(7bY4SajUGtQ)G9rYB1zwB@e~@2alww$q3PxpBQh>(`h9Yd0u~Qhp2sSs3C8Hj zp-Z%C`aPmJ_Qn#1BslGo=kT1z<95IjH8ZlTK(fL}!YV-I=W=4}hg}vXp}gmktiQ~P z>C7%q!br(lwV~`W^$sctOGjC?>VWeP*kHRG5WzM;=OX5^7ZkOGCG^I}vP}Tyex%c{ z*|V*?-clk=jZ&^zSLFa87B@yp(nQJTzoLhwO1kDh8r^wkGBvKA@ z-me@G5hc^_*-I4!HhQv}pX?F}Lrf0`?=L@sQYqvA6wVoyFT?j%1y?Tt&-YLwfnn?k zjs0NJde_gu)^S&^1o7Bj;$GP6JcG`b{(*TRh@#M3XxOc zCPEH#&|MgkU@WaGr)Thy0ITrJJh+2VR<@4QV_EQBZ!TXM+VqIra0E~{%tNA5hvCtF zv-`u`*|W&nl4}ZULqzG?YtUJ9F6w%W-SeSEH2ZQ9sVt9P!R*zY9|M}i1!Eiw;CK@m 
zX)K=$=fpNQ-QU)4-87#;tFt7eTjaPuBP>JhM@tu1OHw1(NTfY{o3v8Jp^L%&I`gSU zK+gYX(FbfCeV>R83bd@*Z-T z`Md|o%GE%aF4O4vx^&M*a_osf0B2R6v87k!7jxv8BVfBRy6_Nk^~PD;qH6pl;2I@1zFbTBCpMJ?z4a-)l0@ zM5f@DJS+V5o)!&(#h09#l4P^n@y;kB{2;SdZQn zJY$o*l%&?-{8)U|@mADHBwvv_(qF+Ui;bpTqSF?YenxrOcWQ;oQ&5m)q&N-P^hX;@ z>2B!_aX}C*Y+Eg=$hPtl{R|AxlZv%>A%2@(8;?r&dVy-t8kgTHdtr%w#kL z_c45HEw+ZUMc;2+AnTBwQkn@ZTv$jR8siMpAoj9_*O-hmI3T4Yb<242!wJgxLzGYY zw!40syj1SdQ!YM6M4>)4zpk8Q`NsVC5&ord?G)e}^GQ|oxAxKh5I_IX@5si2zR@23 zQr0q6v$mh#P`K4>+;1qT|~EU_6SXyL_rS`Hz;>F}!T3_q`-#V^*SuZM&` z$z5n_Q5G^SJSwlNwi_<^NNTX!q;hg>fxGsk!}hs*T8Can=l?75jY28Ncz#Ig*_mbf zyfj>HbaxFk-9?(Z!MgJ_oh=Q$as(a{qndjo5XK_9EBU<^-VVdOn`kTiHhf_ud8Vst z_(v9;@Sd;lSpS4!Ph5+3%&7)C{D|6_)bUwU*9Blcp?m*RO&Js!-czEWG1JY7nf^rF zdz6+7b$+QM6K2AUZC&0-N@H}92r8wHL02=7@eE@ zaz*&IR}J)kIr^}e+6m^)BLCsL5&S$^{d^7$EJBM9XaGk!0szYm!#hzB`L8dZtJQjl zrfm0$pp0d759a}=Vu#v7vu`@)$JKQ$JlbSVNS78oG6Ho@3e z|I6X|Cw@msk~z-K_q3QRt_fxIRP;X zNK6ZAp$X2i2?o4Amq%Z8Ys5oh^134e%C(FyY+|um4q|unfZ6An4#S5mg0q6*txKP! zg}20(H(#>a*)%~Qb^0&n04e;`CAb?{lIlWrSn-_SOQ>?r3lZPVsDtzXr_L~aZ%OY(M zAI)cu)aCbMLdF(XtSh{hmOE1IZ)L6IPrs!^GP5@OCG}6QITcOn(E{TyZ9!m=G$BX) zKf3)ZKqt{L&u7y_E(Tk7>N>wdq|O{U{XXZwfD0CvAI$P~B^<6;FTdl0k1C|k!i)UZ z)VHzUPH=1tNYeUY*XHm=p{E8SQUANQBb(54Dn`AkK=pn=P@$b<+16geLgV>t(uW1stj;%*QYn^_l-K`y)6x(fRD7e2pmUp zYV_Y}&xy|$I}zDre?{<*yj~i*{d(lZZ3U1{$%U-`3Y2l|OXi-H?5tSrj|63Beb{Js z&q|J|1Qpg7<~Fai-V*Kij9|C#`usbLl>Y4mpeI!4AFPS>`y}Pw%3Ct`cUo?n9!c%g z(|Y}6vog(d_6b_6)tG@oij3?a#X8f+!*H_@XyF?BTfoGSGZsNi*y}O1yPu+O2QBi- zv0cREHs`slR^%I+Nar%&Fs$kJPLM3R=kwrXHIi+Ji)0-sdNp%2f3#?XZ}x6Y*F$PS zOhnQX>`|SZgT?S}`2hra#!nBJ&)yN!t(DXDb%syU)eL#S@dkhh5z|}Ao1QIUUP3kV zZ59I7nb%{x1%S6VeC7kYmzVA4!oA)+_bn;^<8sMRLIL*?1cdGw%+E52P+P28LRDJIOnlQ z^L&@A(u(q((kxCwG6Xd9r+YizHHJEF<_!G{9v1gg z*ly|oI*(q?clfEDP5y`ps`CoQySXKZeRJ}un`h3Ft&ISVoNS_o#NQdI-u64c6E*f6 zVCzwc9M8i}J}>L!euL$?Py&ikUu#H;1YDlsXt?K4u~+g?UZl?*NY>2$O{4tYSx!8% z3O*CbsoK;Fg2>-bcUZ)i={b9>h5-}eJVP~8Ld~Zu*e5pndAo~sg}(cNujFn66Ua_L 
zBh}xXqRFd;Z&uhW_IGoabG!ZOT;uE1=7V8p9M(3n7CiKa+EHZ=?WuhwCB8-$Y@#5o?a00rTkO)GF>3ww1)-e*03~HPmV?#S!n<3V*;H186+2c{ ze!>_SgjExPj?6_y+ClWRtewpbY39))X z)thv3u+m&au7H=-Olocnr$t?H2EYRzuQhAsmN&6hLmj<9v1R#1Yf{7TJcv~NvduKE zz33yyZh5iPaRWR+TKie$P-3r~`3$J1B}}>X&)9}0Q`7jtPm#{*k~lz81oVu4nDHYc zn`ql`kl*_PWxYv#{2X!2k6obmD=Eq_Q9w^K2@D>%AGJls$yj-nmM>{I|A_U=DBZLg z)WZsGwq)~lH}w@Qu&|LPC+QI9`P5PeeY_iNFuqUFGB?*Iw?+rWgESs9?BfA`0ER4a zL%|&pSc{&8j?~fKELOFl45aJ`!|{dg_nHeiW8CEjEe00HST<%!M@s{mOJrmj_elV< zs*Te;+H%t$?L&-fIAe``>{RlNV_W*U1!6bE?c1!d6?|FL;kh+#V5kf$4*8b;1WGwv zQF)eLnFt%cWeg`Hcf}oSa-?l1U^pXXif`bAVD-Bz10_d@LM^}iZu^c&8D@vwn{~n+nnV3Ou*A;cW0}?sN)!bo z51uWg`^qS-tZaG)mYY^;-v1lWqLBNktheB0n%Q!ww7M$2nb552=xGs%Ft>*uP{%#@ zz3yPlSou7*dfV}ffeB@=aw^MtVuOftK_zd^OXqB67RJ9XuQhKAPOSl1DtQ?Qw1Byz zoxN;@(Vinot9~KhlG;*t3k2UrpOa5Fp9JUbUh2#@g&A*$*(FHp08*iKGG>m7A-~af zn0Igh2|`Y*+1PEy`U3YYW0c5}T}Z1kL>zz(+-6tNkVV@eB4g$Qq^K4lgS3t2bV0cv zr_OYy&8(Dcp(D5ABYk)U#_g<_3>O?73{Xx7ly%N)T;qFh8O=&aEu;JaAQr}+9Cy00 z9nn}A9)9Dmja`dY@6#VDH*5_$_zHAbDoE{`I|y#{fdCssZWV*}qWLuqsmn{m6jtj1 zf$R4e=uYE_(?O}rnbSWbw*t#JRI`T6A$ifvbsdMw$b;jhQG?I%7JlQ~tEP$h4=CIR>(EDg$Z(XC%nt*cAiF)Lx+3FTMbgpCN1o>m6S2oRSpi=BhV=+A(DEi^_*=~vt@<7k z=fgz2qh6Cf3R28n?QLyK6dSqT5(g|?{v(`o%5hVBF4%sHChAZzO;+404KWz*L0e;m zphv0@SE9Z!4P#-dt|GvvYVh6pGjgbDU(6Q~9HXiumxw=^A=AB(ftdJ^+RFoX*D?_- zcUcjhrWok$1jX| z4-3*}wva#Q38e4Zz3g@-h1@m;@;>%j&{KuI%vlKxAG_b#emjhO8iWYmRwYE7-4;HC zxZaXHPp_Gx@%FA;xl5GeyXGIASk>c%k z|AFsLCds><3vm*HMfzY1`;@JBwDF!wF9NV-F3=Syu{n8OmB`U@m6?`5ej?@p#E5>y z*5Y;22Axa#-YwGUUaz8_DRv7^)c#o>Ik&;Ju=LP1xfC-(Zb)ybI0>1+eRk*e;eE#T zH=w~qee}I2Us@mD-RF;fWV;I(*n)^iYyd?ifT9|qqsg(&Y>-P>ms2R0q83j|e$;@{ zTy@iP7y42z?S;ya1e?$9m-U35nDu{Bgs+b?x-$0cYl>tuF0S74-Tpx*`LX>;`h?ShZ2M+fXk=i8{i76lG^xpT@{Wehc1p) z>sI`dVEy|G#Pykc3KU~f4-XZ)BohdW$GF-t%3g+F_9UCc9mWB9X0Q6^A~>{2WJGK~{cSIS1~g9bel znNazR2iGLEEv$WW@byo+r;!Bo$c2OEtBdFk&PoE`y=LXz{lQ8#O- z_u~WxYZIU-)(>`}NB9aC3yVE@QXt#publF!$$qO=?5r5>fV+S;q$A30=sx%7?F{F- zE@Lf$6gKaES{CsQRt~s`0a%|WCFA?I(MKGTiy`);Qjq9T19u)gr2vXEV3@N#<2hvt 
zJ#HRMR0Pqo%-DDNhA4A-1a6)6xe_CCQ&)MCvd(A!P5dVkH@tp5_LIbaV{bK#59{*8 zcQ??oxI@_J=ZK%bJ6|r;2Fb?Vt8DcCH3zqytKH!>JYOw&!B zvHS5(SUt%)|6!Fz5^QcJz!PKi9&FrNQ-0>Ps!<{?bL>(-bbjyM7{UIr11T!*Pt>lQ zmAy0)MVNV~Qg6r3Ao|z$2X`oW!^*oEB@fODxh_zW#gh>b@iLW1$iah@b|nj@S8}4M ziHB)k8}7b`2fHF7zKNtKy%uRCC+#_XnMN-!?g@Uz2$4Xa^Nk+Ye}@Q`h*3pQ&_OS{ zfus*F357=x=!^nO11kI5)q8aauT1LF_Q)EF=`^BW#%{f9zchaNe*Hc(jw|=(C&H!T zciR7Q4yTpdCHL3PW}q5Po|I0i!h3(b>$!gfbQNFHIFn0ZhxC-}a>0ynI>q(gj_<79 ze&@=K&APkRDAZZAjB!X%IG+7DmP!im;FUFo6=28Wb{n%DN3gES^gkAKcJoKlLt65P z?y^6LvD35-e>*vSastike-D1p`ZXs!IOgR!UAe1FmHp)4*?|pD`&)Dc<9FG?TTDA? z*?)Z}eiBy%=lbGBMiXNRA3n2EmGC`KMu!bgN2G86L*wB(H04TFe9rN|0Va=nzjL?2 z|IFw02S4N|oa@a4%hY9`rZRG#*Z9+X-IjOqz1j*dZLVTDF6*cfxKLdl74h&mjdJrz zy^9MRuQr8i1Qi`Fvge*t=eALXGY^nuVvXrvRCO6DUa~o3mm%^>`=?F=?U(D8?%P)V zh#~gEARL5P%Pp22^43@(w3hde7bGnDB^}GayB+s>^`MNgjC8>lem%QIPAZ*z4c4g{ zbb7CZjU0J7p{Jy;1$qh#$v}==k&Y?28qHUzh8~Bw>W84nJ0%L97O<9R{VhnHqZdzi z)e!8bApu6bB({{8uTX7|z zy*F5n${XPXyJ-5qQ=ezGro>28(>6vQ3d4mGJTyQSDV-_{4iijoysz?cZE^Bnv%P9d z&KfyDVXHQuiV)LT(&uSPe3OIPn@=>Wp004H*`39pYcx(sI=1-~(MvQE)H!c8w7(l4 zx;aR1(ywgNkRrazYzfP!3<60PWCcpSL9&m^hxgwjE#(cBrj5^F8n_}o5>6;q&e7bF zc8qwI)-a|G?vJl$NDk)OI>W?y7}ADEn9>1prMea~YjM=txRG$&gx<*SBxuEufIZ4d zO(Hw-S7xT)|4N8>-A+Z;Pjc$>EM2lq2VYPwMEo-8P znW5`&g2O)rt$Hd=w6Lpo16}$^|B$CcsX-qkA6U@&a#@;%oO1<9x8wp>==}^Ym~Mp< zZ17SZP6hkD8n@OAU0>`*LtN$9@7I41VYT*?^rMrLvDry(kWCW9$)yf-1+lO*T%6UM z+REmLEJIpwe7t z`fmfe>kRJDB>O>m)Wo?wg(x2>f=Yv|79B2d^E1@F+#pdKETqD32Xv97{&R@)S3A#W zFnwon{rQiiFAC&XwntPoKXeTx7e1zQ%}+9$K04pN40g49;PcSZUpY^Yz>nctc4s&-2jGx8zMtLqeHa+e!HC?VE@ep2k0`l7rTz&{Mg=uGeSwGv~pV@`&j-I9>f0n z9`bTX&nH4FcR0cF`lOUJt#9o=i*nOn(H#v*=zjc+{hmhmEkqsJ3aXcFQt>DA5AH68 z&|syZ@%n%_E~$}cr7w@XU#EH`cFpHVgOXwSxDPGD^Lm~2R8ZVGLA$&1vGr)BOxl6l zGeJu#^mx=$ikKe7?E2?jI!OwRM~W_idfevaE*bN*xO_Ai%D@ema|I?5^R(AB@6E1 zFc>iL8l~dC>!JI~VLkbL6D`Z+pr-^a%N|o<>?M#iAB9{ZU@aB93j=n0JpWOd;Cqj{ zHmJOKKIm7`*1Uh3h)oo}g`-i{OR5S6!GXF}hcF7eB&V9Ik8;gmzJ3s9+i!XEUdR^! 
z_td{Ocva}ceO4vs^BeCN)z4~<0i{>~w*@==%DGK_De_gfkYs?UYF8BIfsRq|H?RB< z!2x0BLGn#`~Nv0At(TJ*QXl zTz{^H@F3^xGC6|;mZKp%kU{hRrSj`4Lp?9YF0C2l0;$5;t15LL>5`I>x` z%J!T*OZSiSde`Q4(r4dEk`JBr;!U%hC^UiXzKnKZfgjwZ1*;8vaj=x{HjgNc3YW+)-vYISr9 z7)d9kCHp+g)CViv=(gn!C4`8D6JL;%_k(qOo$GKA}{T;f$La)9(ZNuETn|_oi4*QSp>uY6)yg0DBjIL&Q6Ah?lyK=wz`O zx^ALNq9Z|0yx8NUYi=Dmp1385uh-h-vGgCdGs9eZ6k$bwmM_NtIZqp}t){@7snpi- z^6CIiN`Zyz3Wr_zeOi`>-=r-cC|A=?r`_-QL!s2k5lIHy_DFl6@z%$%dq)5tMINlI zn>2_VS;!Kvs6zx;#p|<@XWuGBhH^;fd>x1?CEDH#(HLEu+&6@0Vd`pf+%Z}J=*OngNTCU)HV`f5ES4@On3`&A!3rnmU` z84S4ut^r9~p6jLoUW>5|Eq5W=;^J7Xf}*PUed&p(?(z|R?Bbuo3FP@=Ns1mkw%2`l zL-ci|%(_y0FB7FLzv*P?e6^h{6cRA~J5)X<>+Krk&_fa#OLC{~6>>Oo;8h1Af*`Gm ziwGYM>A<}$O~FRs6?3%#mM)q$r8Y{UGQfV26#WmvmE_*J2$s=M_nGg@BY_W0m~alA z(@K{*aA-Ksa~OB02LB6d;Ge4JUpy0SOXvFv#BzE4Lpl?#F-ubpO%Hbcv3_-*$=NhL zk}Ut-KQjw%x}2P@g!6q6=`ZoekWy+V{q!TY3G&cJavqV2Tivg_^u8uu_7rP8cqDq) zI?YtomeVZ@%=;w5&kJ{8J}&FBLil&mRCU@6==XTuzHO!OL#DnX3U|>Mg4RRrlWJ=q zdGB^;3ohPJl;wMRrcL`kln{LD6JZj6hCC)-jzju`({M$xTN|2CE`c4s+>ynE3q%n^ z2%&qRlGqOPyt$TK3Vl9<)0d9E$S2q7DA2oSyTM4pA^ASNz0EVLpL}B-rvob9!lEQO z$QZfwU^oHQFq)(Q3cfWq z&Nvz2p7PULv6nninD1(3{f{0tj4QWU{jO9C4mbka=qz1vK4-}aF#_JwLp8YkqOA!W zrmQA2{4yHC3Mfr;5S&`Q5EgGY*0_lzYzTW9O}2^Ank>;y@!$+-F(#vPtTvba^4>{> zfZ+a2pWc7Zv;w;ZekiF7Ty|ag)~vDsohQ2|@%$!1&C)P=>=Y+BN3cFMB2H73CX;I= z@Bq|BQLhBaKseEwz;oFhgfhwO?sh!Ld#ep*(>drB>M+~+mY*o+?fDPRY==a9C;^#R zOp`xuP_^a%Zq^}~=tcHpH)Nd`y?y5F(v_LFN$QU3$u1lcWZ)vBN%zWmylu*m2p8U>pNwbQQWs`Tgk+zhtN@o6HEuY&zjW$OPnvva84M zy9IKosyD+4!)G3*NIRx5=({X^EgztNL2=KcZFNYjg7Ujy3PrGTCDl{$d8Xup2b8>B z>6jW95KwwF{VzsE%}xfE@*KRp3wmJ;X_)N3M!jDB4_UgQI%$__OCNC+88iM;AVYht zjo>}$Ihwh(RW^UrJO}`Jt zj&Q(46?^S~FJU=F^6UuBqh}dSU?QC~#mL~Uj2z3fBe>2uZA)GD;#J4*O5bQrWbdw7 zRT>pCmdG`LTzPs>g>qlNJvQ+w;t=GKk}M*;jJV7sDwv`_Ew~v%Pzp|qqg+T_s`!{njY1R+kODBdZ z zKuE{d=ZAjhP4gShJv;nc`F@M8qE;0=e%I6Wz*k!=<(upP$IxdFKs}uO%~uc~2S{q* zoeLg*;>px%5Sj<%DHEfC?@e@n=B+cW4jA9-M(hpqNph%X->@>ppW zGPN$pat9Cn-LHf1pcE44OG&vwkNy_eWj`6hu$dXjI6fp4bj82%B4fA1M~AR}c)LK0 
z3oM2?3jbN@Fo$#hZU@8^>av{Bj_dYnUO3^4%v&6wgC0Xb7OAR5CgvQK6S}l>0aU+i zXq<(T-RkYk?J*cE4{no|#g+576yZFQFr938pJ>mq_oyhhVvB!!ZA)U1pF)6l(ox{l zEwvZ$TJL2ssJyT3t~AC}GSlilrq4v^Ud#I3@fa-r5MhyD>#~mNX*TuA80~wszF+6Q zbKPO?WNgM}eIh&ip1i3ob5A0_NPN>ZWJ+t|ct3<9B*Mdg%%s6rkEeoHQEM?R+DYkeEV01-v-_qH298gmp5m3GDjDm)yhA4&143%jh*K z>dCFDagS2kCOTwd#<22?*EVqs&!p#YgsabZ5(WakI$_0EtLk2u!aUPmn zUF|->JK9P41uia4kE0|zV~4V5#+{qH=4DJsSb4H~H~zO=6f14b8~LuwWM8X3h|1!$ z>yawAq(E}Q?rKtWc&@RFlHXQ#7hR7m&FXKD?u?^QG0RH*Dz2g5UR#8CT@P$25=5?Oip1DC1v}}st&Zw9+t_9mUzsyK+ zP1jr?po6*G7qc=6*+#lq#n+DK519gLS34YWiiOd%H#$K=X7xNFN>bBQg z{qObI+$W`wsqYCW)|WH)UJihb~D)}!OP20#LyLU=mjsK zc$d!3s0pzodR*h!Z{zWBg1(LCc_O;QW_ryq4VduNuh4;pf6`l8(1I-1XeAi_eHS40 zf-I)+F^jhTr0dV7FHCeoLC#W^f1-!uJn2|iD8F3FZdPz#c4N-|L%cwblZr!^leeDO z%O$<84-9Uwo(~JoFhd9ZtRkJvue?f+$x9!dlZPJY_1mv^*KRl*Ihc)Yv?;{TmqwGY z6+Uy~59EB^HfHt4jphV124WyA_qZQ5A6<^*;y-h-Q0uRObfSRZIk=N#3Z>kWud#q9 zfTOf^+1PNy*JZhg_Y{AIM)syN6;QbI?L6J~x?^dfNr$7g2$RIToQ`u2J=EC(M5{{s z76D+6DHUt`YdG3wBgf00&srQD`>%21Xpn7M1R=*v>XXa1eaG*w)nylKF_qVNmEB;k z?tI8-u)DmzglaurN{BZ){-Y_>KoS?W(I<4fN%UqCC)W6G#(R+6Dm1}d-Mvb$N?d-U zI`32a-T0b!;02lf1$0XFg=i|{kS;zzw_#21_Fo!3Ut&{2>3LR~wk9&@tPL4w0`+~M zQc~Sf&t7_a{OC{wJ!X;A51YF5YcBuo0Nab3qi!CrxXMC1&@26F+IM>U!a;kXh6hgv z2Y>oWr(eg(g^~z|Es2+RuhJq^|M8Gpi6HE84BhqQJMkM0zLDQ+>V`y*?Kk^Yk32+b zs7YuNIW;wxeB2xz^~|VEZXHP>mm!f|1G(K(isv+%Rfwtt%^N0nKg`W*=7H911FHfD zFON}6s(HLHDu7P20Qya-sj}x{Cbt(!$W+ z5P%VTg!r_r&LMfQ;faJ^0`W$hlxAv#rC1uKt-i>=>-1>iw!H#GXWX!2dDtvr{uK%F zgQ~Ir00)oan|t;*KXbFnpz}%C_OCbU)W`yXq(3U;}J<4e{*$i?spooUcZ`-Wr=o z+HdUs9B-bKx<=FRrdV{G5r|kp6OI&t>)S14{&j(ctn^RP`zf_6{7w*+%+Ah<*(r^Bs^V9?dR_NbyfQ|Tt#2?mRSbv zR~w=q=&fr1kePbM_|Y-2?FNP~iK|Zg`OVRv6_TgJ^&Btmw59fM-ur56#~nrp|5S!w zp*MUy@o0wB2)2@SRkX5ubsv}SXZ5`|e04|t@gw!ex*S=L^acl|M~|f>7S-2EVMeUS zW)}Y*W!gUuNP+>>%%qZ7Rx9Pp$PKS%_qbikQITk$be>wj!LD97Ab^L9^~~idV91qKRVlwTRN*B!;K zby{nJ^BZo*TWx5l>`pp@FKk7n`b(zh7nl$VSIixHyA6VPaL@BO`e{^MlP$9DTclDz z8Jixu)tGS5AV72Z%8uY^X5e1>n@b3!lbtax7qzdL7{63$YA+c|-5lF?{wWLL8Oz8# 
z4e27}38SK!SXgCA<~1#rO&QwFM<%>VVD`&8%f$`tuNxkjlu8I4j%}O9%)dVcdsv;h z7~r51{A_v=VSA8u2>L}$UlxM;I z2vXJ6^}LhCw%8W$Q=ad+8`bdikD&x|sEsi=;`iHIOgQW}_SftJY~F8GY^jR$fBS*D z2JeE2E&_!-RZoaL_s<%|PT%L=8HsdE!P-*lU0}{pCe5F>zAz{Pz^_afGK^ZVta$=4 zNO}K3{VGbQG)d{xj0hdBvE%obkTJoAC$1f8JQIyq`2x<>&(JNA%a3}oZmC*m?5HT}SynwPcTq4& z?gXm+=E+%9!;bRVxN(UiG7 z#l)AKDK=4Ep!ntiGp<%0{)LCcxf}9ACd_xx8V|w(_$`N{L;t#nNo!lQx?iilS`cXu zR$a>Ns>ESk<(M2L%sJra_5=TH^s`Gbyg#cKQPUh+=E4{k>3U1L=P%N;N^ga34~?nL zFn0s(fiz`JJUB%1xpOv224_^$JfndRKlot*k96hEVVky2DHcjV|}HwN~s0}4>^4GCPdKl?r$zq3mpcgkQ&Va?eTNC zFde}Q48T}{%7h-beb$z2*8TWMhgQhi792c9E2=4%lBgn$(T+=OYrM7mb^UD!AzuRR zK)y&4UrRNY3}MNi7~FKDvMo(=zF9Bmyt}$6pu8sGJ>XR7>i^y?F?#9AI%|>uTPCxJq7~)`5ftWOI2&0tb1~ z^&;SM*+t*l{efNewOiA(D>h3sIH&}0g$3)r{R=tYFs78pW&(-=IXk4XGFLE4K7Jz( z3~;&1vQ)trI^Mf7lfwMHR|%SQFiDkHrCo*lRJQ({JA%L%u*ptcdZO{Kf-M$_m(m+h zv}$f@=b)jngTbA{d}Z=}RY~Z7s3o`kG}>?>a+ceTE4I2y6`}WOYaaSu;+xnDF-?t6 zQmuln>99v#Q6*kXp!G@LANUh6e72SJmwcvL>tidW`x?_lE4}cDkdCn%#WgaHX_pgt zs^;>6nKJE_OMM?}nw9LS)^7Csr%N+V-mu)D8cT0Qd2&opk|_O22q9w#J$IJr42$(S z60wvgefUtTH^;wr>PkW{s_VW>aqXbbxTwzIf8p6{1>DzmeINxZ$M;WlOR-Vx@tQ4( z-VbN9U}3QN@a=WxOHiK6uk61*JhNGt)#G!4>`X%BcgE;*qAGA_$&}{5B^4z%EQ3AO zC;zN$FbiUhrRis{m2gdLbcGUB;4nvr?Kuis&|C(H_~IPjRm#eURr<;)fMpYcaUraZ zZzg!-i}9-0#D>rKMlLGo$J5M8B^Fn_hGY_P;}V z)xeC>rl~D-Sw3DdRU4Z~T&2T1<2-vOStqh?ngiOQe9t^8VZ=1a?I9 zs}c?QJMRxyQBqmE3LmPc;Q)&Ca1mD02d)l4&fXB#G!L*p%Kmp^1Ip0H?C*wzk8(5m z1z6$j_Z}pW50MOj<})#i!n(N1r>H&a>nQa*>JQT%oy(&CaGY%aTDLhH zFSYX_t_}YWS#JRqW!JqAg9r#HI3OS(prnF;5`wfcfV8xZaZy)~-3|wHY zGii^Vi%Y}w!O2C{)5<_HWEZd4|8}D*m?L*cgWm^g&}@Vh3rfDmbH_^=I!eD*xuRr} zJC4`!vzq(9p+fl=(b;cVVy{A|+#0g@kVr#S}$qxG?zyJvO%CN{(99WF+w#P|*nJ zx#M}V7lh=024yZ!I>eA#U`VD_B9o#0L30x?x|OeCxUK5OsD@*%@pd%t&%cfheuBgZ zbnFt}CZKUStu<=k6?KI5G2CdozH$xVWaZ|>Gf${9A+b;Z+_+(Pf&e~OU&*m4;-FUl zn+Ka&l6sb%`67l!yKA}jg;AywU9iTLXpJ<*Xs&2&8-dq?>9Cn z?4A&>e~{j6^6@}y(8w%x*ml}mdh~p3qV@N?=5{oqeYCy#!!1h|^aAXvCS9~L<~La~ zk?F;@RluLNEO{<-)6x`-RBpD$8NUN4j~n*H&G6@D_?EaX2Y>ROc;k5jQqLlKS%x5k 
z@yJW*-r{>Z0*zqX@0JN6;xv$(PI%3fHkKmDsGp?xQ+S46993hlB6qT#j791(l=sO> zu3c~b9`oFepFhq=`b8E<^ZFS%dPAkc`b2h^$i(S>sW}~2PHBeC=|zw7&dp}5eK1Z1 z=Se*8fcr?Id-?lsZ#*(-#6_va#^>g8EYxFZVvt#>KAcFVY$aLvP3VJcEV+q8(dxyY zns@Kk9g6)8x2v~t6C-+Rb)}x$zc@Dbi1+EgLnQvZ3{yqc8AI&Za|s?7@kP4Jw}6IiRwOR_=;5|Ja!XJ22(~w=iwm26e}4$^|$cY$x$oPwg*W%;gF&W!@uWKa6kI zdim4_uy?|z5`K-64WFJe^{OD9kdJIdKOe;_Y`vwTW{tRHKYQ|Iw!K0_sU0kit}o<* zx@PqFvzC4o0C__`Lj8-kS0qRBABv$tQjl?$MTC4vV24I#>GoDAHWVO~Xr3wxv{zCV zLza)vyT0O!f^NCat?HMTrGCKZ-K8$%(p~i=r%EF>n?yDeNs2^duImAd zvvVVQttUG#qCMds?fGf)8t@&5{) zC?}kUGG$fL6^RH$7!sJlZ8bQ*f=m+rAnC#FVW62#2&RSnNq2Uk@4ckqI?v7c8AS{m zhOWgaXWaV58Sq1X%d~!`aJMgPF6G>UW4Y8c;3W3ZZcZErmsqPt+H9B=o$DGgl2#5? z1gEAvGud??ub)*9Jxa+Uu?9(&;5acWM ztd{l{{Y022gn-*cECn~Kz4C_Ev24`{@?Vnre0el~tnOn+f~ed#0o^DszkXD~pFx!} ziq}l2np3W{T4>>Em;B3MvZr7Xb1&O&V^gWz*K+Y6WOz>WyMT%JJQ%2ou`XgP z7;5RY`4bX%Ukfae{5~5|r~bLvnYHSvFWB@8@ditzAiDmgp&@hloX*}*-?Yuky4+0y zEK&s5r-R-P)US0N6e9grc2@ku;fdeDRKRnu(1^K)9A}O?dEdMt?FS%x+u>9%BkY;N zwZ8053qdI)(>>pSkF@PP2$S1-fKH)p9Z_@9Dyh<($jdC~Qm=Cwj;^@*8nNeV3bLyS zq0Oh>O+BkeDgX4fbdQV-&ZGq{);Zhv4NKRQIHEdj0>UmGYWQ0s%1qz;^r@T>9mfyyZkG)s8T_o8`l~ z;mx1ha*=H}E2*R%u8T(OhR)sK&Gho|Bq;-vKWv;K72g^m-mwVzssaxdmV#_Ao~a;F zq>3<-K%T5OF)|r{PzKC;&Doy#?yT_0wQVY|9iHyjF4!e2BsE1nLKoTS73HSNs;7T3 zbGy^<+yZhws`3J&)y)328SR_aCOk2L{$jq=+)Abx-bShMW@lIT8I?TBuF9!8JSDi8Q>8R)?-KNMd$(ZLI|s%+ zhB^89MD`>zkgs^naAPqUxc6$8HD-r51@rlw$XJ9urMOPRAr-Y)YO!nyI7&+=O1XsN zW3Ilg{`ORWPDv59c9(Wec6K(QGSoe}f=way3fkJhWUozE4n?kjV9UEr<1WBN(l8>-znkO`+r0bLm(%1%hL>YS-aGq=#GM?>X76 zL4V^Qhj7YDyexDU(N(o(c&w-WWf;9~+6GUK2& zy7|kxY9;buM(ShO+~s%GteIj})-9qlZ6>Q#Pqb@D#CuNhtoIzL30OU;8Yo`(9a>4H z*+EYaho0_nPgyUATFY(uDXDCbHUA`#{K${A&eh7bRpD1x>&RZPK*rZ}WiM#x@?>W& zw0TSlEsiuYWTnYHWWU+Q_K@mfVl99Rvke-rA-&&W0D9WMWWVi#DeXx?Z}AlT*uy^EH|-0T>)SwNdh zimv`)SLl%TEpwWkjprI=+7`v598sxwLYN@Mjw~QvdA$!%Mb(gH42=kWL+#{`dTytP zdL3&$aSKcgC_`(`x5#bLB%;SoR(8cDA0+;slXDT#m)%N}Z{!o+*p2OAiVrh=e6EgocnvS#wrX-7}u#?TnkwP=E=+DKcI!%Q+jRRNh(_&F0rr 
zGrA_oPOkp!3V@05%D1vNqm?}j55Qp+X$fc0@izS#ewgK9-L*KkX-7os$8(n!)83^e z@5kq2Z$gcEv+%T=76Vd6WB3)H;mLV$T7uedj%-`U;KrJ$#7L9Fk`ztG z{o(jMQQ@l%jbCbTchT3ZQ4lveOl%$L$Cr7PKcYpnRkW$@j$^fy zUJPqp*m%wnRDu7QYj<7RgK2L5RxdEEeJk^&IO*_*wd~g0UHu07F*`rF$P+N#=#w5U zP6@SqDN*rK5oG}o5~#~HLE;IvGu=hfVm8f;nuefaFhlvo@LD)iqjF19q~uMcntYQQ zLG8flRo0Et_@f~|xbZpTVEAIt+^LDm(j*aqo4;^;OLaaU@@FKv9xfTkk%J!5n^4xK zqRq)1)YisibIjQ!Fu7=9ShD0%I#^IxVC_i<%bib3h&2C`soG@VKD?1NSgy! zg;J>YMlwIxYI}BC5&khrnOHJAD_?qLC9fv?4TVe&NS+y zDD!QkUf??5KzF8-o~vbi(Hf|`3oeFAl&j_rg>8w7qO%bMmIjT)mnjI&U(4sZ-j7el zi7}vlhhFJf^|k~ZAm2!x3tZ=#kayX*aEEXTy7tW`G=&xuC4Qey`_$}Uoon*i-D)|m z{CH2|kNI3a=fsCkuM+s^$d8EKME=z7TcW3KQ_9lFPE+#&ZL0Cl3=ugN zJ_w35bgv+i?s#z=^v>6iiw$Ot8(_(+Xoh{J$z1Kyk1tj31Al`M45Z|> z7Y3+ivueIL2mbp>s&Y1&Pd2(&9u>am%=ShzvPI5$>|w|P&zQQOU#hljr*O~-i__<$ zhm19hYcVU{Cb?Y6Uj*1;>FKLA&J#a{E{4w4_g95epkeSfT2TSW-D(x_HOD4x%QU`3 zXu*q@dk?;ioDe^0;M0$E;~e5&Ei~~3+pBzXf;l~{>x@ZO+t?WV#0nf^)6Htni8DCZ z6a^bRS7!CZ0-NJi(MzLOSZG*P{`x&geo}fSWpkXAy*HD+n10=qV%`?DtH)eaCCa^F zNwEDIZmf%GYw;2-&!Nrvcxjqee~ku%vbUr}?N$~G!7e0SHl`}jSz~|7Bh4*xxsj1r z6kI28b}qo?@Tx~y{sh6CatUBIoZJVZz*jGw&h>j$a1`(Q-qCQ`SN%n-$XBn>%o zIL9=e&f=N>u40ki%}R0RcYW`La5Y(*aE3(JIz1}w5cEp)o|(|y;eJu?k!bhF#9az| z2fCmu0r07g9@^SsyRJnXb(tW9Vi_3!#W_5A1`%yyO@NVljOcTsSG2dmNf|ZNApVlA zxf`@MnW;l0cYT$%7sKa-)>B0{#zzJUbiaL$YjFYR*Ty3DDO147A6iLyodtK< zne?=w%32U1)gXY8Jd4;eMnN1}D$da2Tj@0WT{}h!K-=yc#?_%HM)@vnCSO_R$ay9L z_*iQ2$K9aZQ%-RCE4qU#pi@nS?!XO;=+`pr$LTInAV967Md zeQQ~}%cs3q{tLtEZuyJ5Lj7@+_S!B2>MF)BwOz7uXtcFlEH#F1IRZ;Ed>QM05P@wW zQCPs+uk@`?=X%(7EmqKxFtfunBJnX~UAnT)WA5A{v1|9|?D|!=vw#6Gl&$g2gBz_(!T^&DNmY2guv9gXgT%q1-)0rB!MXGFg z?wT%=PLtJ-I@`@KE{Cd|wrsnc*reOsX4T8{s@31XC3aaTtB z*18P`pW*T4Nq<+J4FAbZX4tn)1*W|cW{n$l9cDGL4)dKWgU40NLU}@*t9=eXcZltV zT?UU6Rx3>xtHkX3#VbDx>8V|92WB0hhK7a={4;zqt5!LXFy(-v$YTyU}d^`@T| zlkn|Bi!rplzBXxb@T%crwu2*YXx)mxtjakCUyNJ-`9662A{P zJg&d7)3S6*80s&Vi#NQCVU6-8V-?+PWt$}kZ}nY^16+~WZj7-xq_;}VsN2%c!0Y!H zukxjrw&1*nG8HNYgBU(`&^AHE$omWIkARe?*Rw(FK%e=`T*`mMN#4xPnkD{iiknnf 
z*=KR+R76#|J>OSCcd}2>LkVb#YKV8XLM>uV#t$Lxqz!#X{E7D4=I=|_6h(SbtJ>g{ z0r&TI742q5QTqWSbqrs#R92P@SXF-12D%4%K#~HY=5|#$X|EH@Z6U550g);&CC}Fc zjX9sz)s~gcEbUwxe6>4clu({qX>QB7o#72|ffb^Hei_bwH06x#G!K1v@QnAdTB!i@ ze8il}EAB=8-c9z&91k@aymxf->7Vw=9L-pg2B=KL2Rd&SDx4+FohAbg7g>` ztfI<7NmhGpZ^bGlquMP^plW@CE6Vxpa$6dEqJR-C*;#sfJO;g`n@Jxp(2#ZUnN6=w z@Dg6>tN7snNJN@>y(jjo=_$?;=Y10RQ-JvTg}SiXj1rKojhl@WRpsS^UwNYH7LUaC zl{ut{EGRh`+!M#B{gjir(AFTYG!W>1ry9bz_grk_ssrgK^pI}zDyBc&yNE3UvHxbJ zD&2xKVn=@Kp``Ryn=bKK+)A3KFVvS@#9`aCWmo_z2LHav?CU?v?~!$Cn!wScHd<#s z8+a+th3Tw;sYRc$90zh1lXBaF&2%IN3vjP$>MGhy&OyP5X+!9!;Zrg6B3{5yR2+(D zkaHj)1EC6Uo65SqFEGacwd^v5`A(VHY{+sm6`({r)EkFjt76pp6Sq2g>mu^ON-&-uXcw_x zG;hvEvs7p1yk|tMq02ni&_5dYJX;GPW^ zy6u7w`D6Yawl8ig9uiTNoMKedU<-p3F|S*@l5g}0oSYfJ=BbHHEtC_fCa5B=`hy!K z)WQ}4P{sGBPDng%>6QZD3_)+`+~5E^sW;@fM<`*iTG)Q{@xI36Guwd3pU~C>Dmh^L zvSF6=3ybJrJ^_{O#yXqV124_$uiOByYV6qhhs}U-fEAh5kQ*)lPH!CQ`^mqveto5P zUthr(pZ+Nloj6Rx^zj9_PoP-c+t&XbVks~3oR^{;-;A5&z3a%>rv_2lRHX z<4?30r4?H@j^BgQ!&i3h(6%nR&Lr1fm+Uu_j0Y;}o-K`)iOz(cXy>vPq%&o-#&$T~ z^Ld|?05COHW;)#lKETJ6n4Ph{moCQP`X+=Xw*!JJZF?8*P0!cfTHB5jf4@dGC+A#5 zi^&P>OK9B1Qyk}3`5}ga8;`A@z$K|X3TZI{9_i>DR?pGhTuX#R96+p@b z(d!cdOq7K6LJ;e&ZH?C}TkDq)AHek$IxeuQ;yCBrS?xs!9DlredK+RDH9BcA48NAN z-2GkHOhAGzPK{>6#SHae&Fl7k0^=$_YvE9ku9)XwgQ*l2hfu8ylU^mKy+!ynkg2Qa=(54W8t(* zbgZR`pXj=t_5r^V>vK9I^kz0$Db87;oOy;HQZ!# zRn|N=fh7{!&As5HpBD&k;Hhuz+aUN_@@_HDd*=-^tiG^|8~8dfigyF_kh<-^ZbLgy z+l%M`W}P$kTg4(r_%3i16ogs6!d!BsADDgg3NtXTdnPhjx zf|$OdRIoVn|ddS@f0KxjbhGi5)t>rV;k^AQ(#{e!a;vzme9?s zE|N`Us?b>izd%?~(!S_%)EphksXsLdW{XVTt0ulTTH3^^0;_&mqw;(V#cO1)9T^uHOk3^`k;gsjw*d5l~l%rI>&HXiRKszB#3P4YcQd zF$4vo(}+%q6!BXTS2iB|LL+LpkSV|xW}7XwR*KZ+p%a$?z1twGf}o;d_sfZeAzr4lj4m4`r9Z`U1wlV#XNl}FO z4neymuy`8*9QX0TT7$^;hqH?oUrSZ;1Kf!}y^ko5a+|Z6Rqc%SHW_-=>zQ&Jr&MeT z2TNEZq9AN}&Yk5=&FETbXgVOl5D%4)lqYlt`;C0PliBk&K>HCO_O?KPp zex(~j)i%b{rv~e%mly5h`v1i1$ID|24U6GlAjHaqkRNBBxx@*MMU~IPN}*j}d?_WI zm;su2(Vc9gU{~09+BG6o84#vUU68T!95eJuOi&&PWqc!&xjgb@x(uH<8-*jeeUPX+)*=zy!2NYj 
z-yFjkv~{Nhw1c?3I;WE0C)cL@N4)9#qR_il<#kVrt`Vp{;6w_ee9Il|tND;_Wlrs} zpQ$kA`Gf8TjFPgTME$03|I=$vqJ!4VAQ*?hqS-xUCr3(qE!_C2Jzy8Kd(SQIs{#m& zSB}KZU0Jb3D6!R@L*Ce7tGV)LsQ83t*AAPr&7R@AjV<=#KgAHv%ZH-^a{UQV6!$kdNI^kh_+ApA#ol-$e?ESjUw0H>EQy-|Bd*NnQA0|4~fTI@%8%#UKI>2Afc({7c zLKE#|f60sL#D}lO*bWS#?VS-dvhP3GtsN}SA1*N`>BYtgVR>^) zJ=psH1q|W@-cOLEJ289P*eK~bxvTwXtX>s9t~b&P%pi~GaM&e zJ7YnrpD47o(6y9MBGjYW>sY9)f^7X^{OwpjHSlt^ZRcHuEKh%mTr{ZosID-O4Vjq0p;r9Oqlc8OWM@|hb-_% z$x6*9aO39L8QHDb>*UV%qXtEWp>QgL-rN+!4QfE@6->D<{7vX!t!zo-OLs{hZ)gS@ zCMjStKT_kylKtwiYk=J(2P*~e$>TVypn+to>B<`HAz(s4C04&RX{cP4s63& zdPUAZv5iZvD-92>e=biFO)rX@Uuh=U`l8$5bUvV&c}ueAeY;)!M|WQqSU(3Sbeqf)E9;UAHI&@IYshiA z^9tmffqhix9I~Pf?z%R95^(;b*AZ8!L~(7GgEzK8Nl6-{2)$ zdwMz(l08=WcK)G{Z&9{4FZeBOpY(s&YoatWiZq$vd3HBfs&NQ-K+yG@z<)f~WBNZo zAIge*jUKd8r5eA(J-~u9*?)U=vxAE8XOk_JwSKDc8}KvslSlMVxhGj~Nz!6PAs%^z9#)W=WD}rX5$hcUFvoKBaLOpxgx1Ku2suuw%oSbNGtgp+or7SWdDI2G3IUow-yTAsgeJTNb3q+*iC z#q0=dJJ~KJ;ex$ra>MC-|+&V%3bq|dP3XA4G`Ev)cDxdXN-e>HfXZf=qlh8q_A|H`x($A=V5dCvYQ ziR;i!u6Ai=W=U#nQ()@<^GjYCwjn>Bi2lcC(>v8ZjlyFz!7kb z27k*5KI`|q_|J-)Z9TwgD!i%quiB8NF85tXAq6eB`uxA5O6zvDaW}#Si2B#R(f$J8 z|KKq6&D&-ViY<%a?W!O)VEWtH?@H$AlZuihJH zVesFXKE)WFLC10P2Uw|tk3Qv@bVd9n?|s9vWdM&enEfS0aWVMb%4}x{eAt8?rGH2R zxM#*asz4TpO(m9G#Y)ubli;U_O9fus^4ss%;HQp< z;~XpCoBr=eb@B}Tl-0rw752rFD`#b|UEC^+)H@sR{f__9vj2h=XXF2Z6(Gd4G2O`L5Wd-oN&f#~OmJ|O zB!vt+=>8K1DnK@Ze?5R1z3oqEU@sx~QySqnR6OX~|M6RF#Xt1Wzy4Ex0~bzV(i~@K z{!cpC=hCMCrx^e;u(xUfSV_IN#3|n2;fu5?Wu%p)!oGt2kqswCVEcZ;?JclsN(0x3 zQ7?D^e&mk8LHP>N*4+tBtj_aXZaSkaf7`C1XZ!Q5+FNWfkKKu|^9?lq*jKEEa851_ zAE(3r(81FBSEI7d^z6)l-&7zt`1okFlD*Xk$X14e`lGX)6n0PY4W4_8JhS?1{1CG z9qiJ_TUyw8O9q9vx4nYIxkP;SS?a1_jyq1}D*4Y=ZQY7omWfPKUrZHk=+a=@`8UWa z8#iU66aaGI))Y?0Yw_MGx(UCiJB zYI8lRGj<=G`TkhFaJEd${Y7V<*7)L;uuyD6|4pyTzEAeP&s4+4qtLO@A*NJ=PD={- z{bgLSa|V1KcFq`j1v&``B*s;K+uzyBjP+8XZmGR&wXvX~ceBQMP0vls{GIjlZ)ygY5c+j$V-CohKr9ZFMQSSIW=dzS88rkcVV@VoUQttCD0 zz0O2i3lRZ<*0?zCl2``~SfRG}>0{CsU$a=Hgww_5YVisyvO~L3#l0RWp3lxW{rMXd 
z9;BB0x=b+a4sF8Z_Q7`XRO9xAnSd979PlV0KHUm%rO6kx<&6#e{l%eBqIExxt}QmM z6AY(tVp45Uc0tNkN>K9s@+7Z1lSGMDG`H^FvTe0h<(KLYkEjb@jpVU_CKncF`{Dyf zT{pt)v(~_Q=(c(ihVD3VJ#WIe?WvQ)_8ksZ9%Z1}o{N&|DmM1R#r2vyS5u^RxB7Rs z;uFM`U`p?Q-mo)|F_D>13K}7OAs~Weu6q?Eb-Kpm<2HL-g4A~>w(841GYc~1*}nV0 z5v16idaWjn)OqdNtC}Ac_+`kWlDnI4fSR~IJSdB*P=4h<7R4q>zVQSt(viJsWU(Og zrQ6eb(BB?)ca98Z+8dr)-;-dB=V%EcvB(5Jbi%PHcY`+MnBL1W#kzdIPD;FSb*Uy| zO*RxLSDHCa!zH^MZ*fTzai9|(Ejm`-GdLg z|Ds5%z{D=)w7)IZ1e}6ye;8Np-qt*b7Ey>{0H>^+b0K6GJ55;CIwUV;^{7@DHUzM>nMG4w372sx35RjM zmO*a~x-r5|!k+qib%ij--ykmYGXRrH66mfwbvv)9q*cIFJ37FC>o*gFn|Beg!2aOYXYXmg=A&ix0(lTI&^)NFk9pMQmfm=u4pN64PYrdilimEFIWfsf`;2VY!^GF= zTfZIsJtVQNg~V1${(aw>(?@#|0LC#*fKLg3_ODTXbt6^OfRHolhp7!(}lJ z%MmyMF>i!tp+58hZ__O_ega&a*fB_3GKk6fp(k`UCE=|}qSSZI@)4nl7^^S0^st2e zAD0k3H1YL57!_Y{T=m&lFFchPm!bo0m292Uur zo|PzsY+P%UeLCFXJ-XgG+>~@7lajY|Fj|?b{Xy?#*+%5*9pL%Xx_@FRNZ>-9&vtj5 z)Von{uio%ep)s%|!V|b>?oa3`ML9!wb1@qy>!z+=H}ix0hS>|P6TKXM9; zuWfsl+VHHueQDDYx(Ts@W*o}Zld_B}T0JrFx}_*HeI8360^3IQ0rUHF%q!@7ax(6_ z)t1jlxYrIuXUFVzzwHqSGBI5>=m z8gX5ryJIWe`(F0S-`E0IC=#UUM!hV1!_S|Ds25n?|9xSzGA;7aO-Re*3h5~;mNxZ? 
z7u5UD#K8SpFhRZSUcU17Q=8e}d@1zXo?jaH<(;qp^85xTANC_J>Fmk6VSP5-XFMD^ zFSm8NkmTh`!*U8%`dL=;Co9osw|V>#xE#y60dwVOQtJsY)!HVGh83vX@A^7JTWzw} z^I4rw_h9LHw7;!HgY4L~ua0jQuU6t&C<-i?zX-?zk;(r>HOK>_7dyUd<5J00mR^+W zUa7_rYLVu^&gU01u$MXIXvnt4OijPxo__?D_tl+NB zqw^5?w?0E(#f>FuPINqB$f#53mD3<}6h-r-8i$rF3Ds&~BZxD>BNm`r-|J3Whs8+I zs}aB}by2;iQx;?A&AW|%-y*=43*8LwjFfYpFK&kOF9?AM%4!-LhNiL8zTY1i_ua|Z zUAj%T-!6w@QsD00Rt=)%t+BrntCN1MfMhEf-OTn{jla^oDsrN_Z+cgaRY1sApz=~35T!NMQZ z{W9Zt4|@m%nm~D$Fc`;8cSRng?n)g^+Aj?xU-lJWC=Jc4hvic*_o@VyM@?$S*w4hk zhQX>H;ner$p{FcKOD8<1B%lz6nEC(20Q8kou+wJW(=ek!vc|fr+xkB=iS9Y`2-5{L zTa9Vd$6Zvl{P8o+L7X~vSw}h(AlO8hBBeO*e?(L8=Gvn+_Un#*7pEG>v*DZF zh;2~6NmU={r%6&C4o3SG=6TB~6R@;t=C~}-j~~pnZtkbTEB%f^V+pCozn>^vlkVM9 zkiaf9nRVZ0W-{OO&G;yYaUD()O zCM`=?ooD6oJ{ElZV_0)VALqN-NJW*Qn%GD8_0m8&Tel8Z8GiYD4>E{->Tfm4^L zys`mCi2gyFS?j9(;fl#AA2uO0oHl*6Z$a6PRYdjGY$h#hSPtU%2Cj!c5^>Q<_sMW| zdG0N9F$bj%n@i6PbcW8^u=f)=l1=G69n6Y|$h+aGTUyO9xn*)rso`fNiKTDQ4%^I;kxBjz2v{u2MV|;n_ zhofiI$Y{X8DQ{Tr|nEN4rcS_fc*B5D^;yv z+_>6kgcYh?_KP#Ao~;8VJ3>qBiW_L}^YL#wt=12}XAorw(sgpvucbw@d&h^?fdyZK zr%%BShP2N&r}%wY^$8t0(dw^lvATVQ<;6{9%PGRK+mMv=uq$W`8$~SKSBt=l zgnQGIQni7z~ob7Q@u}v_Q!@xX6&_G6Wpj-gu+g zbo|5Tj_mhS+=JvtrR#zL&`aC@IT;O|u9CxCvn$yfL^8twc>E^B^Or)w#44Vn! 
z4!}Z%=RJh3I5yr?cq-D*gndY&N;}UuvLdXS#^6w6*hjPvY)N>?X4Usan4;WPA(bJJ}zExrq8LZW8 z!nqQ{Y>z1Y2R5AdDZx%ylF3RMJxX8Ls9W9PNoMa2g%H+XzpUqyz*c9r(77B9beamk zCVhHBAorKnCfD!#7J+0SLbXocH=7}i9Sj*Njp9>X9$4QzUS;-iuY+JUllv{MPmk&O zvvZ_AH0(SIr#vEMJ)m?tZxnKK|LGyw=t;3UpA3RVNe*6)-lsKPVSg1+N3opzc9+04%=1Q-KFAk%qJ;t7n}uPJT`KB~rvNI0{N)W%FBe@<#O3 z-Js6&JflxC1D{?H2ue#{`I^`8V*kd6B|ehJ=Wm8yw~eCVL8PlNaAqwxhAs6K@`QZ2 z9fY{)IKyuK@_N(w#)wnf$m0AvFDnDlLq)dL>UMq0)lrI1Qdj>>%hWD9pl0{M7OIql z)i9%%WNswjnzbji=bZ7z-ruOjBtCB{O&QGlD7B3_UVDi(LtXJ zUap30?(ETiio;#;f}boJ@Y;tm(A$9_Zb7RQAtH4je~vtd%+e6AAHn$A_-NK!MvQAj zMwzR-3NK)5A;Z|hhUX1ZB>x@g*q_w{Amq8t9ft6NatOI!;R z-V-uSK0METuTFe>&NMr->2BqCxt)2bX>&6~KC|5v@iRlM)u_6BXm8$&*>_`kmlRYX zl71fC6h^t%IWzuv(2Z2WTh7z3|y z84~@Wsa|-Mi+o`sZExhu9hm*qiT!<_nmeN*wB+OyLona$-5_SK7?0VVI0ojb-9pY$ zPy5Dz!bNZav91!h^6#1k-L|5BEA@U==ANRNI1_bwZf%i@HiXu*=};RflUN;9@-){vu=b*Gv#PNAHUe3<*ZJ>C#;8w>DQF zhb4Nv@a90mIS9nVT1)&IU3r)hdiu7M=<#)9(I<_S4Vd}G>#X+u5NuHwaYY%=pFdzm zS4eVM(rrJItuZ$bz*o^~4Cy^xD3HAC%7`b=;shUPyeMIp6a3EU;f(`(?|m|i{12y1 zRH4#%^Ayiu`N&qK7qf&{FK8_uUU-n*(m6c8`FWas`l=Qe6{vA6B%-9?!V7Mx`>up| z@|Jz|TSv&23LY*drt0uml*lM;vi+yr3Xj^{>gb_Hu|IV>azi?i6k$fh33)7bo;$zQq^^VewVp0u z;qZ*^JM{^Pu6$#{LpV9m+U`}{i&>XH$wSS$mg#OlEw!i}I|WR94xF$ZPFKbbYKM7L zs;ZjKkA!sRRSpM?O1t>xRG$#uwU{fPP9uXWB#I=ZTp4X@7*h5(IwOvD-G&^u`vSlP z&ipu#>qP16Ir8({!E@V7_cs0RZPXqb(*q!+K0kN7s5L?dp$Io3VpzR2OU?^32Dkpz zU%?F>y~?M$C8*Yj;lr(7gxHq*eJ&gifeRWuFJ>rqc5&8S{Ja84E$J-jj3Uzvt;?^xPn(7nJMUh zaw+C=5=par(q(Y(AP^uJ(!8DgKg;ln(zl>Nkme_7wCLiOiMHk|NP&E%8waB`^XZE} zcnR_RS~;YN@yO?OrLszsBJD|Th~j;Hz9Q`#kiF7UB5Z0#S1`D^fD``@Htd6X*U)J? 
zwz7@Oko~ysQxu+`O3b+}-CV8}N~Azl%i^vlC;)E~hl^_s0Pn|nEDOBYLLcVT#9J(D zH5tuEUzdyRUEN+^Ye$V=vcFbMiHCPD0xJWvkIeH%pTST+b?IQ7E6|Z0qJHT7!%fBQ z`5`{GSa}s302q(eGrXZFZMe@9%W5J#T?>b|ht|%zBpA9;p<@NygG%x=pt!4+8aW}E z<+6>I@_SxtvBUIuhMyn!ozQ<#aOU{Lm5+`ZOgS`gt6K>yw8qDKD)0h*4)5JGEVufh z0LzHJ-oq^=Q668s3YIu)bgubZt9q| z2M;edh}7+qNmh1+CcLe$XkvkERt;xl@8g5ZKp#n+r0K|Y4F=3X)J++Q@bDIIm~MHu z>R(0*kZ9|gxLfV|*E#kY35OejO&*IlBUbeB2lf>){zU;(T7KR^A-sx*wQ|-_cpKEP zdlj`shwX3Gbva`53*G>a9QzM3yWQeq9s79o6 zND=#SYs%=TPB^We&$9&b7ME4|^v&~^3v6obcvP)cQ61|T4)3n> z6zkcz4X5m-+>SlosZ7nH2Y6w{;Y}cigZh)aA!)l5EyOL%hy>38Rzct`o4peHi}VlxgTb$y%BLjY3RbHh{hKnB!H6Ie*rcRt3&OIy54`vS%!wb$MGU}hr`AMYy*&dl}u^h`Wy zN$(ntS+C69Mo}G$z<%N3X?~!azPy_nNmoumP2r+muAQc$oI8)1?;IZmmnd@M#HV*H zwfbT4M1JU=VUuUzoCoZ~IlS~=zqc-!|5%2^hC7*jzdVeYr<%Cn_OFE(Y|KqW$CliTWBW`FuepU#q_7#~O3@;{)c=+>pI;RTCjsy4*-o`3eHOuk^T~&Lh+N?zu#{lG!3( zw7_FZyP-c%x=|E-wP@m@S9XA&;+nwnrdO(pav@uW40<9_;_J2uzb{*ddQ$#2J8{h^oTQcrGPXy&iJ_8U8a zEwciP+%jFdPH;>9MNF{govQvQ;*e#vp#z4xt!7Z@HC-WSQ2&qFYu@_Yqw(kdp)#*U zKciTcdvYfjCX#B$Q2jwyNq5aXEAz=kYGR?rn!rATmdPO)lh^ml?d`jhP5URhqkSoR z1uLScW9#%r{BSur`iCigiq_JmD*!)FWpTc?e^;gpw!|M_btPHq8ssMzv|Y8>Lz$Xd zM(Z{>s03W$^9+u92H|U+UH>+5y^U{YvJ!RdJ~Q@kiQ+r=K5K#Z?c~$Jx?P*2r%*5~ z;nH715E%_ze-hu3;`yC%qibe8@iS%RvR$SS4~wOTSKBW`LdGZF?wx#vZ-#g*@zD4R zvY&i|@fnMVz@0Xab?Jla^zAbxeU@kKtL7zECA@Z`a`qVT)_(F6aY}rre?^G3*Jlc_ zVw}DI6^tl1!jfyDQ2gEvPhnU5sB#;{qoL=Z$rKYucOqsfb|(6dQQAZk{~udl0Tor- zg*&t;h$GUapduh3oq|fKlt?#-l(ck%e!xhINJ~ga&d?x@Qc4fqDKK=`z`%V6_51#N z*JZI5tTkuOynF9wKl|DHJ#$V$^GaTX;c;cf`0iUKw_mHQEnI{~N$PPAQr^5K77bG; z@hJsjo*2tJKOQdV2qo}sE4;kV7+n#n-!4@0Zp&S!>53GIRMY7_@EZY~;u#|INteE9 zXQx)UUaN`E({pRq%Xjs4LDNe04T+OWLQ^M=l3+-}#86G`R~9Yu6_;T!5{hM}-<*hf zu1T~a>UUb)rprEC6dJdC6a8`_v*4Lb^NVcZ>=3!E4bBAzTM6(gLY3p;rI5dp+V`W(?1fs_Zn+OnT=f=l8Z`i9H&l!TB0 zZb!qr1u0j3EhQqTD2W_=it4#kXE`*XLuQ)w=u38$OL9I7ktIwt)r@{+eA1P;-nchW zr3+X89gLfC9Thh{z4h$J!#4G% zGM=Uaug$?kpIuBuGG+pF6IY3@pR-e}y zG&JEXuBpoZI2hA9;{BVWW0REBh94sBU@-GX*yCN|&wfc&)Ep7Da^X*Z{dp}_`Jpxe 
zEuB{k&R=3jBzBex*AK2R&yE0Dhlq-tkUR>|wLSV=bO}dTiM_YZ?CM>-PS95T%~d`z zG0{&!`b=1@`{Kt;tj}qT(AgnqTIi|RDwF>C=-eB}3fM^fTvU%2R_7sW#`oNLqzyy$ zFVNX!6(7qza`Mw`VL{=&G;yv4O6z@JpK<&7bl-eeq?JK>URKBPZGNXz1DI zV)ytiQy7S9zuLu~tkitmFw&9#+$b{osX$9V2};HM!guxQu!XnG35G1Ga!HaAd+ML| zM56gDQ!OBy*s&z@+9|^ztfLR=d<9Bm68l|tg=KaFbHXWvVFfU|M`5bWM{G)C0jHk< z_4*aIUbiFXy#}_`3_d*)de;?)ZaYkobZZYgTLNKHcyzgO_|}!k8!PyrD<{J8uL$n$ zP%5($cwE^(sL;Ibj8|oqJ?WDqT>9*bcd}9{UL2Uoop5Nf93DG|NK!lqXd>GMki7ICB%tYCfu`8NSyzE$Ln8 zGY+0k{^xWwEnG&5HZtn6El03xX$3i4v(0dXr*ZYlA}iWQc;KLfTwh2mx@1k&yP{;m zS(mj6A&GR>_jOP{ZAs7NY(%_gYM&pVJMi3%byarJ`N*!j- zRV(E#C1WBYE9R`#KIIHNVn}EACDg7JHwHYbXb?Q_kIGV+563_v->>yAO68-8cca^3 z2xcYOrb?KwfXjWWRI~W*?kaI%q|I8=>hbW}>qRx86(yI6F54lSdM|5qCfk;_bdBw5 zT7=%^gw6(8Hj~__X%5h;O@GCfPWk94!3gZQPg=Hy8rC|y|HoZXt!GH=!L4}le`B|MPi zr9n0~6Yqw_NpCQ7SszS7Gyc{8ydb1AcgawJmF;mC7a}o?wJ_*Nfweyo z8_Ld=wF4mbCGF(iOtIlgQ(-V5879>PP=a`LaxP*x6T<|BF1epxfv)8bomfkF#FK;j zm*zIROx3%kZ7hoEgB8|E4(#cQMPh!|Z74fUSc~yhRCBphaaEg8qV0;?9tPrAf20p& zpSm1|_|Dv;A*u}qfE3ue5UYZqpqGAS$XZr-qiw9wph^H~czT{gk2@E(Uh38C947{# z8eLqdT+p^>YEwf>Lv~5aXz@t>`aWmfF&fM}1esjcjx&Fvf7tWlG0Qs}Otg;KIDZ;E zE;&C5L?|o|F*WKlQw3i;l^)uw2DyTCp0G7rcky0z(HCkLr=emUoQZ}SLFjJx$@!~3 zD~~BPR|cN{y$<9&y--ti zCL1+>qNQ>P&G4PKi62-$*Z5j=-~n%*<0tcZiThRSK?|G52LZT=nS5w*VgrfN#fH0J zeNxJl5Ut!tCGHpLMcXg_xaSx40@s^1a2topCR!e=5wLTF!jBMdS|@_@^WROn-+9yR z2=b?>YOxC>vVNu)lefCn2oY5H;+V|Z5C}3%mNfUG&6v1{C=W29#?0^WA{g{E4jBrTS9Jc|CCm3snN+JY$voWKsp_ zX#e^&aAc9K@It938ss&NSEwK{+eXIy?rR|S*f|216tL*&@Dr=~ct9qLmv^5OUI5s4 zIDbtp#gR52Pw`2&zapPotWtpH6~#nEJWN`xY=iY%rr)cs=%WQ~x^Y zNY9_TZe^9{+hpq)5?WA=fp*S53|IT~K=b5b{(1cU|1vNaQMiQO_t|XX3 z!S&Ic*SVKLS;a0r=IJ-i{7ioT%hqvR-q(aFDBr}Kl;i#lbK9-xsAoEzrDnJc!}s3C zfo1t=J_Hi=?)(K;E!EpS{-k}04^l$6y>COno&Yw_&XMsniw1&FSh(jxtoKq?O5PUR z+p3>Ok0PN{HI?VFRk%HoV8=bzf5h@x{tH03*$%!Ieu5)lEo0GwzMq9xO4ZJ*3wA>B zmHxhxmK2yv2zI`|-ILY$yywFrF~n~GOS`e@ECM)h)lr5`}I?22#}*10++~Y6tY0}5I zrN@eIKs~%>2%$6E6io_sA_5ZrkCPQrl>b7b0Gm!T00fAx~8ficT%+9m9th5m@v7j>>mYNJI1wTVLvGLaySR&$XVrf;bS1uG{1g 
zE!#Vt3&i%rf}NRIuk$Kba^}NR+-fHQKs!fX;zHyz&32@<2uE|jnbS0wjZ{tZ$L0W> z?FR%gC5Pz)wpfwF;#=M0Q`9q%RlNA923Zk%&Q7P{;hy0VDg<^RsUnFAkn;!zIH%@x zkrP&M z%0T=$Ou!ss$l6Z%zgM0De!Yv!VT29BND5O6!aqb0^oW?#@c%3k`R>iENBPtw`HoQ^ z{o$P}gBlej1r@}Q2abaAfG7n39lqhulhIY@buuckuK#s;7i(!uNPsuUV{E$yqN!k| z=&zo7zm*n}XgY;2nh(dU_gjOFK!}zk2!|1y+*jqdCJ+nG=Ea7<5sFl0*%5!N)S=aT3;3h93%Xv2*gucsXvt4}2$x)mJ8l~V3Stk)sL%L@1w1in@UM5KU+)TM zHRRu0a|acwnBzfE*+4v&Cw>QcoJy2<NShEw77u{x0&IU7<1l<e$Ii}~EwfQs`sRkJ zmRu6c_}@jd2LaXM4|}XCz`qD8Y9xeVu%{0DvKG#Hc7YEizO|CUqkOIK^QzVDZ@RD+ z1+dUUTr9Ys)4Y}4xhDKQsf8UVP;l-(;zIc6GPp4kY`FfL!?c}I*O&4AZm(Jyk*<+t z{+k-lX>yR_1i+$H9PWN%Q1S&=#8MJ*Aw8c!ozZH;NAqjp>*PHSGczJ5HKBU&kLDHl zJTUDq@Y+63D7{fnBjHVN7Hta8w1XTt~cC%?Qw{op?M`N6H5udW0tG%a}88sh{Yp62^>>%DXaMZz-|h7 zN;SS8SMZt4$vkYBt~y-N-0_JOjj16XU>z)B|zKOQ=n-dClA2W#^ck|71%YaK5 ztVS%ZI_c1f{s*ep{~YrRFMk}&Rd|JyxKtaFgz;qwKou$#6aHVvsQnPN@e|mH!jfriHR!7YKA2Hwuqw^@_w2BZkf+i;% zEKs`1lgWKn=+zIfx5o{J&n<=38AWv^1vns2u`*39!8;j$il;|!nGdgK6$6IB1$lu? zmu{dV?-X38rCazP>@^x4x?ki^(8f`wXh_*SZB0SLLucezW{--jv_0pqI*l$@$Nxvr zy{Mg=7JPjPau=ry4c>)$O;GC;e-_1krJuBH`z#->D1vf^#91Z+9Di)RCLjn=w% zgrtw5+JO|sR>yEzK4-~y{uq(n|M67jFPE#&fk+GQIA-)csSBP7VxHwN87dBMs|#$Y zLhL-l`I8K}_^2o_gM+8>C*aiilZo&5Z;)wAQa{?IC@lF5ul%2U?ZA4qEt?Xa3D%l; z)R3<1bMhQ^IuFcZy;Lo)BhLiHKu_}jWLTdmp_{41CMt@l8{u=0xsKn3bLG1jZ16R5 z$p)K@SgXe0V+T<;9%Tt+(L!ooZ+QRuDVa_0bDg>Fo(redjlPz^b5{6$-rT5hrMEHH z!`w3<21y5!=zepKAXm92Rg8nE-DlqI2 zq<^l_+6`DH3@r2Syaji55d<34?>tbyC$L>o1U>!T61)B66E%|*G$~;FET{dB-Him= z-v0_+bes+I_{s%{>H(j!gnN0!2$^(KRx84^jS^Z?WMx#l`#(GqZJYB6e@O%m?9`ac zV)EW43$k6AfI6HFmnp;o=UUwXN%g0IMgwPkV=lz~e#wvGsDULy{C*-sjTp0pfD5Op zn!!WdEIsdAY=zJmv zu;wg_$I)2NV)Q_V2qf0CGJ}hjI19T7&xd>Gb>_7ciAn4OX_1%@EnXjS?D;3prkQ-$ za;vkLB}BpJtITss>*_o0w*fUEbl$ipR6XYGBJP3W`_0Wv!Y1;%K2;v4E;FcvRG78U z>RFHp&{`{~6#%T2kg)NT-s;%--fp&^kXQ0nhA9 z0AR;%Bj24b@3N$379g<9eVR@){CG&SDFj+17k_n-UHQW2YvNbnWod>GV6rWg3q9#`!@jqz6%k&v&*or@3j{Jnxjb4lxW+Z{#T-& z;-GJ4==NCVi>pAmtEjGNT zh~Qj{Cat<$b;7rVx0w)ZmTl)|>-=-YdyoF@BgduBxS->}R+;fa>J~14*hAGT5Do4f 
zC%qH5XRynykE08K)xSL`@DIy)7WVbkY2$c;UAN!Zcg|@PArO62 zx+q6CO(z@=rBUYw+j}~%3opY>m;dG6kHou{)b#RCOxS85hh|FBW`3hu80lQ`WSRX2q{7RbAqA895)wbnyDa4_j@9e|C{i~(WO6l_iJsf3 zH`pzH^lL=Ie2NL0!dM-nn2A(yM0Y?623^c-G%p|v)J8zJb}rd88h}DR4D>-G<<9I; z<&zALc2z!ti&5g7}cHQO!F6Bg!uNZwaex@?64mI=qhVcuZ~AH1kt3Sp+8`H z0clJN!VBTYp534*#RIj(-DO6!@h(&u)Nn%NwG19&(FTg;px`{-xk_*uj%}M5tflxr zon#Pw{+>bW(`49T({LFC5}HvE+(&a8X-IJ-0(7YfxXj@^vIV2ZOec-pGR_BTO>(UW z0=MYrDKx2(b<7C%*2?-9w|lg4=;;0)`x9IDNAlN$Smq8|5T+CAmV$lnTZvOMB{&dW zk8hwkrYWQym)id-)`F4q<7?x<^`Z(TG?3fyylgA1WT4`8w$zGid4=uf&r9Z4<^u^d zO7q~wexv$UBq;uxgjN>qtTYgg_fQA@3&{E#LCZZl^Qr%eaf<2#q+I=IXSuI1vk$m} zb1>3?4~*<+phMADFxy__k3Eyw|EyZE=3pi$rQPRPj(HYrD}(LzoAJ;g8DeDYj`nr9 z^S?Tke`hj?$;Z~t>Hr*A0RoxyaYxCrEtRq%*z~1dEYy&(`h5=D#&z127n^^Tj>YmM z5tEmX%gi*8LX9RvxOeB?lK8Q_r`W0rb^{kle93r4U5!78y{hk=UmrM1?(Oz1no(Uo zeI$F52Mi^eRe}g7(=VRxy-ecQ_%Ca(o*&3XPvU!n`Lw>sN@N`X!amegj|pl>UmePJ zr<(S>EBM`g#CxWAEN9lfooVHp9cn?$yY*QU`kAbgvPqL^gH~6Y@vk#DoHtaJYn^6{ z4nv?v+(2d=_EKfQw>=?+PhNY1z1>!>bmqw`P_S7R&U1A0Jw>4kA)t{kK8Vr$y7zKU ztPABH6~_wLalLc}>JTx72b%C7hi_pX9;>3vvw=SplclLX_a$J*Sr*E3BoN`NXPqM@ zi^|>Hz&yc3q*!!agiZ`SS+#0_GqW1auHE;j>W?o;+ zH==`mCBvN!xdrYp{q~~gqn-TwncPSBA;Z+V6RVx^TMsn1!f)*GMG+&@1$m`|VtD(K zv7J1&e`Rsrz#MFBBvqj$Vfntiph)M`ZGOMxLk1C)l`N=6FV_k1N(^Xv9rw)RLAQcT zhA1NGTv5X-1-Zc&ziDy^BEsj>1go5?|aU$*Aqb!S?0u;XZuQ) z>k2|nUP?BJam@1Zyt{JW7WRX!AE#9nyCZw<1M245gQ@y@*8u^^c<9*y5GzUL*C926 z2a}H7clplLQXF)pz4Aj&l3^!qvR*@(SG;k_r}w_Z$;Q>cmbjQ9F!mCRnz2eHbYb#% zW9lqssCJ8$WtIuZxsRE1N|x5gyY65;bTiAXO!d{>ZjKt_ljb$|fh+;%%s!nLV|^QfCF-GI}Y_;{sRT`V!Y9El$s} zln-WRqkY175ZMFGjp~i*^46}?mog`IAUP6^$}1K@;91mSH&DfseKKA*)2lPV^$M<< zO|8eMdLed+Q8}-nn%Nn)e^5{dsr(&>b)|vN0*2TP3S3%qNW;yOmrb+b;EeOILqm|F zuK=3hQ&U?2zx$_MNfjs0Qnr%0W`(LlJ*&7R1~uz$F*C-Q*0AtNh_ZdXSHy>pjfrE;M@a)z~2J|ULW2U4#y_uXFK2n09g!bSCn9}icvp{J z&U1a);_cm>vc$Ks!n_d*>vn?rit_msbgv)t?YLI|*nkHUam;NM$w!0=$>kfuxE^Fx zf-yO0BC*TQ(7T8{d;0PeQ)Cq%{Cb=%Z2raP5t96}de_!?yF{7Okx=mgIiJk-s=W5E z1?`6w#~o8Crjt~+3uq)X 
z(!D4k!NHO?XEx%Dse#>jYzX~;Kuh=Rcx?Tvc@ecwP51h;Oq0}@O2w+2xMYZzZKT5& zpbpf$hTc!ep%Hc7)Pc%-+EeA(=*OBzAdTsudl>aCI(LT@-q$b#yRIKuuSg&5mUgE9 zk3L(Lsu+JgmX_$dsIliNLn=h7?<%D}aV2uhYfn1L6%kNhT97ZPBe_v`K==g`b%(%pYcPQcd>D%H8~AB2?T zZx~4Xh^MXVsd*W5>>%7p$ef~uH2XEHI~=9vIrTMDf^XFGl}OMcYaLY@TdO)Y=-E#d zncTZU$?!X;uQJ(yJ`6;B~e7i7aIc>Z5^IE*RE=6U^4QZygW_)mGuwf>!-3`9n zA3;jtTNZJsx&t#@V)n^kWz^8V&a{bDW3-W%#D8C1f*`r6-my(ljas)l;RxslgQR0n?9=~l;%^H;$x~8=L zAum6A$JjK}9NwW0o*|ZH6aAF+F5c|jE?j)-M7|48%BBvE=3m72_NhvE0 zEqBjjtiAP4|G4_Bdq{-l}6!pyv|-8~3DjS$B5)C2=vwG6EBI&8P|5)un=_Wm!mDp)Y#`|7t32#gQLT@ z3?K6@=(1p%xf@{X*LI=M7?fX^S<8YO{8N6kN!eVKOW<7D_BW%3thH&OGZee;No&Pq zZv7d0#n$%af$1(>M`=2FDZREXG zQOgbr`-@|GudPhT3$TZiz&&a7nzosMeI4 zKtsmbB)K9g0cU8f#$h7Wk+3k2%h`Lwot1lvBC41y{&ifygel-UWDS9VpRWy9dw^eG zC=GOOK5i`e`GQ247(8$I(tsP>T~}86u9l>B{qizeZ==4Tzy9J~+f7;dV;f1mri3o> zN!ZVesspCQ-{eG33<@lE8pNMpT+wqcVudpe2YGxf!&^osYiWcaGPj?bPigktcskNQ zP*o4JDJ!mKRV=p`O=H2clJ%0ey;ftoaCXyU^4g)*Nie)m(vrxlm-MjIM%gAwVtK&F zze(~0*_PPuI_0)gPrs+bjQyz#$KM63KU5^vx+%OQy$d6-?Ahb>XM|n~e<)dLw>O1I zI;D02P&;yUB2i&pyXH4QfltJB^+a) zhPDKl@3u)2OqD36lk{kgJvqCYEi&2K7_4()QyRD)0MZ>f{3uO^+WU0f%8)rKE5%qi zJr(WZNa_#(PScyMs3bAZ&SSZ=GPa$9(GyNHj{Iqz;K>RMbTiCns|82YZzRoF+BYA@ zfJl&?oJy0qnCZW!XEsOnZ(vSFE#b;tDWmO(J< ztG#t*MO;-1-|93bs*Y?H(ZWY4&bcExBzM3fe~E6EYamT3JlX{OmqPt*aA`+{?KGVU zp`%^r`YR0FqJR?%aAya_^8B!eBGI(HFPxZdE1a0-G)r zc6&WB{vywUH>^0gcY9hWB}d;n#~3u$l}E)0%nl&X2w9IWC4v<032E;xR)+*Z8BUUd zPt!=wqP!0-*wM6bkl0JG}ytq4vJ7t^UiyW!P zv@-Z=-0eZ_e~l;D-C?prE#7#fb(}dFi0P!4~?JWS>-`?vNb1 z7lY?kYB^r9_y`^n;W@> z1#s~nIYR>9eLE-iUL3auY$uJ>5kjTpmWx@^XKOJXXn{w%__I`af+C3hC0!hH)Hr`f!x4 znok*0pH4459{$4m9`oFMO-A9K*X3dlh~54)>9;mVNmm}-OREKi@!c#{1t0fv=E-<8 z4}NjtS#cM_uMG~hDa&uUG)i>`2us4eR37aeC2Hvjq&fL+EuHld ztpm@u=_vt54=u?i=MN%YxG;Zlz__sbjFZHQ^ui?Qx_xo(GR=#2@lW!bOKwtBD4k_6 z^oVu`QRd>F9RZt7f4Ulqp;N;CWS61LIZ1p{p8MV!R-EM7B)+d6VF81+x2K%k=nRe> zoSZJd&(H`@e6Q&jgVXM3OdlJgLc?^sllQp1dZfYK40RIhJI3G3V5Bq5Qc8M9P>5_A z&eM7S-6T1gtC_E>taR+xM_egtCya-qGdDd(OloSc4OVYI#?$58cV2k-`JgCbHv`e^ 
z?EyZvUURS^9FnXSrlVbQ30jiFY`T}vPk3J(D*L*vbaoyX`0jxF`ut{XpQIXlN|1(* z=x=bLv&0XlTXv4{8NP2Hc51)04>0LDG7Rma(+y{O0abWmt-}`<&_w|npsnYGCbJSsK3`RE;uAWv0ou!a+RUM>xHnT7sE3m zGbRhMd#W4}cx38itiuFMPW!EtQ`T$Vfnoad_w0Qy9oyK^KO=ibPxm7|!P~Z*T54Nq|@h zvC%hmr4UQ_-ty}5H8-tB;(a38cQ`(otTt9#PO8J>8p)_{gM*_(nj`$F5}MP3>E+iZ zbE548XJrzJdhBs)+p2@h$a(bFk^jux~9*QB}6`tz9J6gQmkPN~nhVi}nXUXtgOWkO9o>?U~iT8~51qKoBN5KRbYREq2 zGqmRcbQTa^)~MOpdE!))H4AT1O*MV9N3|aAZCa0i6>0M8RnFNQQ!RSXmw(wAt1YEF zZjt}41F>l(UH33+cW*#3E61aTfv}Q2M?#M#0@EIBu+X`xvm1`D=^K*Qnnd^HoP3nt z8Ilmx4YZO>`%OO#EHJ#wz&xbS2KAs2J0>F25>7c9lhWjh7D%K3YD^xg{y$r z)0rEttDG+q^WQSm6$JIq#(G$!MQ70s_kR1vm|(HCRJYp$kwi9(QckMQ#Ad#hFF?(A z`J`%9`z6V$He?_fv!m2p^6Y;A_zlg?)KYJW&hl!$IuettvdOGF26s6z|5F)+HGiv& zC#gIAHsr|qFS|Q181i`Jiy~%@FD3tz<5PN0ys3YJB&S&PrG`lH5bTZUHSW^@M0V$dUGKimVWN;y8Wat}E(|>@Gp%d*Rd#Z`gsdFpBzi#p zBbO!ze-KsYdV0bx*AGjPCyA3<=VUR9Ca>%2>Ud=nS&3~i1{gl~mS?FQogsR;_GnT& zpKbHzM{k>)v9Y$gNiP^`Ry5u<3C74`cEWUOG0>Jdb$`31USPTGJV>9|aIiVW6)3%1 zz~X(Ys|4|jWEaMrboI<@%qb=LS`bAm5mQ(?cRU!K){E{eI!Ih|FlS=0U3)7^foyi> zlQl7ss5$M7Zj#`_*CHg^jQ{H#lQ5Nn-_`8nZ`ssG;Kt)jyr1=0^V4F4=g)S0 zk56_ss#kmFKI~$pU+uwU)!q)9Lk-Ew?Ozm*3h%hnR{|aOI@lbh#p#W&E~>DsU#q!C zExSuZo$TPV7W$dH{(9-crJ)8q)F+rNCDpsGy~R{I&lhr5Ur3^4taB`s-&UCT3?_J9 ztg0mM-oHq01MkbUkcT(ICTAw@@7jkj6`&BzXT|&;6)QDqtPGa$Fv><^C#$D!#r_x= zd|=foqcv2>U|h^yFn)!j#?VNc*(3BLX2`enRc4*%udsTF<;?RqJ*aya42m`PR{EIj zQDWfPN{z44F?Db6&Zz^paee)&4q_!D%!9^(ZHYU%#(8Re0#JLCCXe?9yj`tD<3Uo= zwUh}#?{GE;i4?(bd6fVv!UgYdw*#RJ(~S}}w;T@ihNu}C;+#Kic}m&WIAX3ikv|A= z8Ck`DRkm+zCnP=^U+o`%nTcS1UC&jsv@Fl#+Q~EXHAFw0(}%=KQy!DasqPri>{<5O zBq*iyz9ql<+7s0S*TTvK%!im)O`vWpcz=X3?)dFR)E}Vfd~N#MfiW=H4o_1f_{zi`UWvQQ=?+ zjQ6Z_522r%OYAOCv&A3yn`#)a1|wh3pJ7hvT8NSxue1O^PI}Ei67(jj;8I z_v`b!bay2f$?Z08K_?d8eY+RPeSfZqJZ4Ig0;y{i8w8y|DA2j6?Y=D2quaR886{o5E)yA?>S|;PY(c$1BXZEzlApN~Pb)OcqL=Mlq zVY4G-R~?yK|8wR2A1qZfbGVud3cJ(-KYdRpSnN*!s-4&rFTTHM5;_x$IjsoUT?&s} z8?Wy-)N$c#=ksS=jBJfLnY8iDI`!&bffckq+qc#7WGPFW5UkaWWZU{WuC}9R?rXLb 
z%Gc$b&swi#g!wcy&8oZN(ltq7{%kFUPfofZ+7ggmSN>7d#HYd)X+#)Autc`tkv(i-I&)PFgrQ;T3iHfr}<3~Be+ z&)bUa4|e~6e_kj_YB}%JbFBE*TBP)N1s5kB>-o5jl#RdlV_Z}zs6RZ=Tph!J8#sm^ zrkM}I={)O6Y*j2ZPukeFX1WV-D8oEDq3IekmF2u9Nt4O?n=T@L-wY*fU9Ks5 z?&>`F()m7<6 zBq|d0DM-~Qh6lVn*aMm~>ms+7fg~vP*(p&Wc3JnvP@iPz-024TM6`xe<0bTA#v2hkl*C-9J-(k64$?rC)z9 z6gD}_mdL=VKquKBKhz+vb1^sB(l}==HV>29teofm!#+EG(q_qWgkk4;+FOa+(EAHP z`cs}D451#_C_o*S+Jz*4hhegOTM}}noPM$Sar$#NQQ;cHLtIk4qbj*`i6oRyxm)1s<_qU=x2zEP%%~TH7AuUE| z$jEXrl%Il83eJNgP=@fX>GQ;fSJv!mrTGn^c4Ib-F-n{H}@B?|YX^u?@W-jAe74GZcpV<&6 z(=KVr&UAwZO}OWB1QRi7u>QBOT-tKh4O>!_$@cv7JAFw zqnAf%loL~LQ!X-!AJBf&4|DojBEph(5K@{p%8i$INkM};`JKV=6VuquF2RvyzwVXgem20n_Z01<2V zf}ZjaH@d~*etQxv1yCK{cg8Tmt4w{FvBNcKy~lrMJ0@XHd9)5P%xJa|{ySaV1I5?p zT9pS2s;V|7I}I(Lo?z1F2oF!}csoy3(8VcoO_^!1>Xzxs!J2nppJ$yuRF6g16)NQ&7i@nl6sikP_)H*4i`^ zKV!(2#q6QlWHaDFQ&TnlBx52e{?O~H{;G9`=IAThwwBA6cYpotN=PegR9BsT@@_(F zOt&Trsw=wth^?+g-S!tbBA>lCeVWMnEI&<=biQ(M;#D*{n60mvm22?+rx0T{`%vj! zYMyu^-_qrn6dwRM?p1cIInXWVKSOeB{!!?z_Fyzj`ChEEbcAqUuCULbG$pQdrxTI+2I2wF$?l zoj!JAM6SDg__4XnuDZGOE`v)vVtfDj@W{81>``I6ugv>WI~hjDenh3_=!k3Qy5NX$ zDj(#pF(eb(!8dW6+pBWR+17R1I-4cb)P=4cBQGY>I(^L>M?5zS;3d0_EW387a4(q; zIroB&W1HEs@asLTo3>}HR(8ikXXu>*%uu?4<~a&={tVK0z>T#&8jGLdb9KX#M@EG= zP6k+eNKH2a%Y>2zZYgO8iigfD0zwYwJJ6m*<@<)hzEa;@XO!yhRIATprdEnMFz;CK zF;3|iTC}jP|P8ZTM8~0xMvxMdpkt<`I@1ig?vCQse(>&TA`|X zHSO@bcv%I%EdMnI@u_QuYq<}Z4hUt0Vsw)8bLrGX)H0558lYCyf5<)P3w687p!Gu7 zm|B(D!;3WJ7B@QyB}JJ$Z4xzkYi!K(SlBn_96F9yloTzdh-j^*P3@-P)ricv`E(2g z*4U=ZJLfIhLdJ`co!r;@#HZJsXp-Ma6x3aIAhwXFNvw12Tw`CmSbeuqVo`jnPj%MQ zh-N}Wb@ni1WA(>JH|Y-5tFC)=$&5}uR7+~t>MXoeJISLuqtH$!UiMjZp(t1)pn%D-pakf&ANRI(JTnm5Rzz#keMm_x5q{$Q>Vc828aOnOBhy!8wZ0iKZwOb#YGFIbLS> z6VN+j)vS_-^(xKDYYgTsTMF;wnr>yMdQwtcPO-;4{-~-_6VIcIeo?Rxj;c6l=D-_E zr88r)4KB^CVkDdxff4`g)4$Jv3}dBRv0$Bi)kE3tX&;aO<&|+(u4{B!Qc2V=F&+L; z)a;S0-jId_+x`pH7^;D+Ja0SyEcU9b&+cXElc82`1;l(V`3oAw=&6RWy&oEqe-c<#)bMt{u3$BY0YM(dXr$uKZ7@dgT6O zm|F$FhPQ95>~$_T`I==M6js-xSpydd+m6~a`SI8JZNhy`I?FKXmoTZ7NyD$HOI 
z6sl$gtx?rAIG6*xDE1c;+ltADe>mX3XK9uDDM4CJjB{;iM2vlv@HRWW?Zx)$w$`wk zuX(2KD&Ajaon^%JIS%;kGo*x={i`RkSHH$pI9v|#BOmt8Fl(}US71>C9diSos4EJ%J(oGoR!|eyH~PLm^Tom)j?QK?c%FwS zar-vl8vUKGEXJ91?{Tw`mW-N<%CI}n#vFZ1q|%R;cPJK~YG={8fGTO)yX>MxkDWC6 z6KLf4LTCl%(uSg1Xp27f>26R3s(39u_Qg3z-Ol6R=KM@fAxC8$<+k5S>ptd|0inU= z1^s|HN=eR1D!!wyG_gGaz zOib1ID@Q{blnax_);5L<_8FOHPvn13_=)+?48yk7W1HhqON$P5BG{;GyS%#U{Y8Vq zo3UBo9{K&E>JBLKlFmSEIEh|&DC(*Xp3h`qd4tBUKIp`1dA!ADw^*AMscG7CS0M|c zdl+|V&G?Tir{8ZZ$o;(*fYN5eNSVfV@l>tgaG@OC$cUEZKwdLIlp$<-92w>jkGyLG zRxaTNj(s;QO%$$>=zTyP1@qy;2??eqziVqjl}CbpUR`QzR@5K<#DJM!gWtetYjtpG z(<=S&#CPy~_qXW%pF&;p;9_DAMVvA+)AP1RCk@IWIR`S4tqGbw9ho{u6=Vsk&CPmF zC-T&NO=+hYb?;~vK19~3H{^+q$+5c`OVvDPVDxTLqnnO}r{U>*>RfahuZnSQBx@0P zE$dnQT{qt;Yr52>H?C?cO9oqtW!n$Q5bJl!cXKD;Y_kDf^WlW`BQqOP7B0JfKL20rT zWiMOCzK(qv#x5b-h_TJcmKkFSGnTRR-lOOHec#{lyZ@*;95dH_-q&?5pX+m-_j%5Y zwsm$KDAqJG)IZCrt1UA%Z~;qJQPVG}c-?pK zex-_z<+CXWXywvsIkLh7DadK6YIED>g9kMn?Ag@0N;X7>I1@?IzXP65ZK}#i4v0%K zu(4M@ERLZ=DI--W_aZxgV~~3>ywXEUCq9iW{;y9Y* zeW8B6Um#7^0x~(53riDu076mm>m&Bkt0WX!l+Mo)w{1=Nz%6pTF-YtNocqKfLbzn?UB8>YrA?GqdX(RX0ss zeyGxkJGFY&?@H!6t4STV$F5Ca42~A{ex;8eWl_=^C<^ai^6TH`hfmjNFnl*pn6jRp z1QoEyvHLHGhHGft*5zn6+e-ZsLCL!b)@wa`UC$#c^F~YCr_aKDROjV|`w#c^btoD9 z*7+?xjD8{|XxDOyo}}-u-gQQ>#-VwWzNbu#W0T{TY}kY6%PAT@?a|Vk<84`5{&!hN zHlMbk&Uk$6YaegXN52`#wDTxCH7#{+jb&BTPS}BQ%deB?`;M);W6W=N8!cM>6wP`h zWgyd6O&E83iMt6m!-AUnY^>DRbb3q(_A;pUkLtW4``Y!v2Zjh$i9<@HvLc^esGni5 zb~f?pS-?8nGxX%ZFCZ_pN6ZmU8CScPCse6iH1%lmKgTs@CsC+0GX&2|wU5*lfEV~` z0t+E%aa*$WroGjZVc_HjXcBIla>rkNBc}drdh9yGka}wY#$n&-6Avxy71k@is*tPVNtf`xQpNYj&jjb(`UD~u?z zEIV-M$0q-{Ux5CDX{jlR^zbh(NTP1;9d27?p&a>9h@%&ScZy&fcbk2`1KY9W@ z+`?a%)Pdu7bNxv;%j2A@bOG=KPgt?<4jo!wobhur3Ng3Q(C3o){hnv(-(fL)TJrH> z2euONb-2~Ld9t1m9nu)&b=&X2tom8ES>sFdr!AE=BgRTj=jo639jTa8-SW~2!k$O_ z7O9^=uPCNM0~c=U3I8vdY#IB!;1?LNn^GB&U>e1ePoEF?v3F$C6GQgX|JY|ZP>uP{ z)+o>>{^zxd8ipZ*(EKmj?M4E1a`>>Hb3lH@Mz&-0gPd5;cRBvsgr3 zeyv{=y!g+?*yU!W?eiY#hmjb)khYHr8{1P+yLZPku8{i#_9waaTeJEw|1(z5FcC=mbtV&P2 
zPQR}$h-`A@cIwjU8p(h0T;Qz6l0bWvX7g#wu^gXBy>W$@&=A{olao2@$`f$>n?=BO z$hPkI`?omBzT*Cn`jF+ol1ArlgGt-#6&<1uZB6a-pRsi=PK3hAjBEV!9q|~+m)nQh zzNY<48eR1^S4M(v8ry8qr?iow4?uyv^RZ9k&&ZYHQ~YQ z&bpRly~qVpg)}m2YIy@==I+Zd%AiMozBdc{SZ_=uXoAPGF`Ne`>?p8h1@Tswlr%CPp3uzTzYSIkNlzQ>y!WW$*$?{t$NQHKR4x4kMGct4ogG|1 zpMs;2IVDN2Kn}=cojQ}a$W;Hu+L#xaSx>Cql228f{UVc~1PxT*V7*h=+;x@NN_5vg z^Ja^6MNwo=aU*?z*?idAV-#HR?4E&i0_$U$t=!+o7nRJq1dMnRN#Dbel^=Ki0g2rk z`&MsCtmSE`O#-uPG=SX**g;FoZDEVX?$Y{w$4jlDBkQ(ZXY7{B@~L{_b^M>6_?&l` zo(wp?N`DjH6MSnXEBjQ2?b#PDOQ{2nRy|Qun_h}PJKGt0kjbCq2RYe2m*=I+x%;DZ zn3?IrF*_slrw=T2ht99R2Dp-&W0w{0I_~nc`yQPN?^{kF(p=h|R`?e@`}4i3evM~D z7hC%)XOETk&nEZ4-vtBY63-g}kt|$P`Qg~F%)aU};#*BzL~ZTo0^QA(sZBfmV-tFy z8|GR`^r3qhvW`|5wExD&{x7|v-Zk^z#LFUoXbw9{vvKVO99D?WuEk zk}a1Yn^kjKnQ%6EsxBCP@kQ&eal+Y)yjiwxR~L|AV5~0HYi6TTkmvc^FQYThte-8s zX>RNP>O8PBQjmkQSK`L`7RVYct;<`_VoVtYkQb*~5VS6!0It4RJN=w0Y;l~8l1(sE zP2=*GYXl~o@tTU++&&r)>Wmz$w;*89%MLcq@@GX7gy?*vxJ)M~Q~vWtx*-}OjYV}%`{;Wh;bPg++pV>&Ex6@KF{}$R zyig~{GIjv@{jAnq3n#vBQ-(wck6-+5AAj9JfFf%YY7xV8V@xIIMM7z(#0>2r%b`v8 zP>Sa43c&5dhzc_s?Um!=ZkG4SS6~A`+w>FNzpMLq2cW(D$iT{3Fxe(nfDK5_Y8@9s zvbK4w6FqxN2y zE-dVyw+a5CyKfAnSa+tlWrURa1)~Q&n@yCjFznTEzk?L9f2Z$pEaX{ut8pTI*NXJ! 
zmbQ1oU=+S%6eb0@>=m!ZrcC%|m zP+;buXQZjih%oU=vxdaCqLfZqY|HWK*fHBH)`g&g zA-Mv6q)yh-k;9LEAdd{8TDsmT7T)J3$%O_^AGdY}S6#Zo)3>K$<~X6l3u#jF$^d+nH^<`H877M-Pp>% znCX_=q;P2Y)W_8H$Wp+4W%%7H+sw}Jn8fk(S8mTPDY0F$(wnWKMxn0m|1x}UX{1cX zMs5S`?LRR4*b=H{6`(EBzdIaHH~;H?GRaJW?!4hCaZNar8*%iqG(Huq6SHj&gU}-l zOBn}~K~D%U$DOW;B(qvMy7iUbq1vyaQ_r^l{S~X}y88Veo08#>#eoY7KPGqY%o7HY z1tQ9J)n^GzJ5W}zf&d)hGLWD#bNw*Nwppzw=+}x#2yOMi?}FE@FPS~;|3=H^k0+_f zUQ-P}b8SZo*#o36j;`Zc&JRx$QMCEzwX`{JxXz*!e7{bpHDE9_)|gMioL-?a5v5xLv6LtFZ2suYugLtTP%Q`ay#Dj!urmRlA6Y6}N+J9Y0ptlx z=3g1TFQtsJt!Ld9SN+`1!wEDKy6VAU5jAf2voFd*ey$G)FqL&n3FS)*er-LUSXy%h zsUA)I^)!1);C^nC@#|`SKqhiTF(WqS{L{db1v#?3rc zH1v@WUjd$#?_is>;a9Ka_>}%>p$*8t?Z11&qo%wh=vS}FjLTts^!kQq1QW4D-e`C& z<0!l9QkUcVewif7bq6$X0KxnhQAX(k>aqCMXn`mXq_8q5i2GM27fSLGQNzb-+ zjZwiY+NadZ9EV0p);!(Qe}7DuvhCe@;>{XL6aSu+{1To_*^SBFNzHxd)AR6Z03o0s zbQ@ck0;EV;C;3hXY&*1q)c2Rz5G}% zf*<8s=Y#$jE~SaRaumC!67HJBz5T&C#QH=70;vhOz?syj$97Kt_P?&X%(wgKze?L7 zxOjh$_xhdWH6Z$sZcpay7d`%oONTU=c(@NdZHtnZw3v#JXCXj{X*9Q1kxM z8&4YQUzxrd;%Kh@7W76|4qxE$xvPOV2EWee?a$f=h;s^L3RT8 zZhndS{lW=*4a%47nRkK4=nb1cA_h_uz$+)zD^*zXfc+~#|FA{QioW*VF-dD3@fDrS zzf5!6;lf~O-}I1q zkDG3nAACBNwdQdJXTX;)1)L2{v{L5~?$Ka23!E6GG!+EFPih8V-gZXVp8i#tIbr#| z`mu;u7$Y5+KcK##>QcZNf`7^WZ6H4k{3J66*hZ^&{WpV~mZWpa2RYx5_uNz*@6Y-P z3&VR}pkBh4UQpr7DL6=9!zbo+0nggcSFZxk-c|0=&}ECNecdxid1xo#$#mZ{$XelS zNI_&!^j*5+3g*kNu^gHtN5L{ePD)#eae%QWl#x57u`vRCV@sl~f8fs$E<>)@c*!X< zG}9XqBp$DN?%z|dCgNrIv>(Qcii*V7xQh{sZ>9O)@vw6RtECO3`aYh1_gLR}+GqEE z`6At?^Utp9Mr)iC{8#mo;hV+vWw}swOTyPN^5eP6P!$Kv`n;JV zRDE1q8BHmaM4+I*AM@;Nl9W8@#f=cK+Xce@1H3-RCAfcSLGGR})V|94w?(96cV`|{ zh{f@lQqkgVjG7OzCplApORJ<48UK^$7=Ipn;H$f93jyPKtzzB++r~*DDeVjK_YEtml%!)MjbFXTsJpVWT4y zT=*dInK$Bx%oMC2N(mEdYnhI$6kEo;DA%|c*>^;=>yvtq1ZSTpKyA9u>uv}W&zQH0 zud*l1$Mq^R$KHndfXp0q_W5)-1PQ#Vc8`2qn|?xpGPE?Z)JkSHasvPIUVhCe?+)K~ zeDVs{Cn<1lr1D4)ouNpxDhb?qF#yg3k04>Wj_;%8B(w&ScKlf>W5}nIQiW{rl=2_KS6IR5C98g2aZh&Yyy$IjhR&rl$h+xyUl@ zZJG6~-Wkfd5u+@g)=kz7l7nf_4Q(eOoO 
zg`q7k;hQk47rzMZaV+~<0O~{NwS)H^gb_h#eD1VFD3T4{>PE;CJW3O68G)_Y;^F{x zwArC*BgoC<)m{i)d)hK5+iSV--%p6-qmm^(H_>o9(#Wc+p+Qva+Rj|x{VZ_a9wUuk z%!gS2B^UF}XX=MFJ8k}y>;yV1XZh0U%1oEH`1nk3`~_`WV!I*FH-%abI59mJ5l_l_ zh7#TXq^fNGkg@zN9*MZ&lW!pI!v#xnCw>zansp>#pS8Z|k<#hb?WMR8NM`ZMda+G{ zP|F)i*9naA$I`lgjNbq+@YB%joTJXMYraz_(dpFY6U>AQUx0V(6TSeYbFCsQ2sal= zxq9ET9}DmEN}b_T@C!>2Q-ap03MtU6viZ%7)*`fz>r_?B>l_J+P(orZ0(22{VFabD z?!V;?BZIVUd!Tt8@yeB?)Pxp4ylZ?&Vy>wKv}Y>{o`Vg%m`dY`wwJ|Y}7bx=>Hc9PI@_nkI zBG~95uI^9a&0r&@mPu@O{TbXe_iMS+n{^AqnN@I>>mTi8?k{&Lx|ZmdUnEKM0g`k0 z&D=KKbAG7bO<_9f4Z+_Q8La0TUwb%PO@=`2zs>GCM`lW3L85eJZ}E&`1Wu7b!P#>o z2~bh(-J~5YZH3JEA??pn89KTVIs~KmSPP1aPRNt!EFt2O8T|-1)%dIYP^22WpwSr7rr*VU-|{{4uQ&R8N5<% z6jb$DrQy8{Ki#%|$!o}R&c*K@k25e!0)&#}90hK|8EqAWN6jHr*W+PdT`U5I=beU3 z>@10@*ONc1>gy;czmPB}sUZyaI7l zTD+c_g84W7-s6m=P%v>YgI$2C6isYYmmqNc`~WddMyzMLW7FIyd?Q)sH>e0m@MHEV z5jy1AoX9=!tJXL}@rf54$xy7D?lGmMcz%)wycs64p&-F9c|J-r@>pH}SapO0&Td8Y zNFY~JG^d9^Ej)04!^C1$pZ#bSM>+y42q8p>63p`Is)YY`Opap_Y4N&|v;|)pw1^BT zFoxle&8xu4r2a*cm3FA!_<8M(q&Y{|KeF0?xX^#Z05|{YL*NEb=qv0KRU>Aiz1jn7 zp=ftZT6O>^RjHICSPJ%2;PhJZ(bqM_)l}Qj8Kkf1u!jcuJP+@pEhqyyKB{F(c)(8M zN3S(=I|Y3|T9^e=HpRmnx0~w?G^a6yjL5<8kd|U~Q%lAXm6vF>UW?>~ZBP?pf($#p;rUFXyKH1 zm|A{@Jq{$toJ-C0%~9O_HAFLUE31^m8qbryAfZCDN>{>1aWtjf*X~7FP(BL6NE0#y zgtwteZWD(VD>tD#lU#icW#m?K)XdWQe%PCiTZ%jD0zdT1c)0fQO|ek$Hv~g2hJQXu zHN^i*uxXJ`KBC=9(ueOtP3r12gxNY}hyCcA|Jy^b9>m;SMMPDFOYrFK^FP>YB+a=1 z2ot0rlH_W=Z<>6pjc(PU>{=Ybuip&%6;lE{WU7+#vZL48#phF z2+O-mz4=-H^1J%M?X#4(OmKUxyR}soFg>7WQw&j*eEWh z%(lEPT*f}R><4pM4n4D`oDwQhJ)F#x*t{UR{SGmB2&7CQ5vEMxxNwq3)X?{U>Sr@SUD zXC<|*Y@1Weq!eT8UGZM8jFk6*Cw)2V4$bN%|Dlv)JE5KgX2{r1oS;Z76JFAe$0ILg^lMKkD>*h@5Qvt z4<-gnFQ-bCO&??<==O;pF6wwyJq!u_Kwfxu^f23WX+H1O*;a`~$CvmuRYGIS7DDjI z{^6gMF#rewm)|{Qjng#mZixnP%NX0*7MIEK^7>24@bHdh+V)p7R0&_G@mNaz_+WpbC=I%*X!nOTu2NR zA!JDaF?C-8%1^bUpKR1xEpI;w-b2tX6;-0-dD)BWjdSX)>Ra66>*@x2xc$X?Tyrl4 z#}nO1pGU!_b_^u*q7a~aV`Z5#O2ObEB$bL2&qT!fm3pIwbK=1Xq-a6dp^eI+t?<$i zOG`nZSr4S~DqNU2d9L-UoT|@m)ocQ}ux!+vl6$W8p68+ZN3bXhFC$lkZl3qiH!XS> 
zc`YQ`Yi8`}acLrV_G$U<2#vWEZ+xvP<;g{qXkLo zn)N$XFM9hdHDox%0uf`_!N_3R_2lZHudKBz+TO zchgnYzO6i$&_qW7FfR`iKaTX)CoH9J6Jcaj2!0oKh!sC-u1Kz1?WJP#Fc7sj){D<~ z?s=S@*RT)bsDhelPzsq)jir2Xj7<^R>uwYBQ{(=R&neH}OPJw(u(2vs78|2SV6A&6 z#G0%F;?@oSwi5H_98H_ME#ZaM@z&@0Iq}xyanTVWEG9vD?L{<`62ZL81g;`Ph}EMb zSIs7o%|7}lR4I!trg-laOm2p&Dl2_pR~&~uz}vhedGFb25pbDXMTR@qdS1sLixMLE zP@)7aW{5-{X;*&PI&qxdd{d9Eb-4DpLP(!uhcSi@sINxzohvKtADWf7XFA9~n=Cx& zdt<)V{R%0nvsQ}Cx^v&dWx4EMe9pSBQGk;8BCeNjuW;wlwCwk{tbMnv4cbWJy~**V zXhS)3D;S$0`k#njF{-+-n#6ddT1clVnmteqbb)5#2FTy%xm8Sn8QP`uwsXY)@QB& zugL#hG!0f-t(B&Jf}SAB`|G+^qhU> z@Q9@J-B1t_LZ4?}l$N;Mscaz1t)n3>nAiCn&94Fl+D&#K?e)fWy42#WN1b<9V@1{V zDG#%0IU*WsLMGzU)O-|iec4*#+>+cY5xC?-Bb}Fnlxy0d!UT462p??dBG;4aR)i%p zaf#hL1RdekxzNPKIMNc!mWX^vL-{2vA%Y$)C4p*48e-pR`9J^)9n#R;rj~Fnba`7( zE^=eohCEbu<`^$K5M;$Wf0CP4EPoxW^7d!WPS)bS!J|;6M{znNs}945(iKl7^Frhr zXT5Sy`*~FuI4ZL~&mod1vp1cN*oF4$&(JhHx5bXs_Jg~k(ns~5Mz>-B)FnZ5HKp7i zuctl^m=FOTK*u?!W$8uNR~0i^alFA@Borm)rjtXej`gxJ)uiC7$Nah@+Fp4nwPrdo zrwOh-Kr9GB7|mRLS*}VVCgIUT8!HX+dWzJASbYc-ilT7}z|SxYm#l2o>1-Ryo!T~B zGsX{8qSKanOwIsxT`$jsv-|96UMYcQ^e{O!Hy285i z#f8jXnpz*D23t;vzSEjo?w{d~4cM5$4*H#@u_1W&jLSH}NKgBvoXARKE2ZA&cql0D zPMla7FO~%!B%#g-bt$`qJ@RCQ2T?)54FZ`9g4+s&NhWFcAAQ@_26MboCQiidsBW;L ztxh}H5E4syEhvG^ly(%BD!`t;6W zcGV3w@iC;HzQu*k^IiYYG{xq1VxN&;s^1MUf#LbElZ=f&Son~KK2^9r=GpifBLzLu zVm%84X;!8lDy1!DaZJ9y`KYQaGKRY#DRV2>c2@QqrPN+!-1Io6qepO8ZNFxCjo;Bl60`RUD~E`$0*lhZNp+$m?bD<>zgYT4NAB19mw(Lm$;e9l z+q2H>H#dAuT$RM3YaX4RUe0QQ5Lk-M>wdx9- zyo{(aI+U9@38E1s7jq!~;!#s>563;>5 zg1b6e>k1dxB!)ms?S@#jq8sc9wFYKZhyzZ*DRQVP%+%J#FMSAg?_sQAgIm)#x}d}e zmA|}z5Sq79%vd`8^}YvvOPcX7@)Cr7om@Fo zu+{=KOB0@y?X@3Wy*&_;<|H;%ae3YQPbHmh_{4MLAN9k&koX^yy8%skB1N)zh%lPwI@YsAC8;s$czL?HZp#TNrI|Fn z-A~BfU`Knl} zt9q!Xj5@|1^_w$36GR+5;?^D#ChVF+1SkDr^VNnHD_Tgy+`F=od!-47WgY|j3lb2z z3P#wY|Hn!(CNQvd`KgyhsQ6~O;i<2B9zaLTH4bUs@fPhmk29(cuF0y=uNFVS&?i#i z*v8T6xk(lL^OzQ9IPbdlijIdaq&UzrD6+gDft+%Vn~`A9vl+k&SJeJU_N!_k4sXzA zYtbEKuHMCdbZ>GfW0}iSq(fn}A%42FJe 
zFrs|)ADRi34vWtzJ~CY9n`we7h%wA@r;9zjRfs_N<;6`*3c=Slw_G~%Qapx3A|;RX z<~D@-!-0)hOWL9e13!=67V0$7NX|!45whzG9Wuen?MR3MG#1TGZ@^cFIn}&3$FWrv zBriEEQOaVW%H`05v5=NsC5cr#TuVSh4O`yJ>__wwbO&D&E(*yDC0RiAbBMz!SK3DgjcE%F`(J;(lC=%s7Y7lhghFd;RE>)_MKfig8D^N63fs8 z#a@MEa8+R>MLG1@pdK0u@&_uq1JZCh9ZJ%KDY+Wq6|{Z#8g$4{_znQ{>$2Ohy{?%M z{J^$<9UHVDkG;&3CRN_zQ&CNtX&;=wybaq}6Xcf6>rw>h&YSOi3+7pgQ%4aPYe%!?Zqg2OR?`sl`5FAJv_m?=*LGu*^Ow6f(AS(}xcH@#E zuGJe19|Yw`VbsWZvLCqFr+0AEPV6+6+xS8YcR&L0_X_ra%Cy7#FMYhbK*cb8_^>@6 zYQ)b-1QoxSwUW<-+Ov~T-$;&iRHLAlG}yg{?9geuiIDq-m6QI!3IUG+8~XZ>O(-Yp z2Q4=qweg(FKNaNnXE!;|=kNNSYhiBRrryJp3x8MXGfPG(Cm-Zt%+c@y-%u-hUM=X+0tFmR?PDM)d%BDWLdsS+kAvK4!PmIH>^p#h5R-hX=goJWwW znVGWq3&aMd?jHTWbGN@qTmow6`8+p;0# zkxBG$6Q5Ob?gutVc7dJmqwO@7t^Qt0(n=D3JV%7^J>z+EAZSil!nYoOVxKlosjhA6 zUCB)Z?axM84f{aLVVqx??b@drY#VOj%Q*m!eebiwgv?}f%`pOy2yLlCTCHG0C4$9s zF||9HO)gdn;1^yGLQH&8!`JM#L+=B^eCGm4wJ*1dMVmik!Z#f}l_ekXvKGrB;{fgYYvq|$`)P(D5 z#(*(tD$Y>(X2T%N+E_DBtQ;oo!G6tL;5Nr-op;WGEz7(&FbOEv>;zI`r#!!&vxIue zkF|)*=wT_MqA+1)j{n%RfJCxHAI)R={ldEb98rfnPaSfhWBW<_euWOHed>;=C9h05 z<<1%wA95v@QNy1BSM?9jAX-{31*pK0#N~xX3ptftvZ7>mdVB{7$K@ZUrx}kUygWQTcez#9SoJ&o6E{qQ@e+}uLvgF8Qh^6ze z|GqG~9^rS-ANXO=>`eNRm3ipnzJjJ5OA)8R_3KNHf~?IFT^GNy7yPe>LOQ%qE>#7* zd4l`DOXRR-2}^wo-a=t4*ALUUvFTgoWeK>;6)r+&S~~$*?3~=Ha-V6$1MiwHJtOOi zh)+hN(GPTE;*s&0W`F8FSzZEe+;&Xexqg^Z`vqyp;M{f&ylID$AM4zfCErI)#W6uS zH{SqvU**y^tTJ+VUeJ)5+Ybwt5aHL(5F8LHDH(>By001ND%1dl8-eR96)RM+!ZK0eDk2U?j> ze%AepNXw=Z?+(haDDDcxgh2%#nF39-0Ah`5Q_qc9A+?t8LEwRszAc9jIN9qEsP$N^ zK8F$yMUW4mCq@cvGQ|Kzh%)vFNXX-7>Gk4+m9vhI36kU2SVkU?ze*O}JY!Bz#6he> zg*BB=oEs>L%0B!sfk~Dj9wJ`<{?&XnUXItrZwWl>A;3qu(|-4a*S>D24J)TzIC^Gv zqq=-zh26!fpxz`3lHMR z9wq8U9fF2xh)!m#5Y}{z+B#$c%LaGp3Qk=)EE&SSHnA$XHA4MSJjG5 zqhkIF*)~q~YYd~M_jzS_ARr;6GjS}~C>4iUAztF6=(Ph*Q1*WX03>siygt zG=Dc*s+r=x-iLCx6r$WT4gkM^vTw4g0`;W-0rhS9L*$RIh^%Ma=2Hmcd}|V=XvoJ( zm(=^2kr0+QIHh=_9|^@qHSdh~m3YkVf&Y5Y`jh>{E!TS_iv2+O%(Q5K+BA`9ECT^8 z1+wOw7BivbWfk}+*T2y3W(YjwzG*K)DsT-Ij=&*{?OWq5Wt5T-2 
zlhEtJ-Pa~3ne)nEEEdl1gm7S?b>G`G6YfTUr>3#_b)pjt^dluUP4YaHFLl<|ofk?T zd6d_~@Wy9?$6D$ZR)F)&l1<$xbFs3#67+QV@F-oY?|>J)=8ut?zPtZ4%{#}kv-2W% zZj{A{1dGU<(22w^38wgo-)LSWFGS7yK^eTt2CnLIQS~RxmwLa1D{OGn2N0Cvu7lA2 zm*+v=_elAMM6uB{EE0%aklihSe!KZfkUOfBRS2c@Av5~7qT!6=e#SFsP}4N+s@!|g zO|gqs;)~!a2y7|ac;1AM5Xq%Ao+Ukwt9_D6wQriXnv)wj(~qRt`YEmQJU6p$(`zuWqxa*0jw%xgYeuOx; zUlAAB<2ws3hCU)uQi%ZZ{RV&g(!t@e|0UqIwc#)Y#{=a8E?poWF`ZmauY@**SbvQ3 zU96{%Vd5?nR`wd|?HI>1Pu0z5-o%}V>r zT1x~2uL%^q@fB)Wq3jGob!0F>F9*)z_6O}nU2fWMk7s)xR!WJzbk$NtQtpJ zW8~k;@YeGHuSjDGOVtarj3%emYkHtbH_eSYr^+WmiM*2A@?VRJQPgRMr zEMBC3aPVBB-3Msyjd}X#p&t@jg!U1p-pW&t31zRDxSJjt3%P6eT1fJnlyp5+k5A&C z0SlGaih7kHE+N2l`(i+63*e?L)N;uak#&{d03g_je9kMk;X;-vIo>2|s*#*tZ$i1? zrzc8c1!uPdyBwMK>E+E&^FG!{7ke4Z)XS)qn{dSCuN_kT5aQP~9dks4BsC;)du7*{ z#QWxb1HuMOO~ZD;T;T@6b8T2NDZoJJ?=*j<`jeSrNC0>PZ3P@WAdns&Z|Bj^cx~(7 zM}eW4dXX)GJJUz&&kn_JddFx-FbeUuBrd4x?7Jpodt09FnCl7)DY4AyS1p*yU54BH z0fFkT+ka#vU9+>O%Zx~74N zPvae)tcE6Cunr3mRnvH^7)#LyoigJnkOlfZ24{%o)42Gs60rlBy8{t7S60ft)$Ubr6hWNGb{_&H~GAcB> z7_kMU4dgF2t?kXsJ?-_gI}9lH{%&Enj-3j zFp276E~YY3TvB$xZT~t5@%I3yL$h`to-Zp`C#=_Rkozus6IxrLGP^7~$>k1&N5b>#El~Zd3fxs|cgOn`JPvrC z9~D2Oq^-Fh=sA?N+$?MH?EX?Q1SGV8jygWl6&VzA0R}AK^Xab*Rh&5fG+VPsFhbLV zek6~Rk5VB$lonT821FWi`66+R!(lc8AGDQE^UkMcA@q-t&Yy*~rC_(4!!wqKq1QsM zcSEy%N^|aBu8Q|HmD)NZ&s|UI7d%=6WmRwU6rkUf7@l>eZg8aI&$`FOM33gidwazxNzY*M%1J;KdjYJmj-!BUo{yu{ zyxlYIjq4i}CKQMMJ^8aDJ|dGTblp00E7B>^W{*UYS->A^c2Z{R5%jGtwgJDT*a2C9 zAdI2*Ky=O5RwY3ul==#KGL;yVoz6|d7c1c$ zEO*z818{zp&_i-v&-oG1>L<>BJayyecOsTY|M+-xSt~blIJQLLeRGG-_VuuION;4= z_3L=m2wO#Q^`w&^wyh23ZdInOd`YVEB9empY3NE#3M>?P_}uo*cJ6}0QsPAPOF)A>^YR!92L-oiGdnLT7<=9=P1s7Al`;cQ{lfOF2OvP(D%DZc zSyZ1wwahS#(3df`m+crdD`%<8>`*EgK@L~p8x99^Jiu2^t_JFPYX!F^V8=I6SAXJk z{MQ$9zmkQO{LoC$);DX^FID8(?WMq}xEZ8L4dIeNf=Drtti|}fV1zBd-ndd%?tlC| z=(Y6;6b61ZL2Z^2wpKF&pk6&np|g(@*%#3&JA}N-2YHWbnxa3sKOnK@iNw5_CNX7=a<90Blze;MNw@Z$IS2_LeC!@1u09jJtLX~4~` z_$R6ijxq>}#Bp&#tzmd4|2voeyG7V1s*$d;0CGbBFdiw1<${~`)DNV^nN9(JGTkEk 
zU8pWl^${KRzt~URkkW*`^>o>=1wQ=c+5O_{m{2^KrrUKoTkRj#I#2l!z7lPGEm`5f zS-V1rmdQqEl{kmN)iJ2vSy z&{6Y$B8%P~Ct$x5SAP`)4Pwo#=QiaHMXhZfF~K)}_^qi>_lSQl&@1YaJsk^Qa&81) zRW7A8DFjk;jH|9@MsN~hWaHwWBa_#rWZsz5Z? zaKhAGg*VlI_=W#3O!G)H?I%l*5aZnLsApZjG7HQ*LH0lWq^TG>`8M+Z#aCH%Tfsk4 z@$Q!Pvv2T~YEvh2m%08w+$#2q`75xs`&&G0JmRNG&<${wKAa;6IzfA`SmP7 zHJw}&bW|O}+8F_7OQUHI7Rr20kiEh6-_l;esa*knjAexPFd^Swy-X~d6#g<{N5tCkw+p{e(=on)S3{W7J{ zAcPt+Yi|BsR@9vkAWHbr~BS2H09iUU5i+(Geox z9s@x_Km;|(Vtgx(5gVqJz!X`xPJZ>;y-7g1M|q=;2_?+dI))g+gwEhHFi^uQNua0a z$z|h0UH6^WCt=Tu5k1|pRkuDbZMEo%LpIm?l?G4|j(6NnBV;$W- zaG@%xe6+IUT~rI1izSwX5C$v~zzgGAN; zG|&FgiKja-h`m!^1#GtReOy2NO$`EmSwTzHot@8h6%c?QL4kH&y^X-Hp2@$&REqJW-K0*p}dcD|=YI$x+csF~v z+|USHx`#<*i-__DS;f^|cgyaX6u5W)DFNcYp5+m^umik&^?Pt^_#X8zH(C)jP!prH+MMK7eDIUOL?js9&q> z02}P>h=q?aLEU{H`G}6loyh@KgoZ{=`mZHXDZjRm3Sd5sj3J;T2+Cz|<`zJ5I(=vv z<3nPuXS9qT993k{1t}9WBXKfGRIf_C!mu$^XKDQoK&?9!O=VNf2(mFD2XOl2eRm+d z7%R4V?Qf;=cntyve&3SXS5oobBHT^!+m4*BCyVor<(d@_Lib)a_Fu0^GNP}yj*y&js^Wl`1=3Y z9!(Q1b+@Gg@FM^MhAN9P04A)D&N%q8q(COhP2XMTxsJj-`8?NH!$#%0?wAU zy3gzVSi6heAGj7OuI;VYBMIo>+dMLAh5DQya+~Mos9B3)X(xtV^8Yn#ViIux@Qd3H zpcP`mOlR({f*vTqUAucMeLKj?Gb7JZ|WfRaVNN`9`;}oz@lU) z%lfqeCKO_4{b$-TuPhtIAp-CIy<`~RuVb8#tp~IIbk{~Agd0=WsuDM@|Fti3)%;V` zP;in2GO;m*e2k!-8u;BT&&!%#e`Yo3tNQE;6KYaw59vN_Og|)))Gyz|+tPUN2`f@i zsOiR1Q)>|nZ))f17risDkO_3wJGoAR3QbMKSZDDt5nIIw*cAISE=`ylHFK!#f}8!v zIJ%MP*IQ4pO#ru`TmsnseFYzH%ods2k$yz2Qw^YNMt>KRC|=W7;ijeJ zg-lZ*dG<8P&~SN8%(NxgKX6~}sim$h`?lV$702S!_&ZtXjDF_->n{9Q0R5wJe_`?k zlS1T!u=>RTTa~g)sH;_SKoFcDrg6hfr%sWaMS+xi|CP}{%o1Z2qFQwrNmAf z9w%7jGFzLzol*%wTxtK2bz-AACySIuZ8I81zJ6gO!gm>0s>wTs3c5`tBHZTofw>*1 z94cW#m7PKf6p1;l>kLw6Tkr0A97VbS0{2R?S*ABkSaV-SrJ+ZX(ue-yBkDYy&}203 zRF{qoydBSUlWDWdD1UaC$~ah==uLeBQ!NsPgT-MC8dhd5xyHZ*^3Zpas6}j zHR#NOad#lJv5a#Uz<**z2Qj)Sdab@oeOW!01&=@LcB9x2O0bPXc|JWlUHe=ktuXKZ z0ckfc1Hp)fK~7RVWvX+%&Uw4VeH{&@?x2-tSD8?Q9J5J5GOaxDKm94zsO=sxQ(W4- zLXyul)?I5nqjIuIlXokJ7eJg)=K#1Wtf-cB1=#=N7#fLV%jRoVK-M%Vq<@o4w^Md; 
zByVmEPu3y%vJ(F6_akseHqVzvMG!gS`}alHn{Hl7Pt0}K(JI$@gTKJjN(J5(e-?}W zAgHVN?nHY709)b(h+V#b>QhiN@_wQ82yhBX-f=S#g4E&|&=_ALkKsNquw*;d$teXF zN7Qs^t?mV&E@8nJMofER|0lBB^i$o(%}*zA8MBhrt5-}(OTIlq7IIrp4UAn&q`T*T-{tyKwY>s0?nzH2wo2#0~7X7^Nfl`kqb zQZ@PzUSbVpmT4sWJP?1dD+Yql5!;G8_;iM=7erVNZ_%vdS*GD^%zH_(DR;q@&9J{H z0^(|Isf_?$%{O9%Po3w?$`{qw#Z=;aV=!OROj~ec%;-pASy4YLs5IKYpf`5|fN%l3 zto)sKpsX6$b$0hD#9NXr^N@>yY)x|3mkleH0u=_ac_F!T0hYPGvN^@(x$@yOaP&qC zjArIO;-ufUG>~RpWy90C?Y7=r5Z^KVl2bBoy#CoYM>JC5^7(ciQ}rlH1izqdfrwDoiJ~?Q^?`b+s1O^(M<1-@%OAW=vsVR<7ny~%k=1Jcf^RP zV0{}#3X5=Ug9B#2i!;uU)k0T#B3{v3A2)0?4z(zqCWl2I;pbWhMaPWu^E-EvJ&fE7 zgkh=ER@KmdkWapHwX>6}Hqp@4mtC;`)WkdC3zl%?1fLJj{-TJC?BhOh`?@0#y*?S! ztM@i8G0L99l$|lZ`+S0#wB_aX>l*JShADrN=i+{s@YtLG`N*?vUmy<~&a}3J8aD{xPuO@PZ-qG3*O>H{lagr% z3$;<1`6HBB<}FZM4V6^^oR{yoV}qrpogr_lhHlyE1Z<5x-mn~y?|OW5DD)jhP01>c z>s8Im>n(@|;>nG2ptgbf%foCFbJu3Q#YZ0e0}^-y;1x)zoFu?b!*w4T2EQzrI|iR0 zRoMnj(fzrBxkBjxwS1@wPV7B|$XVO^wv*?D;_A6*QG{EGnnKx4KE_$6dxw?*Jg5#- z`m19%N(}z;&VXG~_&Oi655#T&@!l~ZJg`g?d*NF6Na|8B8W~#CU;gmc=BE!dcUN%7 zU&}fTqy2{pQ&I3f%LJaG`WGx+$%>D0banH}J0~cqdNm%bzuP#FEzciQJVSh|L5>>) zD(tFoSex`;)CMx1U2UfWyn_a7*8LvbMvr_{z`}@pSsgOfF=ak-*XUR^Zc(Iy-y)w_ zXY_{T0nU7G{)XUZDwv`#=Kthc{wcltKbn$fTMc5&uR=sYJq$v>!+7Nw~ww> zlyuJ#Q3Ax81(mjMxHOlOy|wNnYk55Gv&Ww+@#7<|zO#~j%7%I=w6Wh-x_Zg`6pymf z36cv9U<0NI1@lLzjU)c=P@ zQB|mvxA1FMWPVZg>chd`9#-BX4hQ!4i_722+EybGRFA&eng$!Ai>W{L+5pU+*IF5$o4< zn+kQkBC0f&U0UKhBo&6szWJcUl~8Q2RUDl@PWTUbe5KiE5G+wcxBslTxY*?pVT7cT zUs~4K$|1p>!M6qGQm<})qVWhEu@1Ga3@At#B80k9exRLjVhg$A;XwhC>n|4)b1IVr zNlHJt@(@IJy&6pM^~1}M&B?S-mQ8sz+oAVWzh{$Xt}I4lq&F!rx5#qU{myQD*gmiQ=8xgG7E+Q*9{;1Di-yOxM!>@F1pJeC(iPwGPha=X$HH zZc`qDos164sOt(H`s*k*x95i)UgWF?_c2Dd3ueigcJ>byn}mj9zUWb2ju&5kH3PW@ z7x*Hv_lElIS4XwX&uf}||B=XhJuF`S>W*cN!>2S4DU`pw;_h|3^ImtDO~2HWdW-tc z#~TvMTBc+^f2p>j7|RXI$;v648^KmKX|{PjnIY2!h1K>S;ySn!R_d%N>%-K`i>k0=$rAle;t3&Dz~km{k^EZ0~x zwO>^_eu|(MEhgwNfM!n>SH@1GD;( z_TkUX_4jiwzRC=%4b?)pSi;OUaaf10M#xwgGF_^Vx!RBD6caLnHpIgbFZ;~cM`vJ2H(t3S^$W!V?>D$ILqs=-31 
zVzwPeTEOzh{w`k*Jg8BDd{WwfwkE)kS)ncDPTgM3ukhs#=y@LrQY9Fl!+6QKijahj zcJ*z0Dq*xOo@S%qzbq_U10LojXi(IIJ;=##RBjQMi?%HRk0~=wrly~d%D2b&UdIh9 zhCm<+r##}fBK)zSKdSBI_(G1Chxd(I-yCfbFnry85LtxPUC`Ltz;d!GLR}mpx#r7TdlHps^{n0G7Oa@?$g5WMMNT21yH@sL? ztPobj0Au9IGB&Oi&C;6k`M?si3n{k2k_8!!WwrS75IZg?tA6(FyaX@#QPg?9DEg10 ztLv0_ct+}q1y5aRbkXg@Y^EeGZwLRGk`G;vrXfM`J>of-H_!+nD6cU!TS;EoNGq0` z^Pw0RSCdZ|+``q41l&c*xPA|rv=~KN5=!D}1EqR;FkM;K)1!uDKZz_%jpDHy{B7QR z`^xsZ#TV>N{JC`AV=3D{8S6#DAA&%*c{o6cIZp=~Jmf>}cHV6>x)pp=5QaOu`4Ng2 z&5TP`z|fJfpiui@dEo$e=EFO+rLR|e&-|sw=@o*t9}pn#k!T=5ZqQ#Gcd4ll597&| z2H1=*F&pnOBE#}}>L?kqXG|bg0vaq!8tibE%^DR-H0Lw9ke3+sEYWZ{Lw!^hY7LCs z*r8bo=j6}@6xIr_NPPxmzG4qJo`vfXsq-9y)faZ0WYp-`=fp*iS3^6s98%w7d}i^g zks*P~2{T2Bu|>(1+gH`4qD;QE@VZ(CWCPeFfEA9G}yz) zfrN8CI8Q!i1w&wE^UjNbN}JP9w-ry}<$%_JRd63`+6q6&rbx<14bAB8bp|xei+sbu6}Qsl>OJi267c zv>`nBV#cZRaHpv|v+?cbv~B$KiB8NNXYgx?AfJuPu>SU+(Mpd87ys`1@1Saz8YY-_ zb%P0TuWldm1l+qj@P*~*dy>B0Z|0oS34zX0Ya4=l=>-(R*NfchQ|>u>xHI|^1HJW8 z0_1)a1dCI`H_36Vp8^^FQoDd&=mG1&vP+fyUWgMhco`QpIdSfH{+s6*-obo1GNKXO z%fMu|r9JScWoxChyNYCCNkK(PS%?=iM6NWH)+9K5pZk;tAd?99>(k z;okuyify#FWDL|XJ{C1(xruW_O$hs5Ien+!^+DbzC*aJYlj`)`6~QDlQYFCdqMxTb z_dkBW)e{X_|K5VyqSO!p9$;&#T0*%QtZa7x>qKd~zv^s>j%AU2k*A1e<1&ZeVm6#qEExNYe+=HPvP*%JP-&^3{b|6!Atg8M5@z3Rq$`8Xz93x zT(qjke#WDy89hCFQ(al3EL7rDn^}C(S-Bq5a5Fvjc=+bWOgT0-yP7agMf$C` zZCj}Th=675-*(yqlNx3X^D;FduOq*5>-~8`H(GU;r{`hhj3|8EP+balv@<%9Po8t# zEZ#)SJ-SQK6e}`u0`volxos{TaQ+F2Y(7jXSO)1s|5p22bTD`{f99>pg4G! zQSkhf{MIUe18BNkJr4-W9jgPZjA~qfOSj$1!f4vKA8RYm=|#Y(`v)g1=ZeF*X#t{- zyPaK`+A*u`zExqSqqv5lXjvR@16d$^d7%N0c=?xYpQuw6wVUQA6zzd3C&!(8jw z8Q2}?hAg(F+*;v!aAI?;$;O{n##SQ8IY8mDu>!6MK?c{-tY;GfWrY@E;6tj(tW*l- z4AC_%?IY%r4h9Y{a(4c>D$hk!^8tbyEZh_jZ(FME@JCxr2rqqpRM8|y%SSd#O&rL@ zC!Q&^rAECJ;A4!9lJC-Nk3SYb&Aih=JKr&A$e<6!>SFT%HL78JPQPfj6KGc)*+! 
z&ed96xOU52jXon{A^V(arrUJcv-W7bxuxhyM6V;uQl9GpNnJcc54ykliz~p`${s_p zyrG0!^`p!gLI`WcUkb3mM;926aF+ozGHyKln9G36K;d)ntfm#A`|2?Hb(x_Udp-4S zW|1Y)HVukLdpQe}K(5oVT3aiwEJ_5GJQdg!H6C;tRA1cOYEp!^i+W9#<~mq^=7`gH zP*LoY0On3G2w2BOP>)5*i6t5dbVt0a5-QWmaM`+eu5-?5<5U(2joqH{IoByH!3}rS zYL_1rJ*j5F3#&DrZjXd#h{E}N{f8lt#ZL#|O?ryU)B~5}w(3YWfeVc_{6Jo-4ZsfS z?tzP$T#Hm4W`ho8<2Tb}N z9DW9f?H_5;Ecph?opxocQxl8y!5m9S0~WYqn2xMmzGY3kn8hDjRTVG+v#woUS$q5;@bobsYpM)}Q#G+hXu-nl>53uhm8HqwtJ=(g3ND zzt||Er2<^|DrcxQw0}&LGAaX545cn|rOHg@HCnZekDk3FAYlW}Op&zlU%+Op|Dp{> z&OA^DKq&@5j|)c|>~Jj}>yL6wHFWh#t$^71T$O?cC}yRYSm`U)9*3eP6GzW`t~WMe z0_QU5xdm~4J0M{@wb*2}zp0Lj>h?Au#TpH?=xu`lWJp zx|*dmzH%6y>2V=3-WIXkCM5GBgXV&l5v%p5{ zSp?4m!U5eIX4jctpuyGE=|=nJs`)yj(a&V^`)aB)Hrx&)G$w&I;3@$S=gy2l`-#A- zparmaWoSWK_B^-)Rn$WyNyGQX7iQ61cuNO~frB7s4SAYOt7A7U82kU!4< zDPoX-n?SxOCKv@v7_a-MqJ@t1D$LRSdxD`Xm{l!(ATC3(x($u24>(YKQm5f(HI%h% ztNum93(!wogYKy=53X~`GM%0oiT+bynfDpf>_eJ~v>YzT15I>cT$*eW1Kkr!YD#;N zrW#j4Q*O%fV+#Qn#g-l^5NdiRz7p4a@W_q-11rlZqI$5>ot-usX!s>*5hl-k$dYtyc4|i}MLrTPOZofRwj5hD+oOs<>#FxuBOqSUo~&S0%je z`i^EiN`#kroXvviG`v}nv57eZGK89aXtXapOCX*JtvnK6wIaH7wR09%%p zUHG6uUjg_Jo@*vKv1TELmE>z63zH4i2^qAc+(gwxpNU<*kgy1QS$KeGR)Vm%FeyaL zB)|2+*%$o7VAoQa7S+qGdZ^({#uETup9Ai`bp_6Xr_W%pk|$=MJvjZIHY$}Dwlc7w zPS2>2%Gx$$Z`9TPFe29sN}dd8H;>B!cT`YOlR>7bLQ zJsMj(jA7V<@yh{#horuUo~ffwJn`>&--pk5t=d-RE1X4wQso_8fUm|8d(A`ba~WkA zl~BsFJHi36=S>Vnh*>qYucNeLbG$yBBwJKFaKj*XNoJnvQW_LZUc54{C0fz;EZAa0 zgNLkHbM9wGWavt-Zh%{;D9D-I?KlKv$XHW@?)WmL7Z?8f>Ki zTj}dX*Vcw?_?iXSY(&yX$lIyX;F*8MljCa^lZDZu8Lqhsm9HNn9EBhXIlaQ;!{`~! 
z9AM4W8IiTw(3C<{5Yhoi`g(nm$Ce}Pj=n%=TDvAz0C#rX4LrN`%}kviG8kh# zSkZB!Q#chDYXftSahR-j+B%ix3myX=T1ea&uy4-@yw{5kaVfPzRQ*y?YH@;uZG6S9BjAM&rF&4Wu zz1&>bZJ32kZiv+B;0Qy^JF0uZvoLU_2eG;bjD}^BSU1a+wTy;lQx5}<-5p_zV*ZE0 zEFw+|cC>SD_J8<=;Q6cSbmN4j!TxsqY^@G`w5l{?gVq#f>prv}o3h@=Jt_e4xfNTH z1eYRd^GBnR?UgCfza`)S&`S9q8n}$?6c0-9WFd;)HF?q!?BuQy+jwL=iXvTKL1&JS zcrh;7=kOd1_vwB!Q?M!o*c=hc@s_#vs}U_{D6pFnJwvGM3S1~X0%xvVjhzc$?iDV& zrp+~WwNl8p+>omEUyOzZFAH=G)5Z%?3=;k}n9uLV-<<)5``Og9p!lPfI22ZM#ocr! z;XobZ{Y!iT8pMK3b^;l7YXp0{zwh?O!bmQ5TMM|nP_I@W3JS~Psup_l3fEo72kIBJ zfLB$D&rTBt&MqP9w*+Sap}##Nad3D-Pky7^5YRGAnLVTYZ;@YxI4uo2dMRNhyWA6# zgWkyhBZBuyMyvj;cxhu}7lqC$1s0MBEF^2bvQ#SlQCV^-^4Dmq7OsR3!krvbA$xYD z$S87D*|cVcJIajU9qhJVNwl#^`|Dfo5H`m8(c0j*YNPZ71Oy13bHNL0s=me2SvDgtva>a3IQoc}g|qmf_{6NVmfMkVhKFH` zsI4NL(F`8$D&Orf6qN}cIRi2h$w18K^NgXv$8vS0jM*lmBwWlLfjj_;!uB|z)Mf6a-t*q|BxhK|ldfITPEt3nG*=+dJ1Ys4s~7iTM% z6}s%_PP5b7-_-Q+_}DL9C@a0&gSRrp1KAF3iMKrqZkjM07VKs&a z{P}W#y3KnPBSNmW4d>OrA#yv8BS&W6NIAud7g;r9kWqP0}K zTW+qJ_EQ8A?uCDGDWvb<*4R;q-?6-y8H0r39?;UOiZv?WFC+0Ev?=&gSk-7ehdGt2JYc1;>R_N;R@^?4LHA72fcLCrliClo>gESm68KAt z{;aPmBhWi~{ZHjwnlH|C+gHe;EEeOMyUb+>6WxS!%|rheSBz6Vi&XElksvBd)iQlT zN1_3|fE;VWRPTWBn3{fcRpP-5?5EQ=;agR{IR`WsdwrJZ*7MfM1G>cv+>HBf%aT1Zh4oi;4reVLrn{F%{8 ztnJkE-NJ#a3FPi58$Lz2fL!PAH!~|B1Jlu82o0VVrK9^)hF$s9Jh$d;{A}<2ndC`F z9uw3lKs+phT%Thrke9Ft2p`}k@>>0I=aocL{(|bi>Da}^9$|09HFdIZ^J#bhd3kumI!FMvC#9YjNlmEq*==wLr=|4_oYd6ZRLG+V$?8howO~ z^`IGo1p7thCkacJJfAn%;}JcL}IkDBQ^(TL@7M; z0(Dr~{R^x7nE~@!wcEeDx{vhknUB4y{_#cXVX6P!UqSk8*k*Txjpu)l=m1SdU{7@T z%4YGvut<9=b!=udl4q{R`=H35h{1|bH$LX#oPKriBvqBR78gQXYKYUnE&R(F0UwD$ zyQO&Y4tPM_a^YKTmIw*22TVC);)ZQ<{V0P{NH->o>a(02!vMO+3tqybEw2&w*4 zDD+F1vYS48X9o6^j=E!^v?T!ZpD#6%{Gz6EFUIZm$A%1AeTwG+oNioFVt2#&Ec4`& zR~F%}Z<exPC4rQ0(qx^D1+A^)IafN_1ZXXs`CY6JvL zK<&gp0=F+ZFLI`+nZYDpvaFKdO?Rvi@)io{rWyfg8OMx<7}%E(MScmV=Kta<8&rfh z=ko!}Kap*;xsuP%U+YvqaMuDn8x<^;h%@|aMuy>0TaJ|@{-8p>K|`_+%0T9yvYB)X z}-{`DCUiwt!samK;?Uuv(-wlTFUOVo`K)27hkr12lTP7k)LQavwdMIqZu 
zUYXRs77kF1LW+|N)hScyfkoLx6wlB#CMXiHn^_>Ma5g#TQopYlf^6Ib$jr$(5^?U> zZONWX(#X({pmh7Vmg%{o1hw25P&_Q@)Dek3PuQlH(6(11I<~tipY{6g+PL<{PEo5i zRk)bjqqgErcLRC`HT}8zP9Ejqfnuvc?F_7Q%_!@;r8%k*AIhKghu!iAxwxA>GmmD& z6cBf}<{Hs-`Hb+9OV5y_--DG2T5<-ziTQJBsfjN7v<+rWmWzBYR0jT(cJ4kELWN4k-BA{4|6mUk?d_f{Nr#1aoc+9aj zA>`&+N9SROQ^b(i0hYm%3aGh(rGyoL;^xuSwz|H+k~i%h1b z187_bjWx!Ug~a6Du*kR!d*N@buD&^zEz-Wde8fJ4Nfs`m#%UMrb+WJVIOLw&y59V- z-MrWHqjTbVb3l6v>Hw#BVR*$*$JCTsb#UN%3(*z1&QBz|Hp~#_FFI;H&TC5B>em|D zrm|P_uj0`s)o6e+ikitj*VU8}sw(-`oM=iw=$fi`2&zBU<8h!h*di{lpfuN% zlAcRPW@3mz0r&p=2&GBb6R6MZ^wffCv@t_1resCi($@9mQ{&4I-N%bzHBj> z)FWe4D}BtWwgy>TQ(Nv%#?Vn#TM2_WA{GB7;7y($YGbvcNz32`5hfsyR~1c?Q z(W;;c$GU+zjD70%{5fm8RHydkzAp)61Duw1A(_QqPM8bZ8nYa&)M0Wl$hfJVN2=r@ z+MnPR#!zvFz7qA}70fI?WOInDwaw~os$GbDw))IeV`-@&%da@8i)m_pkeH+ z?`T>{de+vwndmxY`#_e|GN))`A&@98&lTQcF5f+cyNr_BE_ITZBzUc?E{Lkq40o*g zJn>B!er<)aAm$YHJp*-95a)hn^Ghx5T$a`ltWj$>O~(=^k014!Z+uGof*o#?XwgE> zHMOgHcq~yC}dBK1KPbPg>cuKvKDcj}=yT6&~g}*}? zqy!oU4rkOa-D#~V+X%P#r{TgtXcQimAL$aw@T~17)7~vK(udJqrLH*DxqwDRG)Is; zQY4Y>8omk%10Ce*1SOjqgu~~cA%D^eqc7H>j%*md>gvq3^{HG9Gri=X@^OvzshI5{ zAu8&CN^Upg45H$yP$q7uDg>qW91qQT!5ur;vJ@yne8bpa4-Mf811?U`E8!Y52PB$X zR_{2=zopV^796nB5*&X|LiXVR`^G-Ke(<4YtM3v)Rt{mWEbW_2ocFmf@^fXE8+(+0 zXuj@Ys!JbZkg6ScI*7Vj#{K{zRK;42r~|b^H)!KaXyO9Rnr&!fn^HfBUW3Z~yn}zbWwl9}3L-IjD!hTw z7I+d0ji-%n#A3Jmw`HggKrdMt3IFeFcMVO}^(W~+{iVRoMIM{o{!$!}D1WJc9|HKc z0~nHjuYXVm{5uQ{T)@BgPQiit{(GVK{NLY%|Myq_zobAIG-TTpJa?uCQanB?SzOhY5M{^H03Hl0bxJ?N@3W}$sT^y z)$z9%LhlJ{vyE#)OOj0~Q)pMQ=qWRCUrFmKdYe=*Sufl_g!^?*$JV%%YQKemia9UY z>bScq920}K)0ouq$oWI%XaBm{UoNWRwdw3MI?48;AIm&+#ayb*lO^cPo*HJ;v0f)i z5`jmFnA}l)W2%%2=Mx^kJylQ_swFRMdzOC*c_7NQ@RpBI=;jiRu=q?W(UZAnyZ?gz z|L6tyHM8Gl?b_06u3j9DNb9aVrgF5>Pb}9@g_K^tNkv6nZFiT*^+Ld1bJJ0pye;8o zTiuL6ynHP^SE{Ikk?<)ccq##7^=PzaUEgp@D;k|p3I4R^o9K$bn}p++m#U5MCl!nG zc&Q{_C#bt{vme1ZSDw#vqmA8+)vTh%b#X+#cEvKKSj}Vd=hZiv+66(EIM+K9yRsFh zx{djo;N#j;1S(V!rOqpF5-;yLITzpEgAS4<0lH2#sGJ`mtF2Ei+XRtw+t!1{Ue71- 
z+o&L2hx}xYE;N};HhJWTWVMonyW-D#Lb{L7eOv@C58}OjwiYYBc=_8_^bK}!X zPSmYRwb`N67A_8m(ECg4qU@s|{z1Q|%9@s_>uvWr)HyVE%fCExXot@2VyB`TMSA>B zg~o#)4!&$Xteu1$v{;ZSaw#j~!|wTc0DXJNc&^OMvdsL8YtaRrQyj0*zs1n|imxQE zBS7EA-JP62WMoNi-q1quK`XbCHdP&8w{Esm{e^N9_dUBdB9C(spi19t9n;yxE2F^k z*3NEmY7jC|e^DSZ{ls(sH!u9p9*zt;7LF~tNk~d+J8m#C<52G<#$kHw2bbGzHNyPh z0Smpc6Bje1H7zkVpYQ){Dw(-O7UtdOk8}3Ny^J(Et$7+6XMcA`*gNU9Wz5AJdiqXi zzp{z3+z%f<#7M7I>8@x$crf)&LZY+#&)YtZqZjp6udG8j=`_7+$Ozq-mVh zHL^$YnC0HSuUB@4aoSb5KGO4Qn`uk4v<*Sza=)#;`@3@?Q99{B<6}i0?kQ0SBKN9H`N}cyKZRDKSv7v~tWwq}Xwv2ZgIUw+n!n#)A!WX(3`6?-EVK@MMU! zXzpqqi54Tg92EapYR7BNyKXzAP!RBR(&O-uhs5G<9TJv4wDw&{o7@ud;);jAzCNz* zcsNbFx_K7Tbaw2s@u97~;c7cG)imHZtie~gTg&g3m!k?|>ujXIlJ>4iLBhG2g|?)V zJMfRf?E72-{g%cX1`qqWB#n!SigaxPK*?eAtgcwW}-S z?RqZ7dD^BbKl(B!Vj_%6^PA~>wx8d^Q&ys9EvVG!oJ{vq$!#+QD{ox=v+M6((xwMd zMxFlpF1=>@%3q8Vd3auSK}ww-n}e+(4uT#_vi4%+5>l=dYOu(gpm$( zvigr_a!q$&w&T5Sd|uwYBgFQE$09*?dW0vU5Wa)Ry6NEH;G^lYy1Sl!5Mp!97S^l$ zaseWAPJ!{XZhvM(`eF^4Qmn8AVo@8C_iSc-X|`YN{D4|Gr}2uvB%+Qej$qLdpKfMD&65 zt6z_i^N{Ern>-H#TBX~U?^_lW&M(HfPjGk8D@aNwG0^r0V{YWU@JA2EVD)|j6YfH3 z6|Qy}UN_8@t{8nxeNv+s7Vr0RDNI55MCQ%X^_^ty+{Dk1*%Wrbexc8ve$y|JQg=gf z&$BKB;?w@G!p<}08qs{UVkZ+EgKEcmPPHDX3NkojB_|rsPhSt(`A0$a+a><5k&!h% zJQ`QmvUhG_W@l&lbM0=9?+X8R#5&L9VhUWk`V_rMcG;Y&J9Ie?Bs`jVX0fBKqm@2{2;JH=BOmj2TiLz)jq#UEt~|Zt z6#&3>8Q_2IYs9=+<9U;~a{p>@a2nX1+`50tleG{sxA!T!kNfp(37)}*o2G__y~P(g z$j)nmhcG+e9uxJjiF<8+VoWFww^01O3{@jFoIU?ncm>-wIgbP%dFn9sl+t^|DGd^m z7@2JF0req1wv!`Z{I~uO7Uuz>WXeoqUBf{Gem$-8xrq`c<1ytXx+~8*NCI5Tn~<*$ zX14va;K%$rIfj*N>*M2|UOPR^l;LdNd337&LK)oM^o&)~%Ov5GgI}oWEl;UWq*b8A zdHL_22Ws7oy;p2b@{?hema;8as@`qdOqAFx$&~U-DM6k zsf(MR5LR~xa<|K-K;9>sWW1Woivh=X!bYi5J5-3$Yqn=5-0}ifcI|ssw;#s&wM+Wv zN0mcsm!%_}v$@Cc+7)6WbjHh=AIF$NJVN$N>p|}8FzR&b8Z!{Y-ZR?}eEBE0 zLj#tQwr+d(*w?jZ^N+_pI(mGleBymP!_cM#E|!sf7}Zwcp8dm}w4>s1SiQU2UahY_ z=eCB6c`RM^=>BA&Z!ZRxDv1{tmC3OkrHwC{AGXWVNKiy)ac6*6GX*UiJO=veaVK z;V|8%_qF2tIr9}>q6 zac(YUcetqvC+(PmFp~K4y7*q#H>zjg_7A>AHU+*Qo%3+hd4;++==qe`zvyDv_*LVu 
zeeN6tar_Jwf_ELNh@6+^p!gk|-a{60I#dMz#S=9vzlHIN(tXjhTX6!RPn30&Ht{T5 zzbd^nuCvlM6*aS5?#?AjSnAdZt51WfHP8V zCKbMasIYRNc2|M23Rf8XIW2M2W=Z=_e|l$^w2tom>Mw?Ga&vQ~ZSI+vn2ay&77Kwa zf7FTpqX_Z)wrTycw4I7oFjCT7L7qCK&`8=pLONEHnpNisSq$?td}ubONl^2v^m zxI2X=}6Q3>{voSgF0zu=4K45Aopy{T~W@ZQ6d{ z^6^=HSF#zPib1u$yr)&OYkKt5SzKt2-SN$*QirTAc}}-}>yn!t1Fmj@w%cAHkMoe^ zbw{<|Uz*|%To7k)C3mFCwUi5-yL?FCt!%aQHuVp~Jm|>e9j->-mrM*!X3v^*0;&J= z=jI$0zBB#5`6iK<7JqRZyvtNHVO^NqsT{QzM`_K;liTe-)nS;HNAv zsghUJc)@dr+)>!3P?ue@z;tuccGG%d(=+m}ga{L&|u)i$@H zP}(W)JC--XIhv-DAQ-V>`24Zw>rRz~@e|}jmWSNCcz(4!`6eqPlkIe+;yj;?l&JJ` zfxP^n$6+=~XM2A0y->tv$U1WL3hS+g?|Bw`U7U?_ZIth|x_2RT7v_@E)2}-ldA_Yue#RIp8BLD z|H%AC`d9r@;9uYYJo=)!&Urh%a&I=jmd^I5=zs+s(C^WvN=Q^EG<=m%wQ;L-^$fm| z1rri%S|CF3pFhhu&Gq_HgvTPZJGjnq%C5ch2wl|bI{hKMgQT1_1oKOLEH>jr6B637 z-Oj&fMAnxw^o4Nm(Um8b6`rByQ3)OVo3(a}p^(@}4bxXG5YsZLLz_un+G6je&j0v$ z9OEflO4}7CoE!LRM|ZBVk5>xMgBbgK16nWdWb4V7I%&svcT8L)=kdy*Bl(EDldN~5 zA|iUgGU+$PiYbQP&zm|RC^c+%cA7u;$j$kSSL&zvZu^El7fSz9yR?_Q4?wXuY$zuB zjg*E^6qc>p_tiYt@}p*jHm*|jJa8L&!8VVPckcKR~lqt@FDfg(5F9l;|0_t-X1p zvThY#M$NXhw)W??%q$f&VV6Ez8HBVST}U_P`Sq+z;( z!2!&JvHc~kx)$}GEkpd5xR@CC&sN949dCawJ<@f2@k+w7TtZw_bV)lu=Lkm1?_@aa zNhY=b`X%rjq1rLpde?HERQx0#<@a1#Md`y1)prO-CF(>fyz98GXwredu1WXo*|S16r;c&ZjNT4f ztll~&EkWA|A5GvcIk5B4##deE0J$qt5)y5(Rnx{{Mlt55UBXtE>5IaYUkgJMQvYm{ z%c;$M%z+`x?5mmfVq#*k$=o`ZY?A}q7N+)jVLE(kL>x1BU%YInBN`EgN#PRH%~gK& zEp|K%$Lb$R+b%bTyGSLOqEtyk!9^1hywxn6oq=Hil{Xz-5{}_0q(vx<`FqfU` zX^NXqyq-kG^e8IUaWLb(np6i)`s_GU@km`X_f_PGwwdnPj7m=u_c59I#fWd`6I^vT z>e+T>%z}KV@a2%=%|1(WaN71cGMJ8(k_U6;)qCD2 zy6V_g8mC|~5rIAy841lVjB<)y0^B(RG&CCAPj!?+ydGFeK|Q_iT=a|bifD7(w;Z&q z3U(#gOK&-AyXRJ5GbiX(M{)d;cs(UZddVOI$Kt@aktH#^f1ivK%`@dNZQ&Qj34aZM z_YZC|i?SO+Q8||TOXVe80auMO;NcvUB9|E9iD-3d+C zu=bht<1;9i;Z-NjP(&s|blQ=i_P%ad;k>Bm&s{1!92HhoXLswPHQOdUN$n?3oqTo0 zcBj{PW=5T&o#xcyqr`2VuaWMCy0`AceiMFtWlWoVNTBCMN9C^8TfaI;tnaN1J^Axx zT(Uy(uG*fJF6Sn&*vUyc)9*r$8VKL&vK|FS?H!GG)w$Lf{gEH2&UTMMs{j|A;H71L 
zpr(8-+nVr_asy~h77W-w9XrZU$Ns<`nCB{+=$z+W=053DAAHK{)bJ~@V+s$5#l5-O2`-C~Gj^5Hu%O)m zf1`FYy73{8!|{pVHIL<0zdSceM3kLVW3)hq53-y`Y^XtQzokQyVsFfv&img`Iz0@1 zcTqW72;3LV7o7D`^2hPf*z%-S1=Fgt$EEG8uLiTY$=p4mjSVUgBso^{^}&v=jDSRw z55Dy40OE@#CMQ1({am;b`i+gylb~z3kkA|EFP}`By*&c+3PU21?k9HkM9%1372+J7 zpshFRMC3eq_3Cg(a?v1#bMYWK`DWdiUQq4VgIy;hPgOos4m+)|cqQfYXU>t`jSi9j z7fsh4Pxbfz-`BOWg|fGVgp@sV`jC(MYQjiPEO} zE3JxCc(TEeP2$RYOiYYgfm$+dFt?Krx}LJ3VTKq_A$uy+R-4_EVi88;r=?vrvpM+; z1X(v#D}R`xP(qkB$g}Pcvv8bN<3LbYj5XTtElNFx z??z);xtp$5)e=3fg9AOz_S=W;;Lo4ABW?--89@#Qb?S3{1B2vxi(g1)c^`51126GN z?S=xYn=<9Q`!REGs#Xuq5SYCy6(ku9!82uRHRk8CUTypdwzgw z&>0lVWcF(jAyzSzM7pd}>zKOW(D4NcX?PDfDwbGvavMBV;l5F$ZKpYuQ6SF<&5?`U z=^UmndHa@oh1$$cuiPl~9L32`(>hW)FUd(*)i&;Tz)a0P+^?>w= z1F2X<<&0jzfIANR+Z>Paljy?G45s&02cI)K)_wQ}u?r2oxX#?j!@>2vtZ(4MMYJB4 zFNLlbH%zVd`?9J@?$lygX4OYwQUd;F&`G!*wIcB`XQhzM@UNdQHTF`heStjop{n{bQak2VS2 zEh3B*c-OnMgr0zXNr{*nL^V!)ivgf6ejGimnVDIhMI(c471Qd^^B8Wpb>c5gK6M9& z_vZ*X-X!C@J$=p#5yKop5!E#hgLfVg%p66vCc7%da~|wI&m62$nf;Z%-M`^$Q>LBC$ z`{X#00uY|F6gif^jxEVnGd~2M{#Po1qZ4-~7)QgGp9mh`Z6kzM9TpXGYC@q6$P5}= zkFue9E#G1qD)#<+E2Q{%2V(-PSd>YyRi+IoYvqv=(~>T3-Dy9%zziqYUCzyUw*eGw zp_Fw3kf_yl4McIa-!GK^UZlWnwY+mg^MXc``wL7il7{0L--nre=gigrY*u@}fTTl= zC=7jfs=~~fKF*BMgbwfW;sEke8DF%hO0ZO&x8^vW7h_BGX-~w!la=gu0 z43~TSmXIYs-Mo*@r@S&)PGE*Kq6l8SqeTY(Fd@{OblI!8Au=xd!B4SQ=#+@Ic;KWE zIWt8E5A^tB4@qL|N^pGmq8a(z)G$wUJ3?N7)_31eA&SV47W$FDyaGj1E*Cui*xSDe zdmzkVmtU(gNBOBy7n@HeMdTM5ZSC_)I-q4{{PH!^>fizEYas45e0;vqISp*<1d&SW z@@=goB19fCAKv4pYy1H2@4rK7#|9JG%)aYkaJ&k=BqO>U=2fq+c5D~ay|RO2j7ES@ za!02fhlktVLsAOhVJ}5{AM)R3K>ya6xEp!?L@m{iTdu#YkJytPJ(T%3gML%o={r-U z`!wC=+SO6(-d$$;Wz2tcGbHwu7hu!r28>@@nEXv3-C z9*OuJ5okp-ivIZD9Sd$5DvY+o*&N}a?t(DE^5DehN^pkmBmqW_@NZ79U zW3v@@$H*u_v*;7o@8r`{*0|wZh>nW18%wHGlXw%9&eM>;3!XBkb*SfegOp}%ZLkbd zIL3*o6K-=m%-`z9($Wo;(8b}xh7f2D4rRa;Xdyr|O`4g(+nmrM7lLTkLqin?#rxn} z-uOi36U46$1hd@#FxLC&Yom$FH=xY}7;%BxLLs$#Tfvg->(#q^qk$()>X=gljIB?C*yY zcggl!+~qtr)Uvu~a|u)RyUqf3?zZ#X@f%Mf 
zCAFF0H7UCOqzw(t_^i;Z@?TvmnI3XVYUW|+-^j8f4+71v#Z*7bZKzd9`&fa+7m_iM zTjH({ZxViuZ?XUKyTJqh?Xqd};r=+x5LQeeEPlIMY?--#Q(H?b zCQZzGL#tt;Qzh;T8BWZ_2oU4BAU(Rlkrx7JiV-{?# z?;Z*FM2eDv5yPd@p&7gU?mJC`SORULXS zaLd7-)!4*7oi06_;B9@}75gEO&OkH$6A7 zwj^pSp4}xyOG64Ys5da{wCpFzJV8u4);zaul`~k73uLRDv=zRVr&o9`^Syu#9$b7} zkyS!E5;aUu)@f72r@HU_tJLT>AotD@IwoE7-#m!6ZqjEVy%a@sXR2*aR73BGF^!J? zCa!+FlW+pF8GSY#L_2b1lXW47du&{X7=A7UapWAdzdn6K0r+vj-VG7f+d@TW_|9I?K>jcZvla&IIm}vGJQ2 z8nWBPraz*}Bwcs{ljB(ZdzV;~xR)xbY~$rdwW>e2C%uFsKdy%M%9dSsE|;;9Q43cB z<>3P+r0xG}0l>`8=|)2mZiT6eMRuXMBv-%K`ssCx!_%-8=(riB9^S;Q@L4B8-qYk+ z=ltI4&-=t7ZJ`R1nbc9O6kRv@ci_Vak>)Q1URQWMZv|hfub;9pIy&u)K?@;Qs<}b2+ zdFcO2kXpyULM|*#Z}RLcee&*G=(hgO7@JPB*bAE{;-4Jg@$ow@@a_{Skaq)guw;6) z)$MLCjxXhzVQC-zV%T>KP|{UdSeN^4oD`3~e);m~%YLkPoIH&u1$huFya?R9zij>1 zQJ;w zCkV3xtp|YT9u}{NDl98zK5b*+6s-1~?`T*IuBff8%?F$@kQQSO-XDKtCMy!FEDg9L z$8QBlzvnE?4=U01us3m^6m_;?um<|=%riw3e-I?|EM~vcI`m1%k0gF+*M-uuzEbp= zY4-n6sKyU~d2XdD9Gzciq4as;_R<3u8Zpetqg>Dy1iP-)TVLFQn;vJStISeFU7iud z(NijgZfO>LGb?!J3p?f_b8_+a-m|DVu6^e`pwuk-D2UjQSJszoQtt7md)-mDbBj?L z<>g-n&Uops;e^6Bu2h}O-tn$#;WOL*-;}?_8z#~EEmkt{c#XWVv^W=E?^mxm0twx7a4b}eU5Ntlm3Scgf&YA)lv*fzBo>6`Opmb zN_g3TnO-fz(vvWDhaLbnzm4QJad6AWi`FzOr+QZzZoGpqZ^SP;)js{|3!hMJ97nh5iT){`+S@0qUXErT#L)sA29E* zHBw%>HbFDj1-L+e7Bl!6BB&*9wt-CC4v?p9RCA$)YIYHrP$%4u^s|Nuo34VrIPgV# zsC}17BoE?*ZS}mqAUneTc2JcJPuzg>BjG|BmLBK^?)2eqEk5~6G?!;}T6;C@{2}|n znC6;Qppcb&jX`6*7aZ<9y1ep$CNIPG@3tGXxK0>~2#!ljYZOgfm8YclGys-^7^4m9 z9zE(#;=M3(^u*n0g-g0JQ~dbqxL=#@vv04wNcUOZ$`j*S6)*hWZydkalR7WUjCw(s zoCxplpAf$>&=R=q@4pp96b#TCn+FS7w%*cKbD2iLFRQ3SD)j8j#`BMKp2Ml4Q}Eq#|$7Odfc}#nD#tBmDh8 zyKrAm+n9K&659K9E6c*tGIJ{>`FUyCUy8*L&REgtRv+3}7AZHs(&kG=`%tE5mDf~v zWT#PEY&Td%p>|sRH45+*0jh)-v>le)&Bp8LH}K)6CEo_lf>vs-jM>cj)7~l4F`sq# zycJ~qV(eDc)l!ewT{L=RL#1cuaj{JI$E#*vx8E$Z3m;k(omRG)bLt#=lNZ^goowsZ zN(Ot--0IKb--jU0O54E0cn8_>RJ%Qq;ddOK0-Nj@slK&z0KBS)n0 z(_v66)njozVBxr~y{y{C(no)#H!kJ?BzpH|XW$JLAEi55jb6*W=}!wK7sAr-`=ie# zn14R+pd4ti{}>+n`NZy9nwT$NQYndzFYnXxX4_fOuB|Y*?{T3dKbV_~$+AZZY%*ME 
zMs>&gY6IjUwtsTtVzirQ3+f8Ao#A<6w?zwJg~Bk0L}Dp?w}SYIZUOe!ROW|X{@!pB z%6j$EGOY_+@P~R!?@waFcD{Z6T5i@i!qxS0beKAj$9(@|Qumu%UbwXjXG-hO<5dF| zHK75bRY~=$^dn~%Ew%R2rtCP$Jaa7VeZuh4@kM)JN_=-VyZL;tvCSkQEe|_B<3+|E zAazZS%P$n;^)s@@{{EjQ?$#CkT){*tQZr?f*~Yx5*RfN5qR5A@tqo1UEumuhJKlXB zm#RFP>kCs71m{kOn}=F6tW!4(S{7?pOl+tUH&#FCu=ltlb$}n$Cr5vNR-$@1 zC@)LNe{v?mOs0=YB+I)^Y-tzp*>N+Gt%{*>Li3)cyYso8jlv5+N@4TCL7mpW=fz~k z@3;y>LemFb*6>Bu)D^eZ)IG77EKorUAs<=|4?A@cHBz@{*+2c!Gxew1=Cwhvl=sM6 zTG(+L^4x$?Wuj3RxqaN_N;%$2pMcA~h$H|hAh1xSJ8M9FQ>A*e{pxukpW8?DqJCE< zM3aKV)8`p|a(Z+xc!X9iBw=#%&Ur|(_ zH1${9t@!Vg=3f=^z&;)E@(Gskux_v;r=d{NLCv54_Q&?R(ykjwe*&P;yD9T|T6#Ku z1#5o=JD$WxUAZKEljMrW^uy7WK~tehk5fezK%~lWcclzps4xCD0HhCCRJSVq1EQ!I zE~P&&^Y*GLUi9OiYeU;n&x|&!yPHVrd+(EP*0T;A6iNcpFJ*E&yy~TvTE90GpIEtS zCpgj6o|>|95`*Bi_?T7I-uk3~b}vBkK9VV*)O}tzC(vizZ?i*#-|q9sZ;%AJNJmIC z!kLPQTZI`Z3j0n3Je>9qVD+{qOL5{G((b1ItyP{f&B@Wu9eE#>$RixX&}Y(ew3zZx z#Z`%ps>=~&U-^qY((+l4&m_E@>v1TIMcK z)J%mOX|o5Ui4ZBn9G3LOH!qr1n`0MMc$22aYoChPj0}=O@DxcWLz=V=Od%s%&TNx? z_lMaUcRH5nYe6w{51M&q5e*`tBiDof2t-crKqG#c2^i`c_j6vBEmZXlx{v{M|A z`2(`c2~&;R_V`3o0;Uw5sm{HJRVz7v)wr?e4q0z{{n!DTo>|q@^JYv`$8nby`omPm z4CxY&M>Oi$P5VQs=lwo2*(NuNgKLo%6&GE8z7%3XEK7sEo>q7UFW&eUM&5)l63AL? 
zxm3NP#sVLm-|C1ekPooCXEbV03|LEht|&jr{Q7Lyagi>ENu}?<=}Bj$0nT%*rye*U zkGgzH9s6z2Ya{>tt5soqBx0*RoeRdyk5kS010-gq%G|a-p`SDw z^O6XTTK1p!Ot~vPhGhblOhvC1%G2<=kgZ1cIu3(R$SthBEq65+1ER{~@`dB&Srpaq z2qi4C`SdP&CKZXNfx9TGSW7Au5xX4>SUXKi7snMbefOyU6R1&m-Q!z#3iK23u0LF4 z~;QL}&@tBTox z1vIt1cXYbSSQ{`k20+F=x-%8v8010qvzuIEIC?!t$CU1HFkOMS!@vAdxzB&m#aFKW zaf4$9G9hd+Eq*m>i8DuXgG=_$YQdy=z$Z*TlH|}IAAe@MHA&$2hB;bBYl7v2jj(W) z-zS|3nPMDZ;Bz8e8u*(-M!^DWuY6usFTP@mC%^DMBH}6gU?1r6yqrgl--kA%b(-%zXAjiDjsOJv`U`c&`~Bz1QX0J%w+GtY_h>Zu0WbV z1^$q&t8p3KNE{pUVT<&T@ao~wR8nz1{nqGiZH+~h^jYkJ7uZ@nIm0%RHe{e1o=%$T zpI&s2oby4$ou$6*8|?FuRwb2{R`BInfFQnZr4Xiu_}Es>Uy_64u#Ia|T&BN3l3+3U#QsTh_AmPm;5W4{GW9-_Vj%ne`ikR_RiXG@UTAhpPc#&cHi#n)r z?^Jf>VZ}VhJJfd_(;(xG^0x+T@BKr2+vkTlGFflD(Fs%v6)rT483Fy0uq|Q2NUDDY z1a{cUzm5o-FIHMsHjcg)c>9g#`+=2jiT#))2hN-ZR@&?4&3;}6?%<2Q<0{JNIFL5f z+J6e|p0%Wb!Nwvp|2Y~83s2HmQpCs4eEs^h5!UYdFaP)CswDswQodO1d_lmgisyRm zNKO8WnpiRjh@7Ruj!eM+)TuL4$QJOt{nF|A;$^`KkK=c!2m>t+a+ZO;>b0;f-JnX8 zYFcP~p)^wT>dE zGq%0a2wt`V$>aWMQkyc=9baaJyV%_ESdyogeUM4Ipi6x!1o97kuPxBM<-If3O;1jy z?c31%?i~W}g7EQ3iL4kJ=AfoZ1rvkV`3#=FW-L(qvtIziwrTqUHc;-LK8{CATl*c1 zMgxG)zJauBW`Al>|AqEuNP9@b#KgEqSq(6~&&ZusLDO@b6Y_*ijs{-oM3Zk8= zLizn}+t)%kI#&=tQACO7D>sOrRCubj#WH&>!J$V5;JwmkRRFV}_-FTk{*kU%c+J`e`n1KW)^y&k%j?}cT#Y7I3#`kqM?g?Z(o)Qfy?BpuoROQh2O zk_^IW8U5`e^L5;i%VJNGx>#~NCW4+X9-9Wa8na1y#sJM^@&@qAsjgu4z>M*%D8u8c zA|i`dAmEcY+qq^INnv+?j_1*1KE9XGY7vqf)KQ)9JE+etQbbU(JrKkV2~lnZnaSlf z#72ouckQ8l{Mm^M`RwM;4rVQgyV?LDpS1TyuwodF=gjxn{9@zB&&J%SPDTxeXA=I4 zU7|XtIVeDmY`F|mN2|3xjXQ|Ez}4Z`YOHVFrY3i%P>>W8`-BooM#7*gy`!_Vz#0Aa zTQmtB0@1uM9;5i`FjD{OOE>9XADF<3Btx#?hEl>_?5ct9HwlbGnJUVSJN-wSTE4p` z-98gtsghm$Kl^%Pt@t_UMF^=vJk&%cf3z_xn?m~2r@T8xdN;Q6fE7J7$aYh^Uo9Os z^&T1-8I4}S$C{e92KlqKj{>ti-m)IPp!j+HjW7;+%$Y)P#u=~jDaGfQpVk8mIrTQ{ zm!UTTxNR2VG-|CFX}9V>xsOsfA9bw+QML<#%yAALc*a}(J0x|&A$_(q6F-!N7z*U& zZUt8dpPUWT*DyvKD%y?+qlj7+|EcRZ&br>*grkVgIcdZm@Z#u22trO$MIPJ`2i&zm zX8k=+6julj`d%Kr&htxRt!Lgtk<5=(b_&7wW1j5*T3Q7kJEKu6I^96DQ33w@B^gs9 
z%7p7Y3oi=T0%9;lF;-j?Glp&3OkYTW2(ChNFRrR1)!uWTTkCaH!*N7kLIK16Vyfm^ zMpkOPKE4bHj-F$Xi5n0^=M%`DCGs!RYY{SiBj3{-_(*UpZ-0JE6CAmFoMoftHgZ(> zR53kW0QkQ8mm4b1PAiX4Z>F+tC^&?Ww_|6^h=W3w-^lV6%G-r=w0VZP#_j~pN4~O) zS0VzTPli@Sudw4WGvZw@6hsEoJunE~4+WBTC0Oh+hd5`_#vxVYsy^0)IU+ety8U^Y z>EV<6LWMMu>A&Ty;=FX+ZToA6S+?r}9`u(`y%ELLh&g?ye)RL@M5dNNmEMij4>qx> zU>|l^+Xd|M_`jL7CE?x#lZjIHrZ~}cAjjUd3e?utLI&=B-2a^bt9D`%l(4*xql15u z=(Sa#i@s(mnn~|EIy$<7)%XU{N8en{vyXyWC5SKKYLirGi-vzafxxTtS28u?w03C^TuP{F<~*>4 z{=?dfRo=3Y?vCirwbqbE<$w0`G($dlQmfEYX`mncu{b|^ivb_6>*~WU`^Wd+{Hvdk zwqrzi2U4K&iT_g3ceK;4yC-d6qQKF7;MT6E*$@|{BacmvU%gzvent1d97nZ67>9bb zOLugo?9m8}+SFPsZL94EbHg{PNUFG`t;j~JZAIKVA_xg6TwEJu2h1jlh8m&fwe2xklJ{EB>Tes zK`>26-~F*$6>0tY_=?Pyad`E-#_i7dbjje)F-wQB1uqSR{&|+)NjyqL(8n^t(xUV8 ztEriQ%a8>p9d}yhl2xI$W;M02?IZ`~rBiJ=J}W}il~`o~mBHqXv3bBA9Y6$sj)GXA z%EXGfwy4f|^U26V(?-wvDd=1n2ea`6*DTV9=@oy<#Gh3)wX2)4u!JVYW77KhkmtMv zj}g?v&fC07M^ZdsjChO!e8CLBch zB^~pFAKL#8V&mUIWB?IEY8XfWkV-pcNxjil>qg^ZJO}rwCg5L*LM^bl*6Y?`{p)fK zTI!!N;NxAQ-)_{_{^$qrO(ZW?3?^e<2kXE6Wv!cyK`adkLE$?B_{glsmgDE6OufFn z*~)+U@Ue|{3Rg0##o(MGI8S|I{JY2gJ;)u8c|uf6@}9p#by|U27ZNm{_{(s#h@o?0AsM5&{}<=<1%Fd+i`rG0Q=T_dzjf6HQp{d)}IP} zdK4E~q$#-jG9$_MFT~N6y7PPS3ak4WB~(^lEEv8AM}_kRoXetV)jek1zFkLv>ACA2 z>ON{>x3rBz#>MI%9TMtV&YxKQrzfEs1UK4x5kVcN2gbv@^-j0@Z<2=I#loTX#sGu9 zW!eJJYHIy2lu;!p)gq~?@fy2ZHqA+EN*A8@i|rxeIJi`f(KZK{HoE3Q3)n73pe+j6 zl%9GbH8rD2^okC2fvk#$>OAHlvF%Z_|BladsZVD77mw)L?+%3A7+@A+ApjzSM zF|XTRs=;5YishS01t>xS?h0hZ9dydpr#7$pG=`uK-Jvy+LPKTx_Fg|TY`%#7Qo;(R zFy;P#G$@r2e0&StgM{sKeL1y4X;StZ4xbgtNR`c+Teo3^iJAe&Kks?2!YzK`SrCYD zJut3q&nlW+yb9v(X;0SZK25NiTyE9G2GiyIA{ zxDhIw!QWhhp!Nat;AcRFiF=tVwrH;Z5U<0x%S^89aPVXfUH-IRsUkQRrhnC82d(@z z^}6Mcj`q%Bj_jYhY1dapNv(O`R5xLpn$qhLa{=t< zDX2&-4}uiB$D#XxM>52!&TDaPm_tjb@LY{h!47RE9xH7LW;zeOjRXEn!3=txOoeqn z$A;${d>JM=A83Z11eS_oBmVZ%WC~;svK1DZ(84_99pG?U)T!Pt3+(O~Ewia8?qiN5 zz~Q{|^1|oUA4BMkMVrp{MTc3UZ#!5eA`fF@W92gAh#4iTVM4iU0uH=X*-S-a8r@iR42$^#eY4d3*pAW z+Ue%9TkRs=XGZ}h=zpd5iSEm`Yq)AMo)%=l=6w6<9qj!5*mqh4>XoJCKOf|L|1-ft 
zwrAq)zS`>R_A1)iyNq~D7H?kF1;OSgy%OrJywi#Ly>+*-FtmN_jz1prRCc|(v%JN2 ziKl+W`B5I7nK!3U(oZ<*Qi#k2<}M%V|A2fh;o(!%$54GOs=@3`sXofLA|Rje&VBi^ z7iauiCTCdhyI&~GrW!@|sthn?=R5?6^S@gYF=Vc!tCee--(Uf*u4}Wf^_Lr~H{E&? z=K)3!=o@2Nz2Xi$FxDN8YX9g0fZ{zfv-Hn%JI^G~*(oB{PlK5RE2oZ$uE~&PMpr14 zDYC}>IBwbgIHb;%86sWByatfgLgJ`%j#_{(ZTrc9*J8ndt%+7ih`rr&(p80Bg6_Rg zX_@*k%X3=_HOpt#f{Ec*5cWv38ZL8Q?`!_S}tt2@&vTW?=F;$U8o7r3}>bYI&2Sf3zAGd%d0 z>V$mWNJ4hwphRhPQQ(6gl(AG zl0~|SY?VN1VDP;7_~7X)967^NuTQ7N)Ixlv(AygKf_KyiVyMD>Jwk4nu&Z_4lbw(N ze2<`$>-o_I8FxYlr(intWS@HQi{>{~T@$LEXQ)mknuDVtd*BaqZeN~DZv6uH#ikyY zBW_E?Qm@E!N<8U*lQc~Kr3ZF)AFFyhbMXT-x{yT0O9l{b#-#MaZW@>j5oeTN+ zBRc1=5cG9&jvGHl_pOKNTO4qgHa_8-F4mUqr>3she9XxM_^@P{nrva`D4vX4ZV52H z$>V8=E(<;!1Z5dFXkd9*nvfReG=l`05J$TpP>EIl1PZiji{>yHp?Jo7S|#v5e=LSC z1bVHKG0-so7Y!yu)OTMR)3G%{?%oUR|Msu#C>qj z;3|wzInZb<%JO$(LRtZQUtGLMTY5Ly8=(FC1!o~`3oOb7Km^WsTbmeEXIS+jrc#Lv zV%D01$qm-S(1$~mh;sPu%*nyR=jRQY>g2Wfw^^fblz1?ICe$S~pu3L$Eb#wYfQ)5O zv9wCgCnVM<8dUccdURq7IC9zu8hhx-z4U{4~4~ zfSU}*_5_Koc#8;b=*}?Bpfc-HI_Zack6CelwRbZ`_w0TQ?W>^^F2!*-c6kMjn3Q8; ze87#-U-SgbW`|K0v#cgdLV8m}h*Mg8q@IK>sR~G^Hd7S4eUPbK2WgkgUqMiA;1T~g zv-mS^QauKwDMK`({D+V75OeVhB*fijWLs-nZ+8+bglN8l&3&$eN5gsSL^!2vwgmnc z@Y)IoRqGO+e;S`&U#E zKud-Jc0*Nvs$+}sbp2J&^F&V?we{^K7-og_<7l#=u@-NCbC8N}QV{->U zyHj)^H331LQVP&w-`B6KA{ zfRorw(HLB5pozhhn}zjL1%~dkqa~Kv@sTZboN2oH_~&1mSQaGdLQuoUUFwb?&1&tX z{yT8nk*{`SZ}vN1SF+3OpPq1EPt{!&B8xYIqsHe0@T3_s@s=Wk7kbi=tCtpU&WBrq zVc*2nY3}~D_e((zycx#4lgPC{KfnF{5zRmUAj3GPb8XLD{?k>fwh|U@k^Xf@Ze@?4 ze&2t`T=i<*EKFDBElSOrdFyfk+kQtm93_xf&%Hu~E~0b}TK78q&;iO*;qgCa`jvRQ zCIO&=+v`v7K1g#jZ+ZZ#bn=2T?K>lpLD?4?ZB-v8nsyRRU-X_x@! 
zF`IdWH*@gLF!i7bAT3q7DSdK}zh{6h=N9nBv+_4L1fHt4rEG9_>4A1xw89tM{w{Gy zVN|0ke0fIhHU!~noZ0SDUbFi5k_S4|+xzeKfPa?!3u+=;Ya*xgbp9tV(%qOoi89uG zFB}tSv^b={lCDx`%E)9t0r&Kv~Pq`x^3(W`O0*?y6d zJO~-sOz7p8w!M=6vtMzg>0swrk7U{X)pT#mitOyBi4Qd4iW~Y}VEMsk)Kr~6exU}) z4FZmQS349Bg1oY!iZKrVD>rZ6Y;5exQ)f4M-FBVw_B-#VT>BQQuva#?M0TZ(x_ZVt zw%OilO7vdxDr9IVxChHKaCjXR{6#Jt^H7{W`TAOqx}u`{y8c>Zf$rFqnsfGAe?3JQ>QNAp+d@E99}>(Wv)k>N5EhjsC?d z{I|YgdAvS_Y%M319yVn`n7pgC!B0Y{M#ZOEVUD3eC-nOVIg}3JUbpQqh!5p#fqvidFS?*?R#pk-?Vbf0-! z8{z;4Mi3vAXWaOyutvw57j{p2b44JGZB@T{Ms8mGW7ydMSL3+c)317;iw~V3=LzH0 zv^D95s(0?(ym`5EP$ni)$I#hSdY2AU7WSGgsVH!M^qULG7oO(l4bs0>KPIZ7YQ0sJ z!d`jgs3$!}&+x528M^@KHZ8w))qNMQLBvD@E|k?)N)0Z`V7xoG3nbH9^iRKH7nz;4 z*;`wj`q{;3u@~tx6N?)sRZKMUqy|l365OE;E3h>hVX=z)Z*>emM~Si# z`<37RFQ}_F41UW3CrHvbB5Mn5G~M^k zypX23@iuInmYa^A<5EcmOfDyyHOHjT^(|1kVi(EJ;=ExgTz7vek*$sJA*%G8Zv@^Y zcvla>yNI?VRmh&fcdbxHhJLI!jiqnhDlF$qM(`=P;^u}xX37qqb6Jb$q)>)~o~l26 zj9OozHem198}G==ybHE)4&H!PZ?%iW$ehLR_vP8C#glnr_M*n9gB!88EYEz$=n;r$ zK1>AzDX&4x1ePZo@dxbF7mBnmXaNH@ZC8<4B?b_#aj z*IQa2ePw`}ryb3k0uPWT>Xw0*ORy6Uyfqaq^|+C(RYv)B-cQNvEu{VOe%Zqn$-$3m zgljBbu3#(A1dr|pK~88VH@DdhpDEpK{Ibg^S3^JWlzF}w6Eo?hA&QD6C^P0i*gZr; z`~LUL%WBteC@N)vg+vh+lFWu|v}IgRxS(|H>rcN2o|}yAG;0`6Gn*K1wT?#S=B9!L zJSt)bYt7IX{jkFOX{iDdHy51-!#-<=1t>4&>F;-^0b_chmB{mk#>?4`dBX>!^~zc& zu}^_RL{VI?MXvI+r=qSbE)ZW4rWJi}v>CoCkV-B5r)1KiVWZX!th8gE;L{G9GyR%d zPl^_1K%^7Mvq<4%$ru(e1QpI&H2_Ed}3wG0fQ*gop2B_&EO8^({&{k zMtYjT6CRqd?li7|!+5n_fGU}cVH3`{q096^^-cA^xcmj>=RFtv7bGoqk>U2ed zwfdUolFqsjD(6Eoiyza|>m9qO5eP#S_8f+x!GiD%zUT7~JAdRu3XgDue7EPIV9224 z*>Q;5VE$)_%L(-^IeX5^1=R-%-8)x4gIpz@6G#am;Pfb}H4sDwI zvL~o0O(s{GH&J}SPNiWm@qB?cau0(0Mc8xh4}ChZZoH(DVtZQ(bB*zQT?b)kzVAB7hmrCg$>rd}bE>BFks zt{cIAd~x-c%tHYC^?^P3o-s-fx{xm!v;Ua$)i%7OQ&Mq+Jm})3rx$n=^W8#v>G^F_ zdz{M8-1n8pSdHEBRBGNvZ=bgnql8eV5Od^EL43Iqu8CCQFq}VI>E^kpSZc)?Y1AmQ zEGJ}`l93q9Ec!9Ly?quofLIRb?%}@_j0keP?5DJ&UQziO#4TrG zq)xHq!2PCx{yzk3W?o)?F7t(?`?&#?-oCsM`ed{oL^ODvJA6~+e8mb6$HUmTWxB;} 
z)yI2+??zf@1K+zqwu-okSXd_aW%5Laus2;*V3keJRulgy$D?_hT#3|!1{bhaLB@LN zdI-hfy9x_P#i-jtnmaXG|3p2bc5;1Wj5>>5MqEn%QQgxi=EkrYu}t3PlZ;6IrCqK( zjR&;gAwglu$1zdn!$zAK=`MfMy=mwLzD43+h>^c8HKxAB`>H^A<+gXwfw*f>noky^ zi02Zd^E+#sGvM5aQGYc_Ql9;kD#Nr@YH(^eol5UszHP(Xx64$RkltFJ@M~Ege8xk* zk@E7gauOUe@#+$jm2NSk5mKHr%ExB|Q93M_-#y`4*|c0IzuCMayOzj5(cr;Fw%QF^ zu}0>czzLNEL%{$)BhFiozV8sWqHuUpc`tvUx*vK5N9EbQYAHNx5Pg5^02WZVEV2-D96+bo_M}2{&wZ-tlB#d@5k2-%}U?X z$mmoL$%TDY6YeU4V&97-8hK~@2(dfZi^e>Z*`@*Mh4&WMr9JW_B^TV>WTx*-6PZes zw|W8?k?)kV72hO3eK5<7K00_We8pTElJT5!Z)oyw*|ypHHEPBAO!l)Jy4a0e3Fg-8)dYP)}s{vVO}Q62SaO zp(VyDP(80-zm|m5$9IVx${C7x9fukERSTX|F=P!qI!*hZqp(O1Mn4>6Wo6ZcdDeo} znaZjt!y*;{l5RO8i6zF4Ja zDCOkzgY!{I#km~1tis}0J9p5*?EnSiiBub>Kk5{b}@_3%3yE$4erDqR`wiB=!F z4b=DWG5PC)Pk8opUvY}b*^H>^j@Rjy9c!E|@5@)%V~V$oVcXwbMyvWS%m@!E!yn&@ z(zRJ>S4g^9TBF-@Igg!e^=A1PU>`4FaQjVP;d>cV#;=2ed3l8?Xn^UiPP5O_6+D*SwSNd}vnYrFe3MdZLDX!~eq#e_&^e8H6~KQA}etJRx$ z5BVioAVtA0yI97_EePeSja~N04S4ST4T`&$Byax6b*4BalhXP9y5Ec(h-oJ#y77Xt z>n3x-NnbPIp2xYqkxDaM!*s=CGRFsvp*~J0d}H*6Kk~k==A+Fm`Gy2~L|+y>7*RtY zo^}!Noa)UP>>N3cTAkr$+oyqTgHq1OAVVU$K0zPhF zaf^~Qm@aQKY~cZ?+WnYIn zY%Zw(#qX;ecXlz(HGj*2>{pbVqX5XQcfcpF){HM47I=5l6(F`I$9}xFuKWJo58D-u z`E#?V%(?!%aVX!?f9v7D+#x4(>3b4ABx59sh!i-ARm$S6jx5XIT19Is0(+R8DgWYF zg`b|DnvRgOi5DKCuZx*p=KsXGhUQzkxke;RJ)m^5{tkK!kh5;haR-rSv8te9df#KT zuEW1J0y-=7?jBq%)WCP8{`gfkIu!8%@w3uDV6-{)$LnlN9!vWPEVd4rw=iLBbc<;4 zUAcU~L;lceWX`a+_!ud4C0pRaRz*77)Zgv}R|fhdYJyvDeOd-jk6qpMUyJ0|_r9rO zqCBho(yh#-svqVzhF!$;xS?str3dlP9yh%LND5B$jN(fxSNrv~R315|cFT&9(oih& z2SU1=lp!U5Ymtu`mxD}qCw==)GV;SKI&y2itC9`t9HNFGA9-X(Y z?oK!jGp`-^C3=CRGHN?y&Vd>ui83uKx$b-2oaBXNRZinX@)VZ#+&9|lfKGp*Rth?@5UbbaUn!=c57E^UyOL> zdUxLAKb?^a)4HGLV@3U5&g<@?hPv-$kUKALZ3iJFsYGi0j?P)x;$IqoUU?qiuww7w%1U zuf#Re_|}KQ`s0wWaw2gYHi}(ec0i8DngW0(Lr)3>9kY8j9P0AP-I438(nBCf38+1H z>j_x^0aXo89=-9t%{lqr^>V{t2{+M_z*;1yu-$&JC_0-gqo<;>vT+0(Ry^G|UH4mx zsbvbAZ$rGp|Mfj^L2LKS+lDbW@E2C<3S2kzlcGrcGQDY!=t z`@TW$OL7<(qkA>wD{#jp6@mXAP|jJc#S^ho&Q*e#*nB^BSLvHvWHIAay7%9W#qh&1 
z75Xitr|b#wxk>q-Nk2Xx{r&q(Ti&zjd!O%)QKRFv<1lHaCUo@owztSUcv)Z4hHP~W zd7{R1iIt~{vVwoRTFaBS_s#J?^NR(`AT?5kqqte!4?oMQ6fyi-E%w}5f$qs_JKxA8 zlivfH&k73)&0LQWx4PV0iboV{m-6_h#DldgUETNKP;0CjZ82TT8#&e-k$qD^eY{IW zMH5v!mzggF5lf#|mc~?dgB%r3u)|9U`+YgKpsyio;l$g!u8y(~3oi0roK`^Om-11! zDI|CnF_adsg6^~@ngD$f#xvL2?Z-q(d{Q6nE`lf=z)5>peU*#6s=1LNEjDUkvD45* zHwHEKV56pYP|rRTi7XZ)ii@BA-s#k3yT>zqTaZ_nfdGAS9Nr0;z)|$F4{?dyWaaR9 z0B%dT72I>mdagdN3LG^ui6gd=Z7zDYy1u?N^coT0>*gA$z&1|-eE@scfzCcP7IC&! zy{~-K4E*pScqHtG*Yt*E8_})@WN3Pj7|I(jHTaPlC0xFDUvMC7fqIPN2(fh!nae<* zLPkgg`UWNCfKed-_3-L?R)%%i)tCptT=(*tSaQ^>Csnv)AM?+Cx*v){S+A&qUq0FB zE}4@C^ZSZ*WN!ZN-7xrw#_fbyfEkbq){ZM@$R_wf-{_h z{qSBG=Bv_LB%hnqpwW>E#oo7_F&N@|gINaG6h`Gx4fS4mf7zEUI)TqTv-OH#6!A(PU5@P`jEV)3`D!oJgHogueU|L8-~}PgS)K( z$wI^Hx4BxAf9}z_`{lA}1D%<9T4l*V#CKMhl{4;k^s*DWvJ|vixy=lT$_?={N@r!F z_un?k!ho?U5(wU1PO*j#;^1J)igd0N%A;9}>+UX+4l zeHHc#AUf4cRixEJSjShr{Sk+`-Xlf5?`e2y2XD0eic9)c1&eJ!<}R6guLehDL=E%7 z^hgV8bKCJ`_nX$r*jf8(uYK>mHGDutDrwd6bTg)Gw5D^&^i z7#qWM1^EYYAhUi~mYn**nXnK)4m(s{OP$YMLGVn^-e@@%9TPx&1~@qQR`@AhyXo2` zt$NC`(ysO)`f&=+ny|0WzgjyEyGR$QfP7-x#EPcB_ZY$Ync{uF%(XOmSyO3#Og1cl z5x`3ldY3mwe*P@_`DBcXNuIk{9o=ZK$d}ySY@Aa&?T6bQ?7@GEJyf=6*0OBHNnz0`tv^52%ds7X0x&jnVG-o~kaMg@4oZT=h@>$Ux#|EMgu=S^N{wfF2UqhJux3{b- z8z1EEP(Np|{r-@n#7u8itFm&GS2$7 zVy?NDR5W=mzpWoW`K$iD;(h&bCq>YOLU{#RIyZ)tJy@=yfg7zveNoO-k;ioFebWEq z=_})+?xL-SE=fU@ZV&`ikVcRcP(najR6?miVx(J1>3Wdv25A^dl$MS`V(1tcx|{cm z_uluLAN1#c&e>=0wbxqvoZXxc0DW@sh{?+M87uSBbi83=!_Pofsnn0!RFd?Zs$&y2y&x_1uB66{* zKnkIH?(6$hL`YTAb4lfI!524<=1<6cj(} z52j?lWKi2qWm#KgS7BOE$6MSWbg8+QVUtsl97Nb%O0gab;z`&QVEApQx?VYL2RX#} zEmG*#Q;?|6zv(8|dZAP`CddF*KBzA0;606jb!igq{)a82ixTAJyl7qTBfEa_?tyde ztKDipCO+^j4?^biz;Swz42PlB5d45c5yRx|c?G)P!x?$m*}sPymDXln+{4m^W@x|r zLQ%Q+;p`WRlE#!fAWEYG6wNs2s>joYh#c-_uG~Erh_NkF`y+8vxr9lLsn8Y)p0_uY zlkOWRrGKd1ivif&)E&P0CJb+cYEOGzO0TO0!(S-{nI3%wskBU8lH#i(q~$FmNM&D_ z*0L?xRH?Q0grvdDVmo0=+EL=E+ZHV)^p4gu@YuDLqwT&I<2}0;?Mm}11F(jwKWF;b zBQ2D}y%;m@v|#ngn$-Rqp1tY%XX|@Cd!h#>rKz$Hc21Z~17o 
z7H+*E8@wWN*tB`1+Tf?YqJ}G4ix>~D*v9Z%dq)8P$|+Iwgq)w1lx1fJ1Y}?D3!9Vp z;W+aW>V_jd+snGYdm0ek*8oeR#^^HZw#I}eWaIq?+~uFK>MJ;<&yC@+VW z!H=gVqaiP*VPLIsYM{Mw2ac?l)Zp+ZVErYSS0~*ot*W)1e8(uy{hBqaZCFU>mngf= z?10?yd=QrBfW#M&^8)@=^yg2|Yj$)|s<|_^x*qvM`YA8yblc5`p64oaB)$?1;tTG_ zhZp2`egDLaJE*Tb^vGW-h-yZMlh;K^XnMyeHqF_AH>ZVkSgpi^SVfn%F2|;Ju&??m z3`U~mQ;NMPMrJy(EH$>DLYSZ!(`?t-sqA9ZrFpYbIhhiizP5y=GG#@gfR{VeYFk=- z;dpg^bWvgiQX>ij{GP?6Fi!K4C$6 zIz4S~dd!C=W6N@UBzn+ocG91Ga;}{|b!0_(Rb6~LC{Xn%taQ{mt#EGUa~+IW%V#X7 z_(qE+(a0qwY96A~qRC2Q`W*1YrA{B`voxc}|Bm;ZgEG+|sa8cmi&fu&S*yT-LFImh zIG$&ojLoTkR#eyIf4$3FMB$7FgLu2E#o^6FtdmT7__q3NmOgxZ5PgeME2;kQ`l6~z zq~^)}tO=aI?~vNESy?W%2!ydEqntBa|7g+Nt&Xk|N%a@4v~r)P`d-?lFYHP+WQCR# zpNs+>Kml@SX0w_iIt9mofq$g>1JM9FNbu5sv&b!$DUDMesnzE@!-Hmlx(OcCXqmOK%G&_DjJ3h8Zt%#b$~>u+Q;c@ba+=jxUQzi zA}Vk!hSJH{uZTai@%jsEiR3lbgduk_fN*c+2 zGe{39wn#bW2Y3pNS&~>O7RV9qwn({r_4JxTuNoLJjgr)_34Az@lD??sofAK&W0;GWbuM|p#qN{vjBmd_G;P?v3E}v;FSm*%2bXP7p;gR{n@&Tby+<@<2r|)u@Ny6r%n?8ArnB3aZ z0KvJC?+;H%jj7qj?{(o*BCZb*XP_)*onHn7sp~UoTE{6-^(Wp?sKl@P1mYNm(c;mW&7faxB(FykWl=?h2DS1lI#0^!{<5RIM z{vtb4=Am~}^57geD)axdG?aQchdg|jnbkKl4D7j&jtxaMt%7wr^+g%D-XFaUPM)yn z**jHYW3vYomCtL|cp(Mz`&3V#Yk$-R^)4R4r~skR>js8*OXP5voj4Kg_=no-5s5E> zUB#cbr3n3!wF@vP)z%-!fx%?%fcd1=bsh~#d;Fj`d+nQq8%NzxNHEp6f34ELAjQx{ zS9wtH(ku!GfLP^puWk(mjH9ywbeoYX091@JMsYeyszp;Q46;1m*BYSFys`w`NRPyq z-sy8dLut1Kfd;+eesST%V2`bR9a-iAyRY1pr%3k3S`|JodDi|3jDCt~{cJFCbH)wg zU#>5*a+?wZ*g5b1L(UDxo7TI9bO&UI-Un5s<-Gvh-=a~bM=6otUCEk9!3@MPyJNfB zX(!Vcs3VOl10bY-k-%0{zl48pB6il05hNJ)=aERDda9_)!76u&e8Js5^;Y+LA$)0uJH^qGtfOT^yYzgV1k#2snkulU6AAWjH^yMTWVslR8ds5S5> znNC$~ui6h&YOS2peQE88_5TYti-}&RqWC83iwt4euIDvlML?m95CiYX4KtlzdX>VVparA~gOz6IPq)tJ_`&tqEf@ zKbsj)_THXsBl7`N4Jgz>3?_T23i|=21M5xoDV!Boru>iA@@ftV8=FLh0ezj!5apr) z_T^u=MFw@fW)*>q9rD>qZeH!VJS5N1Wm${x+7y|tr>nR4vF-emWi-y!WZ<9O zkA%_0yyWhvs_G>&`%r)bghZhi8t7ZRPw`%0z6+3FpNY@(PabV`(X9NU4I3WSSZjU= z_s47+(QnMAeNA4-Cj@c>IarlZKx_dujO(c7<^CE6+LtMM*k);Cyx8f}{@d6hclT28 
zE4-;>(V{gs{5rEl+&+1S+7=9-lEIe;B}^67{hBy-Q~P|_BWj==MGAgxZ^+^`$TW=? z2Z;!~NpR_I^KauZpI`jWY*{(~7DEFZR!Z=nGaHyDpAYI2Xs&A>9lU!P_4b4{;rtZB z?+>zsEMQYbot&n96j@tv{gq3N@aHQr=nM!kheW%NK(A)PTkpO-Rpa@v!FHWU)FJHS z;dk-1Jzj1)fmAJ3ItxhtatJwx&^e$I zV-bA{g#CWk&baYas~OtZys=4rmdm|N)MiV+WSNRKPFhry8)xQbmLv{ zeI>u3q!jPMWQFfV$uE^5QN{vYw%S@*6Fm?y2CQzVP+Ti_mcph&B z>0hZ~Ia6<>i|P>1bc5Arn=`)L)lle(Q){5hV8@z~X0|AVUcY>D%!KlNgGYd@}Dph;c;8DWf&8R`1Z|XYN zx+_QscK_2Z%94(u`zH0?S zBYGZ+#&=xT0xZp-Wz1Pg(t8%qK-j1;vJZXh))-_ z?p^YaeNCvtn@=!jXuI8c5n zStJJGnMMSQS~4{eEIzn@p~xt~P|XiW*)dwmGk@6c;V+nCq>dqCRJKp54s|!{kjJf#Vdf>p3jfzwm5Yp^3qQ1uhQK5Zo&>phaq; z1kvCwnh(fl*M#QDK+YnSm;g+@ppcN=4Z*xmsbs{Eo^xOX9)d>!j~9b z%$B0z4q2olxwuyq42N?f;l8%|>*yo|BZT4^2>#}4P*)n?;#-0z$!E&NS8+oa2B*6 zZSd?MV(Z82!pu3GbsM5}Ud);rVT`vI0|ZTgvI5{SCe#ACf`P#acZr^kPVD1mSV1zu zt@pt0LLjSf>V*u_5m;NjpnFqRP3ZlUFc>G#KolbvIDgy_^4+Z177JZ`kH%H4Hb8)9 z_S#5}lBYzCK168%nSs&wvJJlUP;n;2wi$-6Wr|A}g}Q@|@XR{DkBO_Zf0~cV^B#5t zsoQ%9t)a5ms?|_8);Rr$NH-~v~Hsmc|D=ci4uu15nV zqIQdXCG0#&9FC7=37Dvq={uJr*=#F;_l?fdNR)zc@cxCw4*rj6IQ!t2&?~{Kub38- zu?@52JtP`3&5Qs6E0d4^9gX@MjzO#5Oag!FJkRz`5R@ue>`Fku+g0rt46GuaDy4_p z5e!)YBM=r)nnA}c{@2d}l2*6fO{?M=4YoZF-tgz_85*+xQb7bXLV)jZv|Ef|B+zw8 zhABS{um1|#!+!=54j|}le}Uhl!>ah?I~$fNneK+?ONn4a)?#U*gE>?zs`3p*>vKO< z`3Vryc3GDhH&l{@Zh>fG;k80zN?#d*53JH8z zVu2^ispNLBdd}k_WBj-2u&#HZAv71Jr%^C`umb;K&3y`ayTk*)}WkPTO*4XQu-u-gS(lztDVgo~aHV0OxX`#!%F!=xjX&|8)QS`vi-fDMr0Uz}Gt0PAj9#G$UzJ*WUK)P>0_e==UD9pkN z^X);9BB^Pz>kO5n?Bfe&aFr*}Si$CBn%X4%Q3@QnroOR!L-3cXjGpV(fYf5c(EP+K z{>7zg(8NO}Mmr>~U4{Dd@6Q|jW|M7SGf3a{XXMi>o3UpcmZ!D>8i@i{c{P-UjSYvc z#R9+*Uy`_VpCL$LFD^O|sxek>y0aGi&hb8|J=mWPWOrDBn+0n)WUSnNt}qn1ua>pP zBj+&66Y!%I%cVua^H`S|hb|~O zcsYbipVl=e3yNPw2~<@DFj`u^3Hi|l<{@$G|H6SOE3_Q?+sq?lKMI&sh+j7Ug8NY^ z0dW-@g4+hnN*!E9mrafoI^+HKbs!l(zuJt(76VYHo&)`pK#DzMgLI%CPsVJ%d6-NT z)o@fP2<1Pc%?NOU+|z&=_$BVcDR}m@JKC3nYHx2LRWvIt0zVOk!0Xb)`d-RT7QGi9 z9xfnK4~~5|ZtD+q6s3Y72s?8vO4@u`qMS`8+1DMX~$K zBO-Sn%jyAZhpgZ`UL9>KG@(XT3S7L!#5Vz?zqd$-2Sf{4pq)A>!Q|T0jR|`nV&BN6 
z`~dfckpfdVZ<-c5&=DcM>-4Bycdtgv_*RkFD4exIN2-|s;#-m38Q~cQ!`1RxR!Xf@ z^#y$6eh4oH*M7s)Y`jN~=9}vUKYF9tl?N&!cHe@jtO|$|a9`Jy8#nCVLkG8q;%L0? z=H6FE5GjjE_^}TH6Hhv17gPO0NbU%xSz?M6zD}(XH{|&|{&|B50&m5(&x&0Gn4Ona zygm;T&M#h{am4Vpe6no*jQtFl!`rhLrHyu?Qlrne4GXND{*u<=F545y=AoeIKsdv&z%wL@sld@#9ZSGy&B7LPK5qcQo3{*RDg-XtM?^Uz@J!?N%t2mm z0~e0*k_5&#&RkV`lnDkd4B2o?LmJE`g<=>r-v0;}ErWA_ z;Eg5Q(VtKjeBBFY?V`n*g(1%ibGN7OOKl=n4r)Xky;QLT&_7Zyc=hO|8fr5bvh1!C+KTo9PGomj?;0yZ1 zXTZ0t1#kOA-hy;+blbc`{lr!D8zK7`Cp)%BCQnHhV!A;B9vaHW~%reVeV z4leY!FU30HZco}+?W=(=aOI}tcEpFtN(&<_fH|^_6ki{0VO-KW#?N-Y!Q|^WX76w3 z8RQg~h*U}gD@pb{??=yhjlfPhWBZ(Bu>Z1|c$wsw20)5WAJpE*(BJzvoIM>9+a+Cd zAPp!>g%x081nX1oeN;S%{nr*b%#PbI25%ej$D7c=onQGKK8mv1^s~liT3{C zV|2uCHki}Ck}2-S>WOKNV=fE_)H6L?B(QPthtQ)y!@pANx4nK-j%@*$a=|3swLP*F$K3%Dmi=m{4n(nF{An11w2SH!W?}UIEzHJ-gsaRY!xJ$P zg<(JgJ`e`Bd+PHp0Ix*&Okh3;CSzx<|d{b+5y56dwU1oPzQ zIS`VMr6i%bxp}LeJOg#x zJa`V&-sZ?5dYXo3Slm7W6pV8iX#}!eW|wT3TIOB%FY92q1>%je#%*^MEKwO_B`o_2 z=#Sm@+Zv|W^V9&e`_6kHh;kKlX5S&-S`23?-p+812lzC6`2&LVipF=x5D@GIM%5pO zi%Y?5Nr9M7)-Zw;LQ%|N@4F2|R~bDNb%41x2rH^gTYOY-+tM_Nd1vRA4`j$1FzwDZ z@ZQnF_X=IPbPdyTk<|Y-5F4|B^K2R}b|LlRolXB?|Dp$H6&ON5O8Za7#@d?qk4&o* zT{B5UEsZdJV}sG9!u#PHU=*j-DXRb=DT}S=Ow3z?+qDz^j^fw=SUut^=`!+p$?L^` zndpMUz;h>lR~bQ?t3Xo>pjED801R8pr+;SV7>BSH1D4)S#W^2gGy#R&vIjE0+*u1W z{;qWYAw)z()GH%+M6hO|$f2zsr$OnHXJGT32Fgq{Ox`oTdj`?yinD_+MNEG-R6d*l z0~Mm!e=U*V)pc-%tl0EF?Iaz-Z6FGf6=)ARb7~ACoSB&x;3>yCFTjb1iVR^%*BM7N z-2sL=65hIAcm}z>t@_07J0ewFEPpv@#kT9c4pY7llE9!6Dp;wY^QmsG1>6Rn4LWhT zpFn*HwOaV~^$0QoBFj7Ki)2=;l47$IiX8P#!K_hl8KK zES_^9FN7?n^2GATG4WOuiw0)xn<7YYXxKcvz|06F9DJ?7b_XXQF|tF6KrDSGimDHi zpxIcR{Y(>wEPSIW1rRX1>WZbl=&d^S^?F=&^%5S=qLx`4hRnp;JZ{r>CeOy0Z@X-Y z@%*#{!ojP5E+$g;mY-C^KY)2=;Z?tXXJ=R`duS%`@%eXqfO5a(23lDvCuQd>!Z!|E zHQ|W{AE=`Vcd^jH?B*wKYpF-Runt*dh3%x_&vB$(esRtnBaj`2A8IFkOogD>5qJR$ z>x3?GOAtoaK8cK=g1AcvBbI%&I*(%Lrpx>x7TY)~;fd3#j=l@UQBXTUA{MaCIeeQ! 
zSUoww=mM(X;!p+Q2XDX>5CKG4W0UWxIa2dhq)f|24MgC_HmVj3;eZEeD05ZW9{hmv zB!Fjp-;iYh|0Us-_xo}#&8u3N+*8V{Q!qH(U2`>iEHi$UsLZ(smbVphIA|&qN6@=n!XE%f5vf{bTo-+p zrdyag97t*k{80KOZ<-~zhi-msi@n&}22=Fkehl1&Qk^yInBcTnAT<}Sn=&&OZlQzU zYlc97fQI)I>{q96a6$Q8VyIA~BfMP#r24=v`1-gu7GTgbe_EmgpE)=aNcf_@4j1+y zC_8cpfgKf<>F$b%VE2`Hb2}Uh zA}7lxIwZer?MudUNh%f;%{y~zi1oi{mHd8YrF#Ne)UyP#AX6_?SpDo|LR~WXu9Zlh ze|pU840?P}!uzH6?*c0|qaZgX&C%g|zT?b#SM|OlbPfy9PY(a}9wCT+TaEqK%nSh{ z3=SmYe5;+wA(e&Lcw%fUu5$aXI`5wH{CWUie7osOif6Au>RpQC(cN>yG?ctj;aL+# zqL3jqDgz4-P+;fOq+6(&G%~2z;a;* zZh@uIbQQxYl1}Y$MK(zZC@z;6t?AQJ$1Xbu0b)$o7>azzfOwhym{ztFrd*yA_f&M8 zW>@r3@&%r$A#ht{!U%>#`oJKK-{KlL+*DYY_$2-&Sfn6aA{Pwy0}G;c%7^~4Ff{bz zloXO@o#bP9kP)RZuVm~a=8^$mNHBDwi>3KrmRY$036omM1lJMY-66p=TW zoyR3H^O&mtD3}sx>s7Ix@AVX*raBq{iO^e7>U^{ric~6&~3AFQBd>>!3@cF3r4NyNrzvY6jqR6ZM7cbID_lHTFjB3 zjI$EIO*f;f0O7*@*u!aB5F%{k!oY`nX2V}G!OzB~hT$xK+*SbH?tXhzeUC&|)E!}w zmOD`s7#A&dqwmZ~iBNBNfiUQ6bvbKDspSq`c5A%KiuRBQ%C93{s$UvOsj}nus6@fN@6Q{1^rGMYaqiKrR{+&vD)2W3lect4$}Kzo-@)jug!=R@=sM;FV_kRQFZDAAYoMjp6rb2r;>Klb3Jl=F30& z|De42^WF<|uv`eWk3Aqea9v7_8%Fvc1zDKCTIn9nowRJ@`i5IgrXP_i|TOJq^p5iq`d&C|q??fNj{Yge6-*41*BV%R|401pH! zYkGe31NB#T36*RXXscJ!A2aYWee|F8B9ORUXMqzU>nZRyTbG0yg9*VcS_GCnO-30$ z%6F_9@!HhfMC$=-C<0`VC4YqTI6VlvcM5S=9Mt*xE)0orvlP}ZluspqI_{G4^EuJ* z$wY3kHL&-dJFZ`yd{A82^NTwNwao{L$;!mWUScm5a`_@Gz$DHylt1L4%%mEXieQu< z9vafj)D%6z>lASAFPxrxa?x1KEv4FkXF9%Gp0S60+tp00yDL_hXEZw5kEud&8~&Fw78! zr%?oTzfu`@-vmPvJECJa)jADbZY4aw)PlT8>mK+jcJ_OkYIoa)mfyV<*8cVhK#5vl z+M88c^~@TZ=pBu(GAIMPeu0W%GG>^D^s4Ti`?xvSww@*#$@g;_rR;&KGjgndGTDLGPwWbu{K7^aAVTSSL1nL9@a@wYWkv2@~IUF*grBS_&xUPNhY~}$H@n)Cb zhebUIR)uCD=y<0SN~N`nRaqGJPnQ5>5mUzD17fG#advyUIU}3>WDr3a0M`AX0hUHU z?oGYZ`p9QAA+~~rm6@5@V%mslrFl=Vag;set{u|xTTILMw=2EzCfH|2=!mC<-j-lj z+U*S}DdTbQvW(TYA@fVbWNm}1|5R49ZjPyUsk@IuwJWD;+9k5}sZT>-zyAbZ*1&$f zt$9)G)+l#d=4e5Rx7Pc?{My~xtpJ)`j#lpH|rNv+y>l9a&;Eyvh_HS%Kp{mU8oW=VH z*mRjx+QLg~ny zoTzvBFoXLwUo@f^oKh>mRWWA%>_|;*kc^JX4+k$$c!op50H}z*{Y4ODmAdg_y|sC? 
zLRg~qmF@4Wj+?2mvG#P6knUzLntkT}bS#S)J5zkpTnB4JI#uje@Z3Yvxjal^oa;M;=O)@ z>2+@cuy&Bgof9O|=V=s% zy+3;j5tUIZMzw0pRQyId6b0m?OG(^wPf_r7ZK*UColgRh4g^KV;ZYh!%7=P3cKsh9 zfIBw(dHl`!VCwb9Dv>%b4L?(W3}o?qE%?{JH`cvKa;yZi5|K$pwbjq&_u85bfEmdi zsD*5*yqFI46k7_)+k{Uwhu6jY>5UT-R!+!!L!`{CPJB3rDdsBT$EBAA*Zse~hC_Gq zjX{enrFBN;8wXPp6i#TLA<-w~&0@yNK(KbPN4Y#(B)IdS-a89MLaAk+!+!J{B0U|t zhV7_f17BUh_(~o$2C5xYtBf~LcoTym2L7|G3>SS->NurR1d-QsMb=9r8x!PRbkstg zCh+n@PR-Ta_j3`H+#|pajwO5B&EA7rk+Z?i`@in+PuEQI7CJJn56NtdMxFq2g>{3{ z@f;5wI9rg=dB@$!>7WqJN*GkNcRU#%hcwnly@xG8ko3P?ApTWyOhGr1U&G-UR6 zg%>#@6cz9~XldAZ;s0EySl$@_9J%lV+sh`Hf;}BQeOJfij1P|3A2{crpK;mZP6<1z zUcpaL^noEDKBCM_JnWWMo~y^X{ZyqpmF_a zGlKOFz!U>k`hQPNNoWPU6Tw;?$?ZlYUbz%GQJ;qaCwV#;b?SdyKz9dhpq?uA4gSvt zuu{~gFfii@LoniTF1R3wx*nN05o`}r-FU8#T}`HARd&IbET|s|pRWe4-yW=0Ustnn z->31d_tw9vU;NvTv-$ho>%6UZkQ+!aaTSk_Z%ZDVC?b) ze2%xJp7!p~-rQU`4RVxZEk>xBP*JdP$ZC!mDjIFQx3e*UPmCgd=-X)|j*dD_nT{fy zT9E2?YW`Tgkc=dsp8-eUjI#wqz0K*83+mM+vk>!FS*LsmfGZ# zW}HlSK0fZ5*|~t1B}eqMUvu|#1`Sj)SC#jtN?uG3JsKk2Hf?q|?<-&ab3H6N@Za8> zutWM4D>s4rjEYZL>bM?;$*@})p%{>#zWwnr)_roI$9&e^e`U?w&s2F&$kO=L6SLKw zEd4N!X5>rSuVi>{eZnm)ep`I*U?ppitL#wgjK74Q{T-P&GWfxuFY)q?F#@Sp`TV&H- zA!pbf&(9#WpSdH6WM!ggX-BYbNjcfr(Ed5R8WJa492HehH_$dQVZ7|02zq)xti{N< zXUdUGEuqG(!Kd!S?@Z{Nu99w0w|!;`ZfwSD{P7w*np0BreNWfvA~UBg)3J%SZf634 zC?jTM{{n}@*UXPshQ#6h&bo2W%Q1hi$J4ccpq3tbda4EftD}mdqNMPOX&lF9tDUaC zt6x!*5QYNe^eo3jl%9Csi{re%hZDJG$}&yno2a_E(Pk2Vw|A3Yh<+zT@Ri1%-Kh5d zwrFf_cyDn~E}!3ByT|uhhW-AWy>YX}Wc~1*sj!=4E5EAgOWV)Ns9OXpPr;Ho%w_dt z@8|L8KQ3EtcDxAR->+GGsPEC{{c=Ya@wa~XLD<_O{xizs{zb%f#YtU)--=BJ|UX(Q- zPkC!Az;fYm3Ad$ip*dl%^AhGe_|0npW<0w zhYc^OPCTI3+y9!It(=~c{n4Ppa|R{q=B^x&vz}SS@J-G1@qMyXcG1nYA)+^_t%40s zf8-<6wH;rhI2s-nR=6eQbb)XAGQs6ScJ=#L)!pR@M@? 
zZkkJbPVa$|dXbd9c}e?_en`bO+@LJTdEPqzLwn_oQKjob>2_LOx{U8-dlfCi@c>W$?bYGGn5z-aDEean878SwB|EqBJ1udT`p^GOlVT4)v@dQU3=&4{i1VH z1*Ou>e9jTgkBRu2v-9$vI7bW>mAGr^1G%x#K^4~6teViGv{HwAeTV{TB zjwCfZxwhvL!A1&iE~%)Hu_Ya~Q|pRXPt>86=qmo$qkp;D6JC6Ax%PKtI9sEZ?IyZW zU>jZG)ba8*w@B>+($s80DenP;e9Oi5$32nYc2~6#N2Glvvw`Ep4!6|xVO|TJ>pE(f zo9~|KyU;SnGa898k9SqfI>I788ReCXeIu1I>AhR$L*g7NWrBJow(E)0MKy78x2E3M zYN+&v(Tg3gl<{$w*`C><7*hG)T%zdvz0z{7pcF1v#m04HOcIwy;1cjHq~wG@(Vc1A zB=~o+{U!Ynddy3il@#O!Mn}?vm$#qNiSWPa}v33$6_)pwv zb=aY#sP_1fd#wNQL-G>nTn)S6&Yc~b!Z8=-R#2yI9ed|Xsk7J(=?l@W;8I)WbK$C@%U1~P7^mRfpe=J@jr`CUW*{8*-6 zZf@Eej(@n$+f%mRH;@W8=kiV2O?b@K$_NBV(xPy5>bbs51x)Y9GOA3#ggFWOphfqn3q}le#_rmaSgnjrpev zua_giEet0Qqa2Ko;1RNQnkcJ2@Q+_xI? zW`1}hbFv)7m#QG54PK5yBx*3$OR4RZn=rh~^NNH5EEZ)5lIW}yIw$*#4r2~SDqh!)Lxp&`lX zg^_0Hbu&-IO@eB!yR+za&BU8z%(XkY6qM1(1Vc;e^HiJd` zm;sVhPLj_(M+8C_H6G^+I=nTi1Oi(LLP8$~q$FZ1?!WSE>C01aeQ1^QI zY;&#i*5PIQt;202k=jG3sMkZ~e2qU_T49%dFKo#>_m-igs6<<1+@90h7lk4iP$+gI)?eTI%F-XtH+)=rXl5+pc?o%Wk>$>`OWxSH; zcA(|B+Bd6KR zF0yjG+uGh9)G&xBOl)jha3+{zm!T=x{0|yqJj$oO=HId}nD5c*4^GwG3v)xAfRDA&YRrLs}ZW~Efi0j9iT zHom7>Amtbd**bl_hnlR)Sv<@xX+~MCEEL6TA18$HCHv4y1XX{it)FI(3EaM@Z$KXH z*KX;K=Dn2sd%cRRT&_?Sa0v4*RNrYv9?iw(r%cFrN8k{KVl&FVDfs@VN~tU<)5&>M zpuu~+Y$x4wpU9`AtZaK&&>1{DFg#TwXv}gzt^$AE?D0F^iR)@BCsw^4WL9_CnOyOo z#D`vX@n|d+pTted`_1lS1zLkH*EeqfKr*g_BJWj9-VJ-AWx1yEo%cQ}Elk>?(sXQM z!kAG)x#g@|`Kh~7T(|IZ30~)f2Od2+$}>m7Nz%|eGBk?A;BuVrX#zqv;6*bD*4UmD zmi1-*kXKIOu!Bcj^n7PTlebzUOOr*QV(#Fb&nxdvPCR$-EZLnj#aM1yP8X7;;FW|H*wwnjagNDuC|*qfhL`@3GeATwuKjD9nBHKZ`~N-oKS4JMS?0Xat9~xJS27?)ymSKVZo~>J5qhG zn|b6~XxPljXQ}fH3-a zcl5)x!M4w1HDNBQSz~SOs+MAIfU-!RR5`p7o;4Xol=)nTKW3bmSn&90Bk_)zj%Vcz z;_~H%o(kOhQA;R1{VsXZsG+Od zUlOyP26Efx-Ndq)s+{bZmkyn};Gg05Ai6ItR9=u_%+hR#u(&{tlq)-H|9z%+g>M(4pgn@j8Sm_82eSARO= zxAl2T6RUOi?Hs3}mKV0ei^`1;SP!&f)MIUe-^U&|SspF?VwhYx-;m`QWA&W4~<>=u*xg+UiwJNxVvj~%s+@kCOk;;F{I}1 zY=`FeUYZL@s#cIX9q5l(ikhyZc@|VwI>Oc*xY9hf&BAB}PycHe-;r!eM$V3mJZ`D3 zuIATEXZO91#GGKvV*v?8MFs%xTy2=bW};)t5+=vX^w+=Xfb}O+k>uQJew#G5ZIK*t 
z=u^vgwLzELlP7fBURLe7gam+X4pka`g&K^A7DF5rUR}v@>(0I@?`qZO_6&Sy73ED& zUG@y^_M6AmG1ZEe!9J1x&f zo?J*7!&g?qMODwCpJgN=UpeMZ=j z?W3b7GDM8n-O(3oW8$#-NiIy;GKlJ$rEp?Ry(j;2EsW;=WdD@dkJuiUJ{qo5eepn*pe|CLBN0e zdi@zeFHf5G?Q(RgIctQk&v`mWrQD=(&M&F&p~`Q#kENtu^o>PN7GC&=U|XL0w7Ij%}ZEAp9O?L&NI=-ZHo8JZb5-F$^QMe+`-$n&-RHNp?_Y$M|? zb6%nyc5jnKTAaUDl<3rUIBqeyev&Qk`u@|(lxMkE6uVXh?foOzx_GYM@li%V zSeB^G$>;wr&iBR&wd8bbjQq>Umwq z;QJnD9TXu=cro+C$!4%^)T8!%uSISa(|wL$tV{LypTSS11sspMTIWvO7@dR~YgkOu zUZxPs1()>jxn2@KKvI0G=aDVUV*=>QsN}1%{Cqw>J{x<3tH_(EYOhxE_T%pZU6v)N zcPgo2=V~4jBEG+r{m57Zb6?f8#-n31oo?wsuNTCftO3rP zg4-IWo+yT^@f7hIpO|Ug82qucsS|fh4jgx)ljh#Ua&K1E zzcevvq^Y@@ZhcQtUwhwsfrDOFkG{-Zi!+|>`kgktz9R{)N4S1#ukH#&dkYR9%ek)i zXMqDF>-YPc@ zw?HiQVP2q|MJM=0)=Na$+wtp#fclDCZ)_eyB00h*cOI?xSdHc#S);qbBblk3lLi;k zch|4WBA%SxCXh-{`L^AE3L&P&lkoLiWwdL2%5hsL^CGxsE$0>NhDm||{`myyvh(=f zjgLFD*yHf{f4BjbuW!{&kf~h#RMU1}K1i$u(m!SLyl3?Grvvff(`CnUUVGcCmnfFA ztP+QQ_rxm=6hkJdDp$aEK#QKDnUiW>df>otdeY%i$jefhjZ0T^hrgUN1gA`>R9&e( z1t+c%i)Lh6ToMzdV9=mPT#&ur@hB*&N629?!$(!D)*;oh)Qk$~ZB^NTCi*-T* z&sj)dU~43Jc=4HTPG3Mw=<(U7t>vCISejLfht&GV-2SKk=LPWHGyeQu<1vKClNtrt zpF+CKHLE$S+BrTN<>-3xd9X8F@KLt#;P}MErNW54{2D4~3VAst?gX{J#h4QXpfe(A z)0D#k)V}$wtSk}BOdPL*Ok3xMoR}eT{96Ya07TB-lTd1zY_RCkd-C&yqn5|Z8FkBp zrtnlL_BHs~8c*(v=M)u_D})?p2o3eOQ*Ecdcr#r2YX6V9yMS{7woqPfj=3g^qMKBW ztYI`K5kn~@o%B37WRxdwV(meaty!kT(%<1f3{)g(UhbR7p`n%j06mIPpFG%m64U=( z$8EMGAob_f&t#tmq4iqCvWYxHSj)|z(-SvIagEJ+l37Ll;|5v3*KsnKEE-8w!W=Br zb1&u%IN;17$jbnyjsM5qTYpv6eQo2%f?E)fR#Lh_x{YhLr3Yp=QGMam~dR!~}c0jf*0 zoQh#|_p;nOODS^W)HB?;9VOh*;tO62h99UYp3b>l#-@;Yh(&B?(nFxj*lD$26O>in zE>6V;I?95C#dO2v!k3QJIm+SwX7m#j<20B2jQlFr4~G|#wQqPmf~xT?v_WS>L5`!Q zp^(}!tYp#;%|Nn6Xgre=D^}>eA)OX?=pXCb>^`j;IPb_CW9Of)WlN9jAAAz?K~GDq zJ#LuSAxw~uI&y@k-S(dND$pd`-hhHmv@Mr`bq*%N4eR=J^{qzS@ccppXdh9clG>R= zU6TTk4{t;kjBz$ZQjc8xjAbaAKSrbz@{V=3l_=;lrp$o@7z@u-bjj{IaSp z8J+$tiMXLX_wl`#eE|y(&St_U9^JgZXFf8~rVX9h0dGy{aI7(}((qq11ho(%{L@6g zGM4n8yddPRw&6Wc5Z-((hY7nEV@)FJX5B_GMZH({karukL67hXgS8ZbJvx~HO|=mrtlpP(aS 
zq)Xk^P$uDs?c;O@1U6}MZFx4~w36FT&w*sB8H^9VmgQ~u%q2T;t%bN9W8c1K1)VED zS}oN3w(S7|);?x!E^CBQ9npuWO0$Nner~bJyx%-+_0X90(C@$wJNvX;a5R2N_Ux=* zkt_^3p`nHAcpNC5KUjAA6O;?7mjE5sOW=1ot?2rq!+5crUV(669{0}KGB;6CQ|n0o zot8Vq4HJ6B66-A5Zpb}0KHiT`Z1@VAtyqYRO9PKXBmpOxmaaIMH9+?10ofwse2NxJ z{8eR8&X~trO-6i@E#>3+!ki!XE$qg(4*8~ z@WEP9(a78O_u-1a1;}*#}U+138Utb@eomxpdF>179hR{bl(RRJqNg zPSJQSD^2&MKlx0>h2z>RX06Sn>}N^nha;$bH-hYzCW(QXkotQ&ozKtD*AULD-Kb)_ z(c56W8DWlU;s?%Sf8fmkKalDI1iS)}mfD6yEgX)|0QSsa@^y-Ec%1Gqk4mbhkq-9DlxA!j>~#t1wS@s1zrhmXxg7z=rvU3E}Tuc`mDGX!e!Sp=~Z10d1ZpzRD(ray5c+O&$S4(N}h(Pa~lt5|ol zO(`h6NbGPf%QATz@RC+LLj>90@(r}>pSp0M*oyQqmoJU0a0y*F0@GD-Q|fsezAJda zSHVQXC~T4ji-hXVjo3#S3O`GxDy^LdA1?EMyN9{ezT6lZFv6yOhRWmBqBuBGZzx5b zNV{1A%pl)@uswwLJ5})@M#7PcAJ*B-p5Gqc-QG`RC)rXtf{RpsK}BU{!_w`K>N$dT z*?eao*3Qn%Eg?@rL8#ZlX*1W@`~7lCYo%U+5qpdn-?>S!E&o>uEotDdtA zxHvJh!?8!IXCu}MN8|PKb~r6i(E?hCkYSF6j60oU$}M$*t#LYj?NLswAM@(AQ*dx> zY%(X48cnKz+m?RU>3r<4W=!4b6%mQ+f=^bj(DX&!Q+eCs zhdLJKA^)mG|q9%+*iuG#YM4v-vAYr~iJ%y|rzz7YCRt(u$u+++9JF z)qvZ{$q7?>#e46Xd9N!Nu;jC(9a;Mu3dfXRYcw*cLLg~@k~R=is@;@f(0M9dH(&BV zQE@F~GuO(_QLZg0_Uf%99VIql(z;Y49Z2y|jmvaTzufRhz>&8!zo@M97JGznae|gX zNek5{ulEDAQecV_D7=q!q;W{`9k0C#fZeY{w|OLo!vDZ~tb>_yL`WfwAOW(W;KU5K z(Xq6_s%xHTsJPVQ7qmTW_U5%eA2^eu-;7>qJ)%=mJ+an?_=SL3esbkWC8KRIClY(PY-;+c(j7o5Ba%r z6syMLcXT4TzVu_O?+ooE^ZUnCd_VcAsHb!sT3fUyf z1htE&v3diwa*^qc=mv+MwZ77J_)JER@%+6mxKvhEotrQih=sOAek7^#_5ynIS+ z`9)k4FErW|rQ&?QMBw2HTLiXwQlBv;^fp1@qOpZ|>w8R$ z(p~fZVG_5CWk~czgdx5&pQC8>saE;L1%y@xI205`9i3TJhpp{7gCORJN)F3Jq2uTz zJ#cs&&P+|U0o*A9ZUylpT~wDrsTgHjA~HK?BU0IOSMqPl`oq0YIv26`QKg{YXvu(Ec{k^h4~!Y=A7)pZBY6J4Uf-yvSb-?D zR6yrf4Sq_9_AtFFm-kW#r$zbM$r?&cNW*sYdjtG50QXjaO6!>iW1H0P1nWO}Q&&0F zXRSUu!q(ha<`+|s^c9@q?BWTMKE8pBE)@e*W&;bm+|rtJn~M~EH_dI~OxJ2IH7O|# zcDL6Nk|eAMjL4E*M0%Li?c7nmQtIwz-GgUD_rs{^d9#aQHC3CgSVBlu{4|+{M%zNi zc^Hg9e~OZtOE_)Xj}EJv*=lwbHU1tPJvuYb4S^=aXXr4u)y%uDYOoRyNDdJI*lf7l z)@zk%j5BwtG=|_ZrzwWN)9Pv|3hD5?+marXL$d5SKac0*wVR z@`Xl~WvIYR611G94qgClJ2`Ub~mB~j;s-k-WEjkaP+_q}D~7)iA*>$Q`ci!7(A 
zGb;6%-PV)ju#GR*BK;-txi|#e)t9-sxbRp2C`wKTVarA`Zy^>#5^iAPtIE}w4+ofT z_Ly}bJlY;X{&hgjpJ#_P!gcP*l%N~h8V6b071=W8a)Ru3cDBF=wg67fr=@UoY~P}x zXtyB~u*jQm1MlAzRkDP>JKx&gPNMAlcy}dylAkW>(#R02mf&c;q3zjyg28>GS>|OG zdtpHzLtvUs`0llgJ!grR$k3zkREPGbW-RsX`YjR>%#jS3e0u0Rw#OdWWwQ~PN6$1~ z+pR`1Mb6b{S@!znoTJ(|=_-bsxUTw&k$YBN9zGlU8@gq&a79Oth_#*aLfJ=Jy!2cW zyb@mDIda8X2)~FbJ{CC(YWV(8FyP)vk!?0Gk21dGzZ~kkMoK7&wUFAdoA>dks}Bgzy%9T6=@pJtbAqThL)yvS*CY9GZVrw{IlC**$lbfSc~0>? zZ!7i60~_i)rFqSn*IC zn(k}Z6A9L*=maA0`*wA@_sIpVh6ELGJe{tCRFdNs`s%5Svgg^xJM0DJ;$&DpGR9oN_%ko6x1JHaGII^QObY0=(}nii9gU&ovoZYQG^Dml9h! z*2{imnzcd|DM7TvYe61fQX0>;I=DlH^!ho@)DtNP52Hs=g16n~tYVxC?OL#Ll-D;o z3E|r1)9E`*zFv`cc~35NhjZmg*YR$0wa4kb=d!9`^(U}pMypLO)d z5w{6jRZG~!g3|c)kDw9T3l??mhvUu)mzBA>HGAJM(lL03w*hvB4pk(-g|!8ZJ*etE zyK* z6_(DO-mkmob4Z<^yz;;ZYc`y=p;Vk>am%U}1+s<49%b%CRYG%A+s59np()P1)7HM` zVW5ZbM7Ng@%Uh}qX0LfQ&-6!_ea?zl#F})E;;ywQ>mdlrJbiLcLj!tzn}2+~Hk0&l zKftwi4_?8Cq11e=qN5n6kBV(C$qw3S4QgK#zt9ykih@-a?G(XQ1_P*?pk%t(GW@eA zl3n{R=Sn__BJdmzaC#607=ESuT4#GN#`t*x6s zqsI|MRh9U$xj`^qYQv_gs!D#%Rfg!72;IQh2hs#y2SxI+B<)kSsaUEr$F^$T`RFQGgs`O^Icmd{dhz#hq!Q^qo5@%UP zuHm5)9!-igJ<>NOG%#yHL2T=^VeLsB1_I>@k*UVknF9O_c01oH+gI;iPYTPN zRx;{FKxy#gHLD61jBl0?IAfE&^fkNaNTzWTWFxK}p}A0k#B4HWs3wD1aYG90u~WHe zOjb5O*S>4xJCpdcKZ1(-y|qKq+&awTtC2bM+->PFO}b4o_8Vv**_jd&x!&KEOnf}7 zb2Bs&M~ZRM0%IE0S4fAJO>)^()2}Yv6G{~=`$#qe#i(^vcDmLo)QYDPv+W=|2AjY9 zGT9^MtcN%fsG(ct{Gra!LL+ynvO7C9$?FDG=Gss5#1I6*6F!LJ?;?!~xn4uIs2Qsw zoR~zne3*$8-?|!0?IN^>bi|Db<~9$ABd7E$oP#|ZBuOMwTv?+I@PlW@mBJo%;vdWN8g&jV{+17o z5u%ee{m~%NX&GOK)N|dbhVD3Rxkp@P1~Z(B3Cv7>+D6FFHaHy9*GKr!3uU7NFWGG^ zqN{a~F37RGgOBaV^s6KR<2YuyYpNpm6eV=;FvWZC)pcBCa~K?>lE_hS=iFu@4nQpGpcRYvI+{u3KvJCEGw`125A1S zelTR~XqP6%rrLes!rz|th8Z_uNp0?tt7VyZ_2d!{c2c#v;mhFNHpdmh##!AJ6RK~d zctE$&A9$7gyo2C_@uuSxSBXZ2@vhUj&Uo&4FXY)XKoJLEV4(*=N|&Krx7Z*k!JG1R zxiZ0umY#m<=TOs4hX~66#@ducDU;A%?XsNhSq4v7c&FIlExT^7v!OFJ*NoHn4jO?7 z%;F&g0=Y08Tu~6s>=O;33J{b0lW^Widulo#uBFIg$#Xj5G)(gGH8DK}IPqI4(54|9 
z4}o?3=XfNK*b4af1Ni^l`TZIHeLVl4Gr`{?|Bvs!0io@`PlSA;1PPk|IVgdE@jstl z1wI73Q~dV<1^xe@Lkhsd|9ld``hU0l|JA@MvVnPj6x={VsUnp~X;zuV8W|SnxRN65 z`#by5Em$~WrA{67lvp&9!*OE(U)1Ekrds696s6g5im*IXlPVuNL}6S_X80QNPup8$ zV1hHAKeB41-NUoYrgWnJHKU^_n7j563+2r~ujZrm2cLvQLAKLm=Ef3QBjsL`mHDva zU%Mp|isR5zEY3P#P6 z1HCLtaQcG&2r>z7mo`3gV-VTQY@Ga`=KmBq{+eacYiYId3i1sN+{?XpmI68-ix?cx z_}BPb1(-HNjjP!lpF=j7z|$nSv?m_v^=k9-f1T2lHA{7wG4KYG#tj}uA}$whs0;M+ z{RZQIF7m0d?9Gt4@u?WN&w!Ewc+M`f%@tDq9mG?YqOAFbtGxRQ&o?XjF zGNUCw`Og);o$2q+VR(9h2H5g|TckAzz3O)O&zB-+Vv%dL9t!_{xu8;a^$yw>Dc8T| z#$LvN-4Qgqx_302X$Eik&o3bMZ96gEV#d7*M#c{1o{Er8TrfnX-Lp2a zh-Ay;Qi>=AN$OaBS>FHrZY9bPC!aRqU33$h>Z+S0b*2t6arD1u z4`4VNvQ16SMHI(~*>tr-A!#gt7`k$j?(9D0V|!VZ%Rbq856}0<21QwevvXF3LWWxH z)w!kN!%y14mTcec2`LWYHt|aGVJrPRNg~3lpul`Ye|!F1OlTBDq~yNM(c>`st<9)e z9jncvYyE%6`X5S=Ko!f^l^uKdsTr8v!aL@@zNqPVw||ZINjeypnTG5cnmvsZ9LN#i z1)oIg@^a-vW@7@EEsg)3)W;)(4N#?sEYxzGClGsoAUej)2-0otqzJW8{NDL@_Gl?L zc~tkQ)3?7fia+@D9q4YhuTApsuTo8x=5Fx*ZB(alleU|qW3@eKIv5Y0_+gLBKNC80 zJ=4$sz1;=PaufZ4JY4Y^r1K@1Mq~#^`BbU@ZnYoX-t1PUVDsKHh^OB};-9TVgTpjK zpeEJdL*&EO>gqg+o(s}{_4z&02_kTAS}*;-z6S0v--SKT-s$7}Jldz` z{}WvQpGW(@?YE;E7drl33vfR}|CeY(Wc0Ipijh_`HN{_<*(BO91*oF|E=?4HK%Tm2 zP_S~e%$G1^$c6#4nVMFEb7hrxP^g{=&z8v;{Y>6w@;R!Uv zx#_;hPi`5}5kqTM=JL4>PL_5GiX9;@QkKPT^Z^4kQdYU9v@0$;g5%X6SW-h07BqjJ`kbu|NpN)N86y^MATNe4$ z`ufFJ=c=mhx;Wi2b{6ZA_P*&#vuS6mnQEO)M+yF^DLvkPLjqRI9}*<2G94stU*ve5 zv*dWKv%>hd{z=gI34isSiQn10Ez#tp_n5HCYuLEyhgroGU;XVx(pPd~Z^S>z97nXt zUcbut)_avW^-brS@J_3W`CZJW!*sxFPi8Bazd7=1^Wg2l2II#`TxXN6b+7d&-lGxD z)aaFN$`qXt>g}B#0yzZvBH4M@Up8nK!%fDHEq^(jWKc5jc5~H=ztYO&tQjK!2Y)-B z;%eOEbC`?-+(zJfzc?zKFyH4wX*<<<$9(uyXLo9_8aC}*Z6T?Z#Af$#U>O-77woX% z%jpOv9GxVKA^>gkcjX7G$ugyBjH70jN&|^!YIc}$lMf)Yl#g!Clp)BAd)&}VYx$Jv zh^{=aSBO11PGWUfrK(_)YIv!eTw$t*u9ox8&gca>d$RL2D?R@*PO{5sf}2u~_m%fu znu)G!zE($y-I87X=58t6F5`B)%ZbI*%b-wIs7cjo7ny}Zug^}BIK3;|WFzA+^9qsS zsb^}cn@M`N8evW9y?TM|1ihH#m8op;F1*_;)SZ~y&Uk8os79q;+|+_a(W7OP4<(f@StlC>G*ww{p($RA 
z*(7&0?Ci;)Lww%af%MXH47Ewm_dE;(Nwfjuc%gF{$K5Okw740QWvkgbnBMgq%cW(b zw(GImYQ5u9tq?IgcE{yR@>xM<(*B_Yu}7tlT_2&tcXRLA8*hHscV*2Ve~)_JdU~zj z-KfaksZSRhun-7q+BDeOhhZCy2j!-W&bHi5e-Pdu9&geBJ!3--JOb;IYMmO!S`g)9 zO^9+X%eeyC>1vDP)8wf}s{lT>rUqxl?=oBQ6am{46yXDT24Pd4O0$O)R|x9K=A+{l zrXh+@rhCJ@)M8QD?4tP+_5eW_#ZkGwuP{D&2cJupsBubh`+R4B{Pf!&iH9@X5m;0T z!BiF%h8nRof1G)p>GK(@C`X}ATO-P{?#A`-1Pnpa_1=U%c9>dnK2;TUepa$_TRx?; z~qGDx&r!HfUQ zHQN%Cf?Am4DKoysZbhFmhvKBDRiG)Q5(+pW$T_Se@$F^7+zQ*3i|<`%NVZM8EIxlO zkCLJ1@yNw*sdq$WN1b%ig?ccHw>AwzX9$!3^oJRLJlS}l0*}M!%K98NzPHfS!>wio zPhr|eJZrw%qpO@NYQ@LcU%YudSX$-!2YVyX5G%^(CC3<#a@ZEPzo*hK3uKzyqeKym zJ{EnsAD;XW4i9a5iOjIMG8t@8Yq+e6qGJ#z?n|VbT85>7>-@xu^U}{)bW0nXB~J=W zOw5Q^)L>I0DHRt9>I0ixmfxg_ebc<$T%+X~wSSW9?J#Uoni8F0qL>xIAFhX|Nf#EX zLtWkOs2`GDpZ{^YKAPbqnk;)FZ@K$PHe+J zGGom3soq~$%^o|Iwhvjf9$^bz3k$m;w>Y@66b=SLI7VwBGq}3Kc>k<%`|RQFD|4)% zy|+kg>cUH4RpQDbu2B+WjmKTBuy-1_&P9#iLu=H73kz$zvrK+5dQXt+DlgobgplaebyEdwtEoh4Sf)#XH1> zSzA)E>kRj>*~%(A8M62Cc*&zf&VUuGYuk-t@I~|49-RPx6E1vruYbEPFi19Pny$*L zR~XGPAE#FoN4)iiSfu;AHs>jZmnXqAXGwzDO>I(-qgr2%@LBPO2C7y+ z4_oMSP_w(W7H&R52yu*c-soV1ZWYroDuF5@-?vi_7F!x`shRO!xs?o?56%vKTdz=L z7Sp%8-c6!8P7&~a2hYRzK3;>0EpJsKQ7iUD!x4BL6eL8 za@=|;E~A^SL7`Z3g`-L8+y-gue3KK~e%G-P$|nPSo26t_ZWMd34E+x^n(s7psT;i< z=`jqFy!uEJMR~Ad)P=J%)RypX-4!}&!%KFpmTY({GtqTY9RB;)OFWky(3V?nR$ep?CsZkB&jgAYoNw!W|9R5<%L?V;r)1*3CxYJK?fPen z9{VX00vO8zLvCk7vfu07N(VkX$BmZ3y%c-(MT||jA~k?=E%nhEcl$GF`AF|lTga2^R~%C9_I+cKcL-TRe#~_iHSwC?`zlkxUkGJnm*Ig z@YJ=Bgg%+~IO9NSvd0i6HLQqE|D}jtp%AXl=u?;d`m+P-JGP?pMs@+8ntq)&*p-Vh z5%%D3B3wLW&F&8acg7eNEOV zGHB0RuS{OQdz|#Sx`rEplLBw~Zm5DE<8g&>wtrdE=R_ zDw@QiIqUe(;}hUpoR`$s-+gZ_kebj-3Q&}%>EG9Arj46@;F_5yB9W(l9@hRmQBTa) z`a$ymBl)2E*t&r8A$8HJG10_K#EAv4Qp{p18BlsfVGIXy=X{AJH``gl)?+weAlgJo zNNAz3U!LBs>Q|0j&}DG=cq#BP-Y?Aaw+;t>#;hx#Vqfik6!MamW`YO{r%iH05I+U> zP{-&5y(O z%Qrt+!#lV+6N)Sm7&qr|Ey{et&#yV4s)gpOdzOEv_u%ys+QQ|}e*l;;+?>ToQ6Kv1 zd49RO2u>9jPte$!P&_(s=i*@~cH@of(2a>ZW*oMtGA%9a>avi1Z1RJ)mh89TKxX<> 
zMAY$AaroTv3arA={L!!!2hBk#0Lm#thyfZ)K0x#A1FR`W$wx7){?k0Ve~t@=8}vlKTmusDRYy42Kd%Xh&~U+rvbn(=n8v*rHK{tABxAM_jXKmD^yOw5Zs>EgsrVq-JbSg z|00A~slxjEwWLBv(M>rC7W$Z0-Q0MTx?omx41tq^wir_?tB7t`1Zv2 z_}q6L11biMCIl1Sz^7SOn2CE&+3dEks41f!2W2dPcUCZabADfk^!l0#gpG%%Mp?JD z6gOd}dgyc0_pq0mkC}5$yxVX0cjr!-o((-?p8s`<23+4wAG;bWeT419PnqFX?Z@FZ zQT1`O+zAJDz6}FQc8iiU{S*Y@sH7eyt`FC(&&nGf74u8@l2}W|98{T(u%u4Ja`NH! znvRHakaV0Y{}aktxB2WxB4+COdV-~&H|s{Kl#Y+CH5*y%R|-7&S%oa`Ujd@iTx)EV zPPxFCO9h%puNtNf;Vpc=1J@ZNhv|M)jUE@&x-$);+5ZFr+?R%#o%n;n%{V$7-~lv( zF4N%5(!9g_IvK9-Vq&4%Xq;^VE>+4J6%-Iw#Qor!@En?~0c?#awIOUR9Gr&AP6 z$AM$MYXpas`(p3;GFPfYWY$ty2#*S;+jRgxmFrSYi&MR_1SU0X7Fl^!QwjD_vzi*; zdU=+AtE<5RDnK2OdR;xqZy7Zr)oo!&cBKIbpjj9XYVT!)^fEFo2JELscnzeFZyvYRQf4uRd`M~wEg3qR|>r1JyH|=WPrBr5(d=RXjT;eG@^_`@v`5F z95LC&*5W_-GZt0^pHBGb1uV1t1AYLDrh+-+%ppQOH8vfzd`-l?mRGo)~ALH*z9_Sslp`G^N zfM4>o)tCw1buk-8-&iSM!nv3y{MEvff|plVq1xHfw4JmGfg=-{^bM&=$@T82gB}zU zzVd(FLa7_^@x2A|sf{t}fsu*LnKYIBmbNpPdXxVCmYPE1>FHI?xzu_u40co)Z?I1u zPN!;BxAJ=veac|U^MNekfCvHe36_)@8@o8w6JFaEa{=P;-~LFgl-8*~w-h`J5 zva~$7*x7BaWIDSyee9NaEdIoRPeu ztkHe&Yk5|&15D%Q{-qc=EHJS_tp`dYYq}7E5V^v(eIW;tdOxCK!9%f!Ana0d zZ30tha%$bUTF;dD3wi0n$F!W;fupKGyhRx3M<*Rv2g!jy5n>^Uw2Ku+q`FSL-5 zf_M?q{x_)3oP`6DH%m_u#^KkETW=Hp@&EAxI_s1@rTR2k6*YMBv7{f>uEnXmoZUzW z%BhppC#8k6yEMW_JB{L?Q-2Y)2 zAv?yE##nJ$Ct{HS+K$Z~*8w^I3{w%H~3iWOG|%pegTaW`2`s$nS1M zYI<9|=dh|mQxG!aB)INZ^2jbsn2*_b{ALqP=BwIk>pYi=%j*ZE7@^S6lZmP_TwJ3i zt1+XOj(kKj)GqV(dvYz(gU@kujcZ)19C@(XluhMgc>JlAXa>%ltFm{wIC#?K9(uC9 zc*{Y8uQ|Qr{I)QLuuIXDe0NOnblwrg%(g7lgUrgt=5sH(q}K>>%&g{f(m&Lg*zPSJvv}IZXMD(?mkG_f24; z^vwPA4CvNT)0XxouG(As5sJssy)ib0PK7U^n-P=oxesh@EdtzIucuOEixrcIByQfg z=9(yne9f!vq#Stg-cq_TrY_G=5jsuYR5s;Pz(GtUk(=X(J|oy2UR|RCdts1ji^I9=STPa;3V`ZD64|CgweMsG5=$PRnpnH zRl`h$j+~wof3I6*V|Q*ML+%xZTanV9z&Pa{`q!hy&Qsk@T>w(s%p)aUMOyj=&cg6F3C9gtx3%_>^>Qf5?Jeq zI|r!Ts7g#;NrBV_ACNE%u)3@j*pA^zl-!g!Q=Q7s7o|9M>owPHG;6t4r~T;Du4rzg zD5EsQBn!=)daOqN=?ioTAiH-*!T0WnoNRsn6xzp9#K^t;!w^+jvjGHRYVLoq;VQ9g 
zl`HuKxedh%=AQHM;Z`&G8_`L!6D@AOBdov`KM!1O-m(_d4EghAT(d`pUP4i`lc|rz>Z*7+lR_BxWt6sc{0CVH6?k4BQQZ zP$VWS_SN7wEkCavGT&faJxXHirKTWyNLUDTfcD%8GD$R&E)>UvL;C+gs%TXJgmvMo z1p%p3WiP2S-(-VbtDm{uC_>k*tU}!^ZuX0Qr1ewHwF~CcZV(%JKRY#Dah%W z`>>$%3U`$w655!*=SHCWtA-!yQ5+X;JAoXjc)?{_rddvO$b(BPp-EMKBA1Gg}I$m8iy-L{7=4SzzxCrH{~S-k9OXcYx&` zeo=Qpo2;Jl!-G){2&dduG<5B{uCxTsWg@~3g|yJh2{|XS@6l(B|8MwG43JH6n{wyE=K!C#lnDme7lhGCeEpllM zkFhpT7ytQOam*Wt29bEQ#I#MQCdGkY@xcbd>I2`)!7b?1deXsehhERuZP=2!p8N8} zy=FD2e@{OYgTxGmR3IMQRfRZcF&UF>Q~(pcjB}6iWKXTFy)_6`+9Epua?V=pL_u7T zyo-geGU1skIi1QA8HU1d@fLH!{g}@o0~8v?v&?maboiQ7+zx0m2lKIG7$7*hG(W`T zG2s2~E3||9RzYz*#Djd;UWp%t?qRA5N4_?r*mNQHXf4Tw4T)sT zJ6uN&Jp2?g`6WZ{+Bia(MJ43E$t8a!9lBYMgHz`v{oh(6-8cD9K>+a7iQp|o_S^k#u4}Dm>84lU4Mggh} z&16)sYhH04`R9#rs=#{RoUQIPn{_5#&JCrFw`v$Yfq=kjWP*OUC1>TvoSwp~49*=q z`NiC4A_xjMy?pFm*Y&ug)cX!&$dtnr^O|v=aM?dgJGiZ_p&24lmvC>$#fn_p1O$2_O6G(RElDFZM*2iilccM%I>dyz=nD!#DBCpH2R ze@=WbaWm<>BGk1Dk}E2HeGX!)2*NCuDIJE=)JWXaSt!XW@2Hv&Hzf`dsix(^-l?gn zyuA|P#tRp`5lq>+*)k_D!6Jd=wPS^HUAQWV(ACACh8xZIU$amqxsm+Vv0U=i8+WmeK3AgziW@Xf!+E| zI^wxeZgI7Q`HlK}O>;a>L30D8;*m=b#};kRU%0*}=;GLoBURQ@s(sm7KJH&e)bB=3 z@7zanC>wqZ!~B95Yi~c-+BE{zJ`>gP6RUqF3k3Kuv5FwnZ&x1~Msfya0{f|Lq!oGs7g;yAMFO;b)`{6b-erFy}3lmukgHRvv2iN=N}AsO_znp2h494 zWg{|@C<7XAL-gIhZTNE(t1A5{fhBC=S{BZ&=-z-@#7=oGK{qo^-+fb*e4~~OP#BG| zzsH34m;H^*&CX{6yPnrZqUQ>y8Tl$(=9cz-=NRJ==rK`}!+O70SR zTRfC`>TLDtO)j=>Do~-|`<`DmFT`;(>E+6y#%d~yC1~Zi8?BR1V#vwpdVx=&Mu>^zDKV&jcMxm;0s-9t{8|VR|vTnzPsXO0F$9)vt&_>YoP<@ z-Kb-4sHGfzpfB2#M7RtB5Yt(Mdt6iHBtQ7H^(I?Y{LNMpN*5Kh0ENdw{*jcLrVPBl z5c`=ztRZ)1m1Bw2ziR;=ug4>LRAaKAT~QVenBc`8%-^06Z&nv%%W?VW<_(fS7V;{k zoFp_t#GVJXD+dSUO>`=!g|EHx5sEf`{ADmLU^)GY7wr?D8iYZLCW@%?v-? 
zK}O2%a!qjY#5f8wNoxx*NpR$GfCh?*F(m4!zvaQg)z!0RgVKrT*WS8*ZDnX z66&0U>gm*o23e5;=duiW#|olf{cLBZ293eIgMG`ntIPMfNk{KRn2S{P&uX z({25Mdqvk*ni?BLR?uqljG@-QXhO?D(+neq>_m=+RSGdmQ4cV5qiFkB$HGP!CE6M| zp07rUm#=a_zN}5Q3#v8Sx?&Kh<$so02Lbstsqpm^Yee`8Pr=e_77ra-Ta(BuRw-S= zUyWP?mqQzPca4NS`r2(jSUf+wSHX2_GEsmOw0r2Q+v2F*Bk_b`09JDOafD$cJ7;tk zYwra)-y^RHLJ>yU@duh3_v+KUAk(PIUYp=F%B|siWmyMu6E8QM_l16PY9nPhL z;88MEq6$6IseYJ@+n}7)-6yeWKwegh5>X@%0-=_>88Y?W1RcWl*k9+l2Dt~bF7l;y z=Ej@bpI1!LIF+ST(^w>2tf>jyrTc+4d?kV_ZznY7s(4?rQ|+o^5y zA2zUnAO(Qkm@#N9Eu{-l*@#^^$D)M2CO>%E7)zwM!hi7u<2UAfVKdQ2YIxz9t8&~{4 zl|SS1UD2%5DEGU$9_yV&Y?UVQE*$=y87Il zOqhq)Z*L4K|J+b>nA<~|!Do)0i>|VyO7(iZz+vhG#|9>@D;yE#HmIsaaXq!I113dY zsPSuIljyGw5KNw5M}_BJU(s=*ylTv+BE*X-TbTacqQ z;Wd_7VLq3yF0;Q>^ixTCIn7Wc313}$JHp^Har2TY_kzPvI7vT!pisk$cH!om2lwJs zA+k#=c|tD}SNU!qTAQuaw-B;leYBPR_r!{K_r>!fXW}H3^vpRXr!Y+K*H(mwj+yLR zI$Y@vXjdb!v~4HNk%k?`QXMHU29J%EY_uVJ-f~={UsYm2YOSBEmI!FCdb4xD z{Pf&)Q2SVxkX!gD5FR?l1DgjY_~;-7l&^1iai4^2xjTj{3&TN^mII56W`KM9uRt#Y zmkHV{{Rk3l=%J)}g=JS~_+3})oH9_ChI^J8yyh4Y6emY<<`qUAk5CdV%q5t=-udv6 zCYX{BqPd#&32^& z^_S)W2os~=4yx!4;?pP3gK4t3>q}^Wng}Gl`;2K;bB6`Ty=dlHbnw?Q|z*0-D{s2F{Y8T^hvuDDFKM*&4$o6r# zq^#@JF@ST|uI2xPQ92LOlNlpr)R_tbgF4R)>yAJQ3;H&Hk5`HON${d4s(Ta`kjP%Q-V}8-!zF7UVx?@=08uI(H9=x z(SHG;IK8Tm)577_Ex1ARvPv8qye`4Lr*-xe0h~aBpSTR1Nxzfdui1{}lIB`UN>}Z{ zPYts+(&ht7kWj)Ur@9XS2>&uD+%`(B3%uWttm&Dbc0K-F^3R;;5H#``i8|(LuquO^ zPYLZ-wI%gWeXiyC7RSfJR~u{~!vf`pbH@PobbSI3UtYc8xBJn+Zo*n1dph+F2X`Yf z>9NuD2zK!kE(>1zo}SkYwb9PIBfSD3<0K>DJ76L2xAx;jSeEv%Dnsl~D|>!F&A#wU z-(OD=tgRR8KW|jevVV~#;$EJOm~#ATxkJ|Ik+D_P^fN9x3%fdKM7)%SgZ$1R$f1W{ zpScQDV$$RNx7bj_uE5?CV}!p zeMC@#C2r9hcvIB{Nb4SgZ0LDrT&jPcV(h2|RV4HpfuOqnJxyxJsVA6f$gpb2@0<7L zbV{UBH;_62SG+Y(4EHjP+w)gLv1$zkmozsf0+2q@)7UDc#Z?(%s!i zC?Fusp}SK!bc%o=-QC^Yao54`e&2n$5BK$szYjWwXYX_NUVE>(=KRH68R0jKxLx*O z&AM+}Pic({-_t)z0@p1Dmx>5Bs|(h%two+u57o>jEB5@k|1|r_(4cr%R`Vu~eHM5k6B*VTM}#q_6Y&11!_?{8N7=W(q_HN}PqVoE$I*(I7%N zHpnoaZaEycL-En49@C}~JjU7}Q&>`3jT$CRca$c+q}lzrg@;SPHWb_5Pr-@Bn<9qN 
z56P7+1OQXJC)dI5c#5cGP$M!7OGgQs-wFC=A}lVjT=}bVJLu?#KGN}C6u()Yza|XZ+4v&RZw^z9M4<6fkbwo# zc8rBCu%UY2evNJylmHK`sO$P){uw-iXTWw zY;O+#ihlmfL|XcCa%)Ggrumzs)9NDBcdRhb47pN0^phyb;LFVj6SOGb5uCZ=$m8ln zp3#~?zDPT1dAWCM&oZD@l&f{B+`@!BsLIJJ02B5-x zGfA{n#Gm`Mi%J~jo>+n=QYlbf+3u6AJoe+KUB5FUV#FUS88I!P%I>ySol)Lw{8j^8 zmf;DfFU0j`wYl*tpX(_{I?Z2?VN(f4Uk0vftxoet#R?*g#88aH0jYz99`2WIFQO9_ z`t^tpk%z9ecEQ9a#EJdh zpE+tWpt{SS%3psW=M5L3N=RvSfn~R7#m6wS`xVd<0L!;^xz2Gstsmz13k^*Hwp(y( z6c)EL9N?*8CmUSLO$wodS%BkW+jY5Y_Kk{tJ$5WG+eEQ07*fm=($*pJ>~0yCFC;ao z$^Y*;60=)q%M1cgT46Vx9t{hhb5KmraT7D%tkF!8H(B7O>K%_}UJp~ES11{=-m19o2g=k=wJ z`&&!}7|0^xH`8{gXR=5f^}RK3y%-xOH-MtNs_+?;dMaX!GtaW{E5q@uhq>eKx(lSZX>TZ}Y`@m8cOwR}RgyGNJ{!6#TJa~j@*uF7cy zS1OU8NeejEB^)}G!N=OOLGj&jb5w6 z@eP9Wl1;=qfn2{kU-VFvJ*pP_&qkl0&U>y4a^GLD63#*&XICVHjUm6~y}KBR1Lds2 zk0RB?XJ3_q{Uu`B4WiW<6C0ETrI^GG-%8FpMdy85i(BD~+LemONlqrBx0xG;x+E9ERkvFlnp)7l5KdZqlV*;1n z$I+O{t1CN}OQ#N!0>D_!g2e(nB^$c1b{j;$rnhLaNCSL}f;LtHTc;h?>pDZal)c!h$!3xfMQzG+UJW$dvpP5fJIjSN8uezg9VKe4 zhLkMxQ{s3?r)ENPa}^qGn($N&ZSa>tmiJ+@*|KToKN5xPn@&*r_}CJ^=Q&RO%*=!J#YaQkNpn9y`|?(=`G z_FVr~d!^_Y@nYl0oW)t1Or&gy0;3iTz2cOR?p`T&dE11j8>uR_72e~w&D~NW9?R{y zWsQyER}%n@?qrzxL|V|JDs4v)zVFh+P#c>Vj%p4x4t@W zcS{>I8JJvXt=9I!*Wsz-GtBjmx!G8${a!cy97OP9T#H3m319G|h(A@s03n&!@GY~x zo0pu+?Tp^54^>!;CeW<9-dhcU0UuOF`s-TdVHX$>DFEoNnacuqJO83I2*Mby1dI&Puhe zMeF2LnFe$g`wgxiO%#yfg!FB1mUfhGTJ4dQ!X8pcmVYT^L9|rbwO|D(J=dP%WMV0pjX#D{ z{V}6y_}Lj#eI1`-Eln0tu#j?K7zD38C;r^tqVkLfNtgZ9%R{oeQT_PmQfVQTA&f5t z=klYq;E5fa4KSTWf0qi^4l%E_8y|U%EscNLZ6WbG6Tca4ZVXp_L+a2IdiMfN>S|E^ zV>XV<} z(x#%7B>9K^UV8N`D}`@PJ#kj^kka?JlZ_E^NTK3MFy(*-lLfQR6VK(jk6(X`K7)Jy}kshlAPG8Lg*o(Y0KQLK%cY(^u1Nc|+~g7P^aIA}sNbCEEqv{NOXB|h>0 zAw~&+02X`}oK_2q$YIsx-*Y8S`T9TwUw|D-IEk|R(P{v-r%%#x!q!$F=LK9^z6hN! 
z8=fYIFYk_q=NY@rJum6>*hcRc@s4sFn#Cw_VG~UWeQ2Y>o@quod!VP@aX)G?Hq9#P zX2o@tWHDbaKjqs=RI#7;zqG;iTElsorPJSMA@>?XYx0sH+uu3rSW-S+kYIq&QBy26$*u`xVlE9 zuE=B58w6^%BNfWFVoi$8UjjA<1ex|fne;>;krC zT6VaX5bSd*iX;1T@5Gx&kQzR2B_04pX~r0k)>KX5FP^Hn2cq&aJVeBbgbF{P^OBq8v6g=Cd_4&cm$N zo>OTmR?G@f{HPiGlHCtqx?n75jsl0+YCs?ONp+>-HcF`1?^G{k(qcxWMj10Q5rCfV=zX55aaS=VtxMoEKJ5KI zCaIW#z|TUg@-`Bu4<|$tDlo?En^OL{3pp+Hi_#axq$=nK@nXh6;iQp-EpKqUe^8cr$tsT~g1`TCl4dbU3e zy_hFD12UzuiDzxXXPxI78e2-=O$?)km);}rE5zEfji$v}xD$tUC{z34J%6zHEG zk&^1|fn^ttJ%OBuFH;pg07RzCg#Y}-jeCj~S|OpF8%&P~wlPYO_?6ztk%HVHj8UV9-{;i@;;;{Qwo@%&W{&R-w5db;X|(~DB@Khy>@9iB#;ch2gUpJ+52^s6+&KdWZ(Uqz`m7Z=NAi&3W*6dC?9 z#|?fBIx2)>#!R93EFvhThsy>$hasKzgb0JVAD&iXIOaDHi>sNTWUMic3}Qa!_<|_WKNEH%$qKm+DW<($Xb=2b_|H?D{FQtYQJgCHk&F)&iP+hG^4<= zn%64gyu)FHI2EqFG3}k2T!>JgXHK|BylU~E0fw2bTPY#)?D-|= z{6~NfS;4EPVA+#1eGnwd+bOFLzMhOck=TE}78jf;Oo#KePo&oWcsAl`%p0{; zkG7@tVH1`K>dWJm5F~!XT9+kPH!A^`Im0RhOnZM&2ElOfOf~n6Lw1?@od`}NopGd5 zk}nEgf4+;o!wVtv-7Bg@QNcrK5z@u42$7@_IWYz5IUdYoM{w>4=Pr$ikYa*R)a+kz zQ(zf86w(`Y!@}i&{i8-o-Pqs zUw44FiwF{XEf{r4k`2?i{nx8i341`r;Z&IH&h@g{CVALhlu4zhq2S+4Biy!Ev3XS{ zc~01l?zZ8X5-yh95&p8efyN9{h#xm`Z}Fo(9hID6i1)&eZaYlW*J`x`;OpaY$S-qC z<&fZrKarRa^zyh7w6ph0N?Bg6ntjyI@J;91Yiyvcg9jMh^vAoiC_oet#( zGrwyOfQOR-*NHAdtdFZ)Byv67Q-2$gP9ADQOiFP|<5QkU>67+^PUnwsS^gT&1;Px( zwWIO$hmaQkkjk3wnpl5PCOcQXxPVT3#0#VX;BJt5uuIp>2t!{Xe_dy-ySMd4e$_Zk z(kBwdQ?11f<}+C&a|JWS@&0C&O2MnHDXh@W!?Q~Q`Ocj^3;M0l*W^@MzcvL%m2hZ8 z-WUU^S^nv`+gDiVuSOEd9&`SHdk*!@`;;KOp4LBGx+Mb)MHHR8)9`6C7GM6x-p%Q| z5{9ZG@SNKgWE)$^n`lA0zH#ELr!rQk{aq}2KE0C-x3Nxm5)CTvoiEL@aIcp3qd<&) z0td@q$Xkr)J6XGEz_bo_X81$lU}EVm#Uuw1jAn@lZif`ErS(C$iP6anb-LOG+ir@1 zd0pl7yYZ*kl0*WSJ$x08cX`Q@QD!qd^JaurSsZV^&>c&9T z7gxBdk~wZVxX^8x=n92Jr_P^0Zh6Ws?x%1G)7D!*ZJ1 zv!{02^VMNcbw>_#x4h5D1iJKW7T{f?su>iHlz|@b`R690exhbZ)}()URhyWZ7?gZd zpHAW(Uz$9tQ}$oOwY{0L%-64~Vr`)x+)53eb_}Xo1K@b*XnTc#@FU&v4rQ2R?peva z5`BtcoI6r>37@z9Acl|aQdcHAv7u{_m@%1ULjuDMS7dDp5|n` ziu$A_pTI6CzLTvgA6tA6I8AKC6dn2) zg%!OIs1>@@w#!=eSb_L$Dnb$4t%!p&T1eD+eE0Io(tbA3#ie^@ 
z2Mv;$O+NW(BBGsj4^eR+xFWv+=Ih>~Zp*!||S zD2X1m#JB&g1(<&0DW&T~Y@Jkx=(w$suShu8ByM)=9%o%p1>Z)H1j5bXZ!2L4p2jr3 zVi3hR>xg^SSZd>Jp*;Ed`s6d2Z&Vxg36n@_BiC_!9Sd1AN!`HfP^R`*DfrvbuE_j5 zk=rFLma3|k^TzQN-848VK$Kn{vK9KRc8Uwl`jbbF2N3ViI5|1ImdF-Q<8dR$Zv$M4 zDtGx-Lae$Ypo@GuP5lPbWNRtkRr|DPUPa)U^_Z$imM+}b16~+Nu4vXQIB4n0G(crq zfx<%pdASD){}(~-s1K=Y@PjsUY;r-sStIkr77Y0Uu5n4;^A|A*ATMVb`_}KDq!YYw zTS!uf`B!7TR2<5{*WihTRySnQS*tG?HFCA>NmkH{!|SzLb%2DA?!!YctrVnRTsy=3 z4Hy>VI6I8qwhbm95v1p_3unB)*~$F-YdaK)$Ek{~@Ri(}0h>zlZH;V9e-W_4IFm;m z_wOvtW6N;@F%4Wzve8ONLXasazdN=@&huI)IY9JX4=99e^iu#EE)W`~LyN8A2I#Ie zBlBJM7eOe|R2r)!5F%z_@8SS4L`Vo$c}7IXo$dMX`jIbnnL8g8{@o(~m|w{aZIm>- z4JvegxAb?Hu4&t|t>yzeWy2iMShvK6C3U3bpgUSAMniz07U!|GPPAl4@WIzLVAlQQ z;wc!4)mN~T@?Hnc3Ox+Q@Mm$l1M<>i74k^_nQXqxKeiYN@bioMPR>=o3|R7BDXpo` zFjPvAOJH`9d%4`8Ed?3KEMEXUdwhbpj#gb!ZAU;KQf(NC(00VM9EHOzM_dkBn8_Pn zg-va5r}y8n#`m%`^(&(D$BcN#-8z8+S2FCVJD|#~PoN(MXuvsxSoZ|AYcT+B_TSR< zb~VFx1+EpE_SoB_n-9P;^Cl8gsk!pZ!rb~}#qUQ0LQFy4ccXau9R2w~cy-W`FDc*2 zU2CZzPC*P+M|X0p5+ky2v=%Z`5a0wgZn$iUD0jIV?buhMiJzp#>pF#DS`EJ59@me^ zBkh9P`gdki#wWA0QnE4@z=+r=EbWh`jT^EAQlx56H9um4h63l2m+PHbUmxE_@-ILm z0f@lqd@LzB)r1}^e-FtAf}QvUtOQ${TUF0x*$QFK5#$_&64STo=9Z;GqfVrl8wBKQ zY1dzv<25h~b971*W|rogfnV^Z4;E*)i62&t-OJh84PZY_J4^%I_ks8Q-|1JW61N9I zV+`m%<0&45_(7~c6qxNV602oixM{AR`8*%YqOQ6{0^03&XBzJlgQJ9T{gRtI3q2P$HhR&4LxL&dZ?nJ(mXtEZM({l4RWi*Ex1$beX?mcd$HEX6BJv|!#hRu7Q zJxR|b%d4n1HlmzDN(?E7Ibl}Z6Ehwj~|7}ypnKY zi?lP<*0bfiwn8dqz-Rvol0E!u6Vn1N@SdPT8P<&NN zJQLvilmz%l0>U>5&KK{m1{n{hqQdpf&HZfZ=Yit)A}(q@J8Iv(o;BKGdfV99y8ZO- zvk*DWIZU{6%r@5^a_yvhAB!$Ostqiy!10DUWKX@V>9n)0cYTH|tZ%DH@~ZV(g*VXJ zjWu)JMoDQQk&)PU(xAuI;iN45YX|(yeNpWPcBDio;jCpS=#22qsG4>e9c}O-pRw8M zX#CDvwNa_=vH3^))oJ!2w6t43u%Bcql;mdX`@wsZy>QkIqT{7w*}cGN1}DY{AmZ9{ zA^{e36g}HRIV`w?p}E~M99LmJx$>?mCNA93#WxAu42WwlnK*%gqHxZLSH-R4_x|XV z8S^%GEb6Y~Xw48ab!C-o2F0!0N?zqJJ=V{pbXB%`y1zo&rr8PzabUH@Ax$XNTab!J zv$DD!WRqPV_?`Fjca3B8MPfjz=)So(duV+oH_d(5aU+Q!k60!cwQZKt9$~|5|`DW&a7}+$}kkvl-b!( 
zout|~j+(&bN07^~l&QhPUY7kxL!Cfb`Xx}Yqq#m}cg}qVn)UpU>h^qT0#p}wU9*o1 zEofe%z}=tFAvJgboz3ClLi|X_*RMd$dDt75@&!s)Awx>ekWZaTJ`e|UL@u7zUmRR% zf#n7mfJMQC;mjo>7R3P&Wpv{A)U7%ofh~U?+RDchB2Y?lyX^J-;?YpoEOMip32wsd zr8wtmw>}Yl4I9(j*_!!`iQ4Bak&8XDwy&Qa8Fo_KvECivM26dY!p|U8YmZgj{ zMng_PD~;Z6FTvM=t5y1*=U@uDD6reQyKwKS!)>G{eaWDvm~N+zkXpLl_lzF5pQ1B# zF}?3=NP%VcBM(YfLql+FzXqwjjuzXCG{$6*jA5Wh)@g-IVZh))qNLKB2qBgf&UBU+ zXrJE`s=ZHF-vKQ^4wj?VkKOthc^XS~pE@jfX>|1OOop-bYsK(7{lRS+a3>Kc9BkRm z`v@{Ipg=5FB|KhyM~T4vjfzQf|zzsOmlSI~p|N#Rcib73J?eRB0q0 z{K8|tdk-}rgNa!+jMvhXs8@y`U|Va)=z1@=EuJZa;0gTy1~y1gnZ$0ne6oD%nyJJ| zqj;&&30ep&4OSO?Kr)JU=ZE0UA$WWq_ZCx#)!#wdK+%P_8Jv*@+P>^nbtJV5Y;pdq zKyAHzoK!m+HFyY;1<>$7R6zmq9`6L^-~AP_iq@kC)Tc1<)#AP9a{S26`PW`Ly3a8dr$+E+riq-8e|U)_aKw3(;XAe9MF!X*F)0>KEuq71gSilL-zeD z@>Z-cck`F*Z=B4PKn0hbdxp|qo;y-t8FLY(lG?tWGSR-`nlej_S-8@T+$a+Q{G|+= zE0;gJkC;4;vvYOdlKTuuIaj2<4?oL;L76=ZQDUlIEB~ocI-Jo*L6VD8Py zMZToA z6z3U8tmoa&bOol-Re;*I7dQ2z`3r7pMZx0XK35mzML>N{6;^|L&d4uWd&=auOK*MDlcAQU)8jSPp3-22*b^UVVx*y3==?? zXX=OOPsy6z()p7C2nhU9Y4J_v#{CZfbO2D94h4da&{0Uge}9abd$wX+g_m-A z?ugv}3vEDl&ftB<0owl;#j+lD;(+p=7#$4W8xu*YpAA1}oc}c=&u+cGMtCyhWFU&m zxixz+*me9WH9`4Q6IA{EyHmknCHq*YMq*hCuriPB>ac~~<8j-q4j%Hzu7;#>EBa1a zWM_SINl)#*YGn=@Iu{QGf-?&<(U|O_fyE+s&=3OzZo)?WOK;Uuc(6T!|GKkvz_fPE z`SH3!?W8|=30Oeyh!7BD6)c@@m_Uxv4E3g}g0khi5oes{<#zUgGQ);An;M-^&Wa?B2N~Y{;T75_A+#lfX*CHI)Et;24_{ zeeyYjCN{c~$W@gr3lr@GH4fI#ZClBIP)7~=v`w=vUWUV&6{ zA{0i0jS=zU03G^hOro@l)3`Esy*X{5heFL5*Y~;iB$Qv$lPGq(r72lzi6&+g!}rcQ z93j|R=3pLRI6CXyuQ??(Plwx+oPVbK8|;vnjpkm*3d=G*!WZs57E2os$ZyisJY1HE zVLI>UD z?aswatCq{^0>k&KFEt-ng1OqT;-C224XuJyBCTT34}_VC5lv)2nK-{ZNd@4e01Fo~ z=uFh{b$oxpuK%?WZpAg116RSp?^ukTEt5rd#5XY?M*lz&t@G)n^y<2He*wlj9SVk@ zSk%-3VUy_mX(|2hjqjtC5qJCiMOtM?jG7le+;zkKu|ZJ-wt6ilDI! 
zZ;U1z9{PY8%fGzB8UQ~&2;!b@uE5(G!DgRnYnmo7Zj*hBcuotLC zJdn+xHU;u|)w(FYyW@`xni_vr12ohc?4A4PQz3-?H~E_$3pbP8hZB?Bc1B}u{looaD!A5QdxIFd z+1j}5dfLgEslKr@r!iKxDDv~Mu{&d_>S6Kv**>wtVciBT@w>?@7~Lk7acj7-<6b{+=WcSlSoCv#aG$?{Pt&AWm4i_ zcW-UpjltnuruVFzEkdGg;Jg{F*<0&ff!T<@^vQH?6Q#&;#=R{=lt19H2;n%TkGh0-OOw{1g@ZJC@HO;N;bybhbQrJuX_y`p6}W@F<8q%zK2vo8boMXLJ4Hp0HXxJmlv}+}jcR1CAz7liZ4N7m$e_VJ zudh09vy=Aq34NL>0?``C9&_5fDk>>9zMNB69viP{YN~iQy1R!RRiO}tH+|1%KT}-} zjcK2DuBf!LHXgl(Pj>3+%Ey902>Do@|2%55zML~tRv!OZ*OYP^nv4b4QJ3s6wVRXz z=@v6)M)!E>xzwP-fHd1qWH9e2Fmr5=YCT8=jcKYPbg5a_jG>xwj2H( zHHw2STih8}*{uA(hV_KPAI7~I&uNic(x?Y~XK%>Fq4TY~vDLV+6MZss)~5CgA5O$) zSRjg#k5$mebp!h@!#=gis+15foO`k7y|41pM_|wf-u?2{3)yLk-Z}l@dI?F%cAaM5 z-VKBk!ue(KBcB}=a{ARLPd!tBoeqcQtNeDe(tK1xMGu+N<_Y-u#+!`31)0KCYG>Hz zK4iKMSqjdQ7O9`&N}!#47UHJl`hv!Y9yLnkT=& zjJ2y!T!!sm_i-P=MTmTNh4fcAmsflI$ZqlywUCuU>3{vY7e2XAbb-x6;Sb8dF0}m; zZy70FOE&-Yf=7cUit}nns9AG9X=$_b=Q{XQP>>*Ylt@lu%8B}Lv4Xi0M*;HPlG4cV z6VJE?e80*|sEHpz$gNokANpSVEFPn;a@fR!tN*GjQJb_fEMCI$UssiWXz_N{9b8o& zXj#7_`F0gbo+@=DGW@UcyNP1Vibi&{aRLUyr#SG<>R|7xxRqRS-YJKd4?m4DBNE9N zBkwii@Z86%>=993cC;Nu7geMO8`nG&BJ7X_7Z;wc&Y1NUIk1(28Ev*jtb7w>s54C5M>&SFiiq(`j-Y+U%Q>&DU^# zY5d4Y83hHGrJ>M-XnDR)Jy(tLTi8>BrYr?RPs^x%l`si Cpwk`z diff --git a/glance/tests/var/testserver.ova b/glance/tests/var/testserver.ova deleted file mode 100644 index f593d29f300e6b196386950692c1fd5f3945ab8d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 20480 zcmeHO-E!Nw8O`nHDOmMlZ&s#6N|xlIwIfS%qG@DlEj!KhW``mni5rU4^2a~z>`Y&& zkJ2~l0SJ&FC0n$Uw6k00A_;*HA3mJ(0R#@Hh=fr{f;$qN_;;%}-{emNe@4lFWoUEeYzjCDbx$nn-AGcrX?yD#Wq9{3gs zLqB-7hr96gL-^1JOT(PKwU`{r$-Lr@o z^WD;a$Tho5xjz?T==7LOe2WyIF~9D8=YhB zk)WR?3wQC9;dA~dKmEIOL9A;h+97YujQcUvJTk@vtF;6ZeTkh7&14QWQ;>)ATSLv& zWpS}$Ik~Rl?UDpELwRfpnuol-Y*_Fivva6xxxUa;BB04Tl8Oq(;$_Ql(Ybl9u+V^W z^HXeszUZ+px+s?OW|(&ps0R_w-om$@6NUXLS(_J+ND3ZJvAz(HAgJBcl<5UUdP}x` zfaER9(UjRM3#|0KQAuK+=&~F)NKY1ENp7$@G4q#jgZOx>FJQ;E18kP)^k)4J`12Di 
zI5rvjUKIFPiv!U{P*i9mqdXanm8>D}OExI9l`%fpr)*GYDH=ZT_1yh(62FAOQW{&>%AV2n``lELm2IMkRPy z3~mOmo)3q6EysjN)ACFV-VY~*+%7X8G=YYgVEH%m@wdjy)}=UH@*qo~f4jJNHTYRa z&sx9d2{SHy{IqT@kCC`3rpD`j(i-c94mVn_u2NBM}Mfk;6mVz z!@Xyh=VC$4l_562yiLiMg4|6xCOV~=5cy5K(D$5Qn|enqcY+mRQNTb{Gb(j4*K(!2 zMfq1kN|~5Yub?stVzKpk0h5*;_**e@L2qHiGF!t@{dzW4MuBrDdXuXQEoIVjJ;@b? zHqs>O9%Yly0P*hOj|Go8Zkqoa#ssuX^(a4csP>>twoGVYTep&6%iTPK^?N^z1Uh|{ zHlq3o6=rydIfDYM5>dUfL<#WA5GCfa2=JvZmjlb)>*mN9N>~6*M=@<2B&B8m2_fWp;kW<6U;ZPu`wGW~j%aW@-%d*=I+e$XL@v9cGeRQOA-m z7Mfc99>SV`AJ&va^!!JwVQN~d*Q>8O-A=t_H+yxnYj^6J-X)f)nI`FWf2%2p5}7GF zH6@YG&TFYmR#QUH+=UxIy7XO}1n0E{e@1Vv8Agl9jL2|>zL|+n0?ToC7VPlHt5Yq;z+;_ zG6Iv{I7os}T1I!Le5mhkC+Y_TJ#_t$3|P)UEjHa4vCsW`63kYsL_?=MAqk&z@{Vek zREKyEkvetL(HcLr8jaFkuBiO&{*@6p_d|ycJ9F06e$e2MIc;;H`bcO~$8&Cc-@Pz# zW#X(osvD*bmwtr3VX5F2<{g=0KNk^u5MAS6XN$`X4%QMv*6DV zt45aSTAE1U1-cwE1wr9vt)?J5cW#V}GcNL`3m7bgBbr9z2Zea%62ddCQ?QGYhp=aQ z1$K!&H`ma(U(oF1d@_20`R%7?jpu`LR-cTHUGXG2uov?OGguRZOv0?yk%36Zyd~ogAiR5y*R3rS*}RAxc)LjXpw>^F@u^A*-PK**G9_`86cih=}gf8I=&P zm~phBn*v;M#jaaRp0RfLF(zvpyN=wTE+(@@j-bJCKFJtAki3)GU_Kc>Yl15@Us;(p znc(J~xnNH-8bR`ituMvy263I&auI=Ziu06sF|f2zJ%yr*{2q8VEtI2&E%ktx2F*df zG#0(hKaEFZ8B1%({tQv`Fc_F4nG#hPs(2i4EZj@)`;zVe(^X=?_p_nv1A<0P_z-YR z2Sa`kMQyjy&^kT6ciKTZS#+ujGu*(^r4#Lw`PS9it;TpNEzGn>nwV^nodQ07nJpqg z?S$UwdZx?c7ETZk`pb7^lG7CZx_~W2qsRN zEV%xz?*jh;*V;U(X~_m>sCs~%n~U!@M2$2rk}(>1>6)S9`fK{)5M~N!J;Z zMZ9UzlWA3k3{0ZqWHg!`U8~5DHf{%#|BK1M8~HdH-C=Vy_$F&#H8T=jC3z2Fvr~eM zPOsB!SIbngii*F#{xSuKkCUN~hn;pcGrsvUby|ju-l#ilVK4Uew`kQeB`;HWEgzp5 z!@;=Oxva<#3smy&7cWzAkbGj9?RJgZ!F6sYZ_KfOR`!oJeCs}$UEl+BoLV0ARkx;0 zu75hO;mt#L^YnmnUc>GVq9axC@$G1CKQDVeu&2&EYpG{!FphgR)chEX5A#=!s3S(y zd1d7KVGo9y3&|M7w&ROK2GHW)44u~o!ye{`O>YTtCK};eODkfxz|y2C9^#Gbn;{NJ z_<(j6+};c!3Ed3k@QuX=_uY0PIENA=#?WV?zqawWgQr*96)|P+XN{pwxrS4JKLhPwp;Ee6o+v_1OS(qD5kcn+$xG8#(zS ze6H@1AKt}P4|IH$j)aQ*YNEA!>loU}WRxnu#QT~4wt=e2_+Wm!g9O*ospO#dG#zK4 zhOR>*QdX|H#+g{g-B4Nh`*9hn5P1bs#sU%;`prZXNr%|4^#AC5uS#LB{z zH0*jH0c5vpX6=-EPH z%(J5I4JzyF$uZGylf;lyaB*8usKSbg 
zs^-CFK$F%TYNqGpq#}fg@RX{l95WIJaRQBCDzucqt=}oElz!~=;{AWG=w||J=Lj6AuBD++P$5tuP$5tuP$5tuP$5tuP$5tu aP$5tuP$5tuP$5tuP$5tuP$BU9M&Liq;spNy diff --git a/glance/version.py b/glance/version.py deleted file mode 100644 index 0172206c..00000000 --- a/glance/version.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import pbr.version - -version_info = pbr.version.VersionInfo('glance') diff --git a/httpd/README b/httpd/README deleted file mode 100644 index 20bf42f8..00000000 --- a/httpd/README +++ /dev/null @@ -1,2 +0,0 @@ -Documentation for running Glance with Apache HTTPD is in -doc/source/apache-httpd.rst diff --git a/httpd/glance-api-uwsgi.ini b/httpd/glance-api-uwsgi.ini deleted file mode 100644 index 3b10be8e..00000000 --- a/httpd/glance-api-uwsgi.ini +++ /dev/null @@ -1,17 +0,0 @@ -[uwsgi] -http-auto-chunked = true -http-chunked-input = true -http-raw-body = true -chmod-socket = 666 -lazy-apps = true -add-header = Connection: close -buffer-size = 65535 -thunder-lock = true -plugins = python -enable-threads = true -exit-on-reload = true -die-on-term = true -master = true -processes = 4 -http = 127.0.0.1:60999 -wsgi-file = /usr/local/bin/glance-wsgi-api diff --git a/httpd/uwsgi-glance-api.conf b/httpd/uwsgi-glance-api.conf deleted file mode 100644 index 84bd7dd8..00000000 --- a/httpd/uwsgi-glance-api.conf +++ /dev/null @@ -1,2 +0,0 @@ -KeepAlive Off -ProxyPass "/image" "http://127.0.0.1:60999" retry=0 
diff --git a/pylintrc b/pylintrc deleted file mode 100644 index 6b073fd9..00000000 --- a/pylintrc +++ /dev/null @@ -1,27 +0,0 @@ -[Messages Control] -# W0511: TODOs in code comments are fine. -# W0142: *args and **kwargs are fine. -# W0622: Redefining id is fine. -disable-msg=W0511,W0142,W0622 - -[Basic] -# Variable names can be 1 to 31 characters long, with lowercase and underscores -variable-rgx=[a-z_][a-z0-9_]{0,30}$ - -# Argument names can be 2 to 31 characters long, with lowercase and underscores -argument-rgx=[a-z_][a-z0-9_]{1,30}$ - -# Method names should be at least 3 characters long -# and be lowercased with underscores -method-rgx=[a-z_][a-z0-9_]{2,50}$ - -# Module names matching nova-* are ok (files in bin/) -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_-]+))$ - -# Don't require docstrings on tests. -no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ - -[Design] -max-public-methods=100 -min-public-methods=0 -max-args=6 diff --git a/rally-jobs/README.rst b/rally-jobs/README.rst deleted file mode 100644 index 8a204c99..00000000 --- a/rally-jobs/README.rst +++ /dev/null @@ -1,32 +0,0 @@ -Rally job related files -======================= - -This directory contains rally tasks and plugins that are run by OpenStack CI. - -Structure ---------- - -* plugins - directory where you can add rally plugins. Almost everything in - Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic - cleanup resources, .... - -* extra - all files from this directory will be copy pasted to gates, so you - are able to use absolute paths in rally tasks. 
- Files will be located in ~/.rally/extra/* - -* glance.yaml is a task that is run in gates against OpenStack (nova network) - deployed by DevStack - - -Useful links ------------- - -* More about Rally: https://rally.readthedocs.org/en/latest/ - -* Rally release notes: https://rally.readthedocs.org/en/latest/release_notes.html - -* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html - -* About plugins: https://rally.readthedocs.org/en/latest/plugins.html - -* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins diff --git a/rally-jobs/extra/README.rst b/rally-jobs/extra/README.rst deleted file mode 100644 index 836f35a0..00000000 --- a/rally-jobs/extra/README.rst +++ /dev/null @@ -1,5 +0,0 @@ -Extra files -=========== - -All files from this directory will be copy pasted to gates, so you are able to -use absolute path in rally tasks. Files will be in ~/.rally/extra/* diff --git a/rally-jobs/extra/fake.img b/rally-jobs/extra/fake.img deleted file mode 100644 index e69de29b..00000000 diff --git a/rally-jobs/glance.yaml b/rally-jobs/glance.yaml deleted file mode 100644 index 8e5db0e9..00000000 --- a/rally-jobs/glance.yaml +++ /dev/null @@ -1,45 +0,0 @@ ---- - GlanceImages.create_and_list_image: - - - args: - image_location: "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 20 - concurrency: 5 - context: - users: - tenants: 1 - users_per_tenant: 1 - - GlanceImages.create_and_delete_image: - - - args: - image_location: "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 20 - concurrency: 5 - context: - users: - tenants: 5 - users_per_tenant: 2 - - GlancePlugin.create_and_list: - - - args: - image_location: "~/.rally/extra/fake.img" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - 
times: 700 - concurrency: 7 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/rally-jobs/plugins/README.rst b/rally-jobs/plugins/README.rst deleted file mode 100644 index 9b989240..00000000 --- a/rally-jobs/plugins/README.rst +++ /dev/null @@ -1,9 +0,0 @@ -Rally plugins -============= - -All *.py modules from this directory will be auto-loaded by Rally and all -plugins will be discoverable. There is no need of any extra configuration -and there is no difference between writing them here and in rally code base. - -Note that it is better to push all interesting and useful benchmarks to Rally -code base, this simplifies administration for Operators. \ No newline at end of file diff --git a/rally-jobs/plugins/plugin_sample.py b/rally-jobs/plugins/plugin_sample.py deleted file mode 100644 index 6274d85f..00000000 --- a/rally-jobs/plugins/plugin_sample.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2014 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" Sample of plugin for Glance. 
- -For more Glance related benchmarks take a look here: -github.com/openstack/rally/tree/master/samples/tasks/scenarios/glance - -About plugins: https://rally.readthedocs.org/en/latest/plugins.html - -Rally concepts https://wiki.openstack.org/wiki/Rally/Concepts -""" - -import os - -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils - - -class GlancePlugin(scenario.OpenStackScenario): - - @atomic.action_timer("glance.create_image_label") - def _create_image(self, image_name, container_format, - image_location, disk_format, **kwargs): - """Create a new image. - - :param image_name: String used to name the image - :param container_format: Container format of image. - Acceptable formats: ami, ari, aki, bare, ovf, ova and docker. - :param image_location: image file location used to upload - :param disk_format: Disk format of image. Acceptable formats: - ami, ari, aki, vhd, vhdx, vmdk, raw, qcow2, vdi, and iso. - :param **kwargs: optional parameters to create image - - returns: object of image - """ - - kw = { - "name": image_name, - "container_format": container_format, - "disk_format": disk_format, - } - - kw.update(kwargs) - - try: - if os.path.isfile(os.path.expanduser(image_location)): - kw["data"] = open(os.path.expanduser(image_location)) - else: - kw["copy_from"] = image_location - - image = self.clients("glance").images.create(**kw) - image = utils.wait_for(image, - is_ready=utils.resource_is("active"), - update_resource=utils.get_from_manager(), - timeout=100, - check_interval=0.5) - finally: - if "data" in kw: - kw["data"].close() - - return image - - @atomic.action_timer("glance.list_images_label") - def _list_images(self): - return list(self.clients("glance").images.list()) - - @scenario.configure(context={"cleanup": ["glance"]}) - def create_and_list(self, container_format, - image_location, disk_format, **kwargs): - self._create_image(self.generate_random_name(), - container_format, - image_location, 
- disk_format, - **kwargs) - self._list_images() diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/notes/Prevent-removing-last-image-location-d5ee3e00efe14f34.yaml b/releasenotes/notes/Prevent-removing-last-image-location-d5ee3e00efe14f34.yaml deleted file mode 100644 index 344e6e5d..00000000 --- a/releasenotes/notes/Prevent-removing-last-image-location-d5ee3e00efe14f34.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -security: - - Fixing bug 1525915; image might be transitioning - from active to queued by regular user by removing - last location of image (or replacing locations - with empty list). This allows user to re-upload - data to the image breaking Glance's promise of - image data immutability. From now on, last - location cannot be removed and locations cannot - be replaced with empty list. diff --git a/releasenotes/notes/add-cpu-thread-pinning-metadata-09b1866b875c4647.yaml b/releasenotes/notes/add-cpu-thread-pinning-metadata-09b1866b875c4647.yaml deleted file mode 100644 index 583185b1..00000000 --- a/releasenotes/notes/add-cpu-thread-pinning-metadata-09b1866b875c4647.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - Added additional metadata for CPU thread pinning policies to - 'compute-cpu-pinning.json'. Use the ``glance_manage`` tool to upgrade. diff --git a/releasenotes/notes/add-ploop-format-fdd583849504ab15.yaml b/releasenotes/notes/add-ploop-format-fdd583849504ab15.yaml deleted file mode 100644 index 3749eed0..00000000 --- a/releasenotes/notes/add-ploop-format-fdd583849504ab15.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -prelude: > - - Add ``ploop`` to the list of supported disk formats. -features: - - The identifier ``ploop`` has been added to the list of - supported disk formats in Glance. The respective - configuration option has been updated and the default - list shows ``ploop`` as a supported format. 
-upgrade: - - The ``disk_format`` config option enables ``ploop`` as - supported by default. diff --git a/releasenotes/notes/add-processlimits-to-qemu-img-c215f5d90f741d8a.yaml b/releasenotes/notes/add-processlimits-to-qemu-img-c215f5d90f741d8a.yaml deleted file mode 100644 index 33647173..00000000 --- a/releasenotes/notes/add-processlimits-to-qemu-img-c215f5d90f741d8a.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -security: - - All ``qemu-img info`` calls are now run under resource - limitations that limit the CPU time and address space - usage of the process running the command to 2 seconds - and 1 GB respectively. This addresses the bug - https://bugs.launchpad.net/glance/+bug/1449062 - - Current usage of "qemu-img" is limited to Glance tasks, - which by default (since the Mitaka release) are only - available to admin users. We continue to recommend that - tasks only be exposed to trusted users diff --git a/releasenotes/notes/add-vhdx-format-2be99354ad320cca.yaml b/releasenotes/notes/add-vhdx-format-2be99354ad320cca.yaml deleted file mode 100644 index 37c926c4..00000000 --- a/releasenotes/notes/add-vhdx-format-2be99354ad320cca.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -prelude: > - - Add ``vhdx`` to list of supported disk format. -features: - - The identifier ``vhdx`` has been added to the list of - supported disk formats in Glance. The respective - configuration option has been updated and the default - list shows ``vhdx`` as a supported format. -upgrade: - - The ``disk_format`` config option enables ``vhdx`` as - supported by default. diff --git a/releasenotes/notes/alembic-migrations-902b31edae7a5d7d.yaml b/releasenotes/notes/alembic-migrations-902b31edae7a5d7d.yaml deleted file mode 100644 index 27e05949..00000000 --- a/releasenotes/notes/alembic-migrations-902b31edae7a5d7d.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- -prelude: > - - **Experimental** zero-downtime database upgrade using an - expand-migrate-contract series of operations is available. 
-upgrade: - - | - The database migration engine used by Glance for database upgrades has been - changed from *SQLAlchemy Migrate* to *Alembic* in this release. - - * This has necessitated a change in the location and naming convention for - migration scripts. Developers, operators, and DevOps are strongly - encouraged to read through the `Database Management`_ section of the - Glance documentation for details of the changes introduced in the Ocata - release. Here's a brief summary of the changes: - - - All the ``glance manage db`` commands are changed appropriately to use - Alembic to perform operations such as ``version``, ``upgrade``, - ``sync`` and ``version_control``. Hence, the "old-style" migration - scripts will no longer work with the Ocata glance manage db commands. - - - Database versions are no longer numerical. Instead, they are the - *revision ID* of the last migration applied on the database. - - * For example, the Liberty migration, which was version ``42`` under - the old system, will now appear as ``liberty``. The Mitaka - migrations ``43`` and ``44`` appear as ``mitaka01`` and ``mitaka02``, - respectively. - - * The change in migration engine has been undertaken in order to enable - zero-downtime database upgrades, which are part of the effort to - implement rolling upgrades for Glance (scheduled for the Pike release). - - - A preview of zero-downtime database upgrades is available in this - release, but it is **experimental** and **not supported for production - systems**. Please consult the `Database Management`_ section of the - Glance documentation for details. - - .. 
_`Database Management`: http://docs.openstack.org/developer/glance/db.html diff --git a/releasenotes/notes/api-minor-version-bump-bbd69dc457fc731c.yaml b/releasenotes/notes/api-minor-version-bump-bbd69dc457fc731c.yaml deleted file mode 100644 index 9d0f075c..00000000 --- a/releasenotes/notes/api-minor-version-bump-bbd69dc457fc731c.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -prelude: > - - The *minor* version of the Images API v2 is bumped to **2.5**. -upgrade: - - | - The **CURRENT** version of the version 2 Images API supplied - by Glance is now **2.5**. Changes include: - - * The 'visibility' enumeration has been increased from two values - (``public``, ``private``) to four values (``public``, ``private``, - ``shared``, and ``community``). - - * Formerly, it was possible to add members to an image whose - visibility was ``private``, thereby creating a "shared" image. - In this release, an image must have a visibility of ``shared`` - in order to accept member operations. Attempting to add a - member to an image with a visibility of ``private`` will result - in a `4xx response`_ containing an informative message. - - .. _`4xx response`: https://developer.openstack.org/api-ref/image/v2/?expanded=create-image-member-detail#create-image-member diff --git a/releasenotes/notes/bug-1537903-54b2822eac6cfc09.yaml b/releasenotes/notes/bug-1537903-54b2822eac6cfc09.yaml deleted file mode 100644 index f35f3541..00000000 --- a/releasenotes/notes/bug-1537903-54b2822eac6cfc09.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -upgrade: - - Metadata definitions previously associated with OS::Nova::Instance - have been changed to be associated with OS::Nova::Server in order to - align with Heat and Searchlight. You may either upgrade them using - glance-manage db load_metadefs [path] [merge] [prefer_new] or - glance-manage db upgrade 44. 
-fixes: - - Metadata definitions previously associated with OS::Nova::Instance - have been changed to be associated with OS::Nova::Server in order to - align with Heat and Searchlight. diff --git a/releasenotes/notes/bug-1593177-8ef35458d29ec93c.yaml b/releasenotes/notes/bug-1593177-8ef35458d29ec93c.yaml deleted file mode 100644 index d84ff098..00000000 --- a/releasenotes/notes/bug-1593177-8ef35458d29ec93c.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - The ``default`` policy in ``policy.json`` now uses the - admin role rather than any role. This is to make the - policy file restrictive rather than permissive and - tighten security. diff --git a/releasenotes/notes/bump-api-2-4-efa266aef0928e04.yaml b/releasenotes/notes/bump-api-2-4-efa266aef0928e04.yaml deleted file mode 100644 index 808a0481..00000000 --- a/releasenotes/notes/bump-api-2-4-efa266aef0928e04.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -prelude: > - - Glance API ``minor`` version bumped to 2.4. -upgrade: - - | - Glance API **CURRENT** ``minor`` version is now ``2.4``. - - * To partially fix an important image locations - bug 1587985, an API impacting change has been merged - into Glance. - * This will result into a non-backward compatible - experience before and after **Newton** release, for - users using ``add`` feature to image locations. diff --git a/releasenotes/notes/clean-up-acceptable-values-store_type_preference-39081e4045894731.yaml b/releasenotes/notes/clean-up-acceptable-values-store_type_preference-39081e4045894731.yaml deleted file mode 100644 index aae5bb90..00000000 --- a/releasenotes/notes/clean-up-acceptable-values-store_type_preference-39081e4045894731.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -upgrade: - - | - Deprecated values are no longer recognized for the configuration option - ``store_type_preference``. - The two non-standard values 'filesystem' and 'vmware_datastore' were - DEPRECATED in Newton and are no longer operable. 
The correct values - for those stores are 'file' and 'vmware'. See the Newton release notes - for more information at https://docs.openstack.org/releasenotes/glance/newton.html#upgrade-notes diff --git a/releasenotes/notes/consistent-store-names-57374b9505d530d0.yaml b/releasenotes/notes/consistent-store-names-57374b9505d530d0.yaml deleted file mode 100644 index 4ec91639..00000000 --- a/releasenotes/notes/consistent-store-names-57374b9505d530d0.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -upgrade: - - | - Some backend store names were inconsistent between glance - and glance_store. This meant that operators of the - VMware datastore or file system store were required to use - store names in ``glance-api.conf`` that did not correspond - to any valid identifier in glance_store. As this situation - encouraged misconfiguration and operator unhappiness, we - have made the store names consistent in the Newton - release. What this means for you: - - * This change applies only to operators who are using - multiple image locations - * This change applies only to operators using the VMware - datastore or filesystem stores - * This change applies only to the ``store_type_preference`` - option - * *VMware datastore operators*: The old name, now - **DEPRECATED**, was ``vmware_datastore``. The **new** - name, used in both glance and glance_store, is - ``vmware`` - * *File system store operators*: the old name, now - **DEPRECATED**, was ``filesystem``. The **new** name, - used in both glance and glance_store, is ``file`` - * This change is backward compatible, that is, the old - names will be recognized by the code during the deprecation - period. 
Support for the deprecated names will be removed in - the **Pike** release - * We strongly encourage operators to modify their - ``glance-api.conf`` files immediately to use the **new** - names diff --git a/releasenotes/notes/deprecate-glance-api-opts-23bdbd1ad7625999.yaml b/releasenotes/notes/deprecate-glance-api-opts-23bdbd1ad7625999.yaml deleted file mode 100644 index b213b0e0..00000000 --- a/releasenotes/notes/deprecate-glance-api-opts-23bdbd1ad7625999.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -deprecations: - - - The use_user_token, admin_user, admin_password, admin_tenant_name, - auth_url, auth_strategy and auth_region options in the [DEFAULT] - configuration section in glance-api.conf are deprecated, and will - be removed in the O release. - See https://wiki.openstack.org/wiki/OSSN/OSSN-0060 diff --git a/releasenotes/notes/deprecate-show-multiple-location-9890a1e961def2f6.yaml b/releasenotes/notes/deprecate-show-multiple-location-9890a1e961def2f6.yaml deleted file mode 100644 index 5ed03e01..00000000 --- a/releasenotes/notes/deprecate-show-multiple-location-9890a1e961def2f6.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -prelude: > - - Deprecate the ``show_multiple_locations`` configuration - option in favor of the existing Role Based Access - Control (RBAC) for Image locations which uses - ``policy.json`` file to define the appropriate rules. -upgrade: - - | - Some additional points about ``show_multiple_locations`` - configuration option deprecation. - - * Maintaining two different ways to configure, enable - and/or disable a feature is painful for developers and - operators, so the less granular means of controlling - this feature will be eliminated in the **Ocata** - release. - * For the Newton release, this option will still be - honored. However, it is important to update - ``policy.json`` file for glance-api nodes. 
In - particular, please consider updating the policies - ``delete_image_location``, ``get_image_location`` and - ``set_image_location`` as per your requirements. As - this is an advanced option and prone to expose some - risks, please check the policies to ensure security - and privacy of your cloud. - * Future releases will ignore this option and just - follow the policy rules. It is recommended that this - option is disabled for public endpoints and is used - only internally for service-to-service communication. - * As mentioned above, the same recommendation applies - to the policy-based configuration for exposing - multiple image locations. diff --git a/releasenotes/notes/deprecate-v1-api-6c7dbefb90fd8772.yaml b/releasenotes/notes/deprecate-v1-api-6c7dbefb90fd8772.yaml deleted file mode 100644 index 79b70983..00000000 --- a/releasenotes/notes/deprecate-v1-api-6c7dbefb90fd8772.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -prelude: > - - The Images (Glance) version 1 API has been DEPRECATED. - Please see deprecations section for more information. - -deprecations: - - With the deprecation of the Images (Glance) version 1 - API in the Newton release, it is subject to removal on - or after the Pike release. The configuration options - specific to the Images (Glance) v1 API have also been - deprecated and are subject to removal. An indirectly related - configuration option enable_v2_api has been deprecated too - as it becomes redundant once the Images (Glance) v1 API is - removed. Appropriate warning messages have been setup for - the deprecated configuration options and when the Images - (Glance) v1 API is enabled (being used). Operators are - advised to deploy the Images (Glance) v2 API. The standard - OpenStack deprecation policy will be followed for the - removals. 
diff --git a/releasenotes/notes/exp-emc-mig-fix-a7e28d547ac38f9e.yaml b/releasenotes/notes/exp-emc-mig-fix-a7e28d547ac38f9e.yaml deleted file mode 100644 index f0904994..00000000 --- a/releasenotes/notes/exp-emc-mig-fix-a7e28d547ac38f9e.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - | - There was a bug in the **experimental** zero-downtime database upgrade - path introduced in the Ocata release that prevented the **experimental** - upgrade from working. This has been fixed in the Pike release. The - bug did not affect the normal database upgrade operation. diff --git a/releasenotes/notes/glare-ectomy-72a1f80f306f2e3b.yaml b/releasenotes/notes/glare-ectomy-72a1f80f306f2e3b.yaml deleted file mode 100644 index 7a42599a..00000000 --- a/releasenotes/notes/glare-ectomy-72a1f80f306f2e3b.yaml +++ /dev/null @@ -1,41 +0,0 @@ ---- -upgrade: - - | - Code for the OpenStack Artifacts Service (`Glare`_) and its EXPERIMENTAL - API has been removed from the Glance codebase, as it was relocated into an - independent `Glare`_ project repository during a previous release cycle. - The database upgrade for the Glance Pike release drops the Glare tables - (named 'artifacts' and 'artifact_*') from the Glance database. - - OpenStack deployments, packagers, and deployment projects which provided - Glare should have begun to consume Glare from its own `Glare`_ repository - during the Newton and Ocata releases. With the Pike release, it is no - longer possible to consume Glare code from the Glance repository. - - .. _`Glare`: https://git.openstack.org/cgit/openstack/glare -other: - - | - Code for the OpenStack Artifacts Service (Glare) and its EXPERIMENTAL API - has been `removed`_ from the Glance codebase. - - The Artifacts API was an EXPERIMENTAL API that ran on the Glance service - endpoint as ``/v3`` in the Liberty release.
In the Mitaka release, the - Glance ``/v3`` EXPERIMENTAL API was deprecated and the Artifacts Service - ran on its own endpoint (completely independent from the Glance service - endpoint) as an EXPERIMENTAL API, versioned as ``v0.1``. In both the - Liberty and Mitaka releases, Glare ran on code stored in the Glance code - repository and used its own tables in the Glance database. - - In the Newton release, the Glare code was relocated into its own `Glare`_ - project repository. Also in the Newton release, Glare ran an EXPERIMENTAL - Artifacts API versioned as ``v1.0`` on its own endpoint and used its own - database. - - For the Pike release, the legacy Glare code has been removed from the - Glance code repository and the legacy 'artifacts' and 'artifact_*' database - tables are dropped from the Glance database. As the Artifacts service API - was an EXPERIMENTAL API in Glance and has not used the Glance database - since Mitaka, no provision is made for migrating data from the Glance - database to the Glare database. - - .. _`removed`: http://specs.openstack.org/openstack/glance-specs/specs/mitaka/implemented/deprecate-v3-api.html diff --git a/releasenotes/notes/image-visibility-changes-fa5aa18dc67244c4.yaml b/releasenotes/notes/image-visibility-changes-fa5aa18dc67244c4.yaml deleted file mode 100644 index 9b9136e3..00000000 --- a/releasenotes/notes/image-visibility-changes-fa5aa18dc67244c4.yaml +++ /dev/null @@ -1,175 +0,0 @@ ---- -prelude: > - - - The *Community Images* feature has been introduced in the Images API v2. - This enables a user to make an image available for consumption by all other - users. In association with this change, the 'visibility' values for an - image have been expanded to include 'community' and 'shared'. -features: - - | - Image 'visibility' changes. - - * Prior to Ocata, an image with 'private' visibility could become shared by - adding members to it, though its visibility remained 'private'. 
In order - to make the visibility of images more clear, in Ocata the following - changes are introduced: - - - A new value for visibility, 'shared', is introduced. Images that have - or can accept members will no longer be displayed as having 'private' - visibility, reducing confusion among end users. - - - An image must have 'shared' visibility in order to accept members. - This provides a safeguard from 'private' images being shared - inadvertently. - - - In order to preserve backward compatibility with the current sharing - workflow, the default visibility of an image in Ocata is 'shared'. - Consistent with pre-Ocata behavior, this will allow the image to accept - member operations without first updating the visibility of the image. - (Keep in mind that an image with visibility 'shared' but having no - members is not actually accessible to anyone other than the image - owner, so this is not in itself a security problem.) - - - | - Image visibility may be specified at the time of image creation. - - * As mentioned above, the default visibility of an image is 'shared'. - If a user wants an image to be private and not accept any members, a - visibility of 'private' can be explicitly assigned at the time of - creation. - - - Such an image will require its visibility to be updated to - 'shared' before it will accept members. - - - | - Image visibility is changed using the image update (PATCH) call. - - * Note: This is not a change. It's simply mentioned for completeness. - - - | - A new value for the Image 'visibility' field, 'community', is introduced. - - * An image with 'community' visibility is available for consumption by any - user. - - * In order to prevent users spamming other users' image-list response, - community images are not included in the image-list response unless - specifically requested by a user.
- - - For example, ``GET v2/images?visibility=community`` - - - As is standard behavior for the image-list call, other filters may - be applied to the request. For example, to see the community images - supplied by user ``931efe8a-0ad7-4610-9116-c199f8807cda``, the - following call would be made: ``GET v2/images?visibility=community&owner=931efe8a-0ad7-4610-9116-c199f8807cda`` -upgrade: - - | - A new value for the Image 'visibility' field, 'community', is introduced. - - * The ability to update an image to have 'community' visibility is - governed by a policy target named 'communitize_image'. The default - is empty, that is, any user may communitize an image. - - - | - Visibility migration of current images - - * Prior to Ocata, the Glance database did not have a 'visibility' column, - but instead used a boolean 'is_public' column, which was translated - into 'public' or 'private' visibility in the Images API v2 image - response. As part of the upgrade to Ocata, a 'visibility' column - is introduced into the images table. It will be populated as follows - - - All images currently with 'public' visibility (that is, images for - which 'is_public' is True in the database) will have their visibility - set to 'public'. - - - Images currently with 'private' visibility (that is, images for which - 'is_public' is False in the database) **and** that have image members, - will have their visibility set to 'shared'. - - - Those images currently with 'private' visibility (that is, images for - which 'is_public' is False in the database) and that have **no** - image members, will have their visibility set to 'private'. - - * Note that such images will have to have their visibility updated - to 'shared' before they will accept members. - - | - Impact of the Ocata visibility changes on end users of the Images API v2 - - * We have tried to minimize the impact upon end users, but want to point - out some issues to be aware of. 
- - - The migration of image visibility assigns sensible values to images, - namely, 'private' to images that end users have *not* assigned members, - and 'shared' to those images that have members at the time of the - upgrade. Previously, if an end user wanted to share a private image, - a member could be added directly. After the upgrade, the image will - have to have its visibility changed to 'shared' before a member can - be assigned. - - - The default value of 'shared' may seem weird, but it preserves the - pre-upgrade workflow of: (1) create an image with default visibility, - (2) add members to that image. Further, an image with a visibility - of 'shared' that has no members is not accessible to other users, so - it is functionally a private image. - - - The image-create operation allows a visibility to be set at the time - of image creation. This option was probably not used much given that - previously there were only two visibility values available, one of - which ('public') is by default unassignable by end users. Operators - may wish to update their documentation or tooling to specify a - visibility value when end users create images. To summarize: - - * 'public' - reserved by default for images supplied by the operator - for the use of all users - - * 'private' - the image is accessible only to its owner - - * 'community' - the image is available for consumption by all users - - * 'shared' - the image is completely accessible to the owner and - available for consumption by any image members - - - | - Impact of the Ocata visibility changes on the Images API v1 - - * The DEPRECATED Images API v1 does not have a concept of "visibility", - and in a "pure" v1 deployment, you would not notice that anything had - changed. Since, however, we hope that there aren't many of those around - anymore, here's what you can expect to see if you use the Images API v1 - in a "mixed" deployment. 
- - In the v1 API, images have an ``is_public`` field (but no - ``visibility`` field). Images for which ``is_public`` is True are the - equivalent of images with 'public' visibility in the v2 API. Images - for which ``is_public`` is false are the equivalent of v2 'shared' - images if they have members, or the equivalent of v2 'private' images - if they have no members. - - - An image that has 'community' visibility in the v2 API will have - ``is_public`` == False in the v1 API. It will behave like a private - image, that is, only the owner (or an admin) will have access to the - image, and only the owner (or an admin) will see the image in the - image-list response. - - - Since the default value for 'visibility' upon image creation is - 'shared', an image freshly created using the v1 API can have members - added to it, just as it did pre-Ocata. - - - If an image has a visibility of 'private' when viewed in the v2 API, - then that image will not accept members in the v1 API. If a user - wants to share such an image, the user can: - - * Use the v2 API to change the visibility of the image to 'shared'. - Then it will accept members in either the v1 or v2 API. - - * Use the v1 API to update the image so that ``is_public`` is - False. This will reset the image's visibility to 'shared', and - it will now accept member operations. - - * Note that in either case, when dealing with an image that has - 'private' visibility in the v2 API, there is a safeguard against - a user unintentionally adding a member to an image and exposing - data. The safeguard is that you must perform an additional - image update operation in either the v1 or v2 API before you - can expose it to other users.
diff --git a/releasenotes/notes/improved-config-options-221c58a8c37602ba.yaml b/releasenotes/notes/improved-config-options-221c58a8c37602ba.yaml deleted file mode 100644 index 5a3a49d3..00000000 --- a/releasenotes/notes/improved-config-options-221c58a8c37602ba.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- -prelude: > - - Improved configuration option descriptions and handling. -other: - - | - The glance configuration options have been improved with - detailed help texts, defaults for sample configuration - files, explicit choices of values for operators to - choose from, and a strict range defined with ``min`` and - ``max`` boundaries. - - * It must be noted that the configuration options that - take integer values now have a strict range defined - with ``min`` and/or ``max`` boundaries where - appropriate. - * This renders the configuration options incapable of - taking certain values that may have been accepted - before but were actually invalid. - * For example, configuration options specifying counts, - where a negative value was undefined, would have still - accepted the supplied negative value. Such options - will no longer accept negative values. - * Options where a negative value was previously defined - (for example, -1 to mean unlimited) will remain - unaffected by this change. - * Values which do not comply with the new restrictions - will prevent the service from starting. The logs will - contain a message indicating the problematic - configuration option and the reason why the supplied - value has been rejected. diff --git a/releasenotes/notes/location-add-status-checks-b70db66100bc96b7.yaml b/releasenotes/notes/location-add-status-checks-b70db66100bc96b7.yaml deleted file mode 100644 index 97b41f29..00000000 --- a/releasenotes/notes/location-add-status-checks-b70db66100bc96b7.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- -prelude: > - - Adding locations to a non-active or non-queued image - is no longer allowed. 
-critical: - - | - Attempting to set image locations to an image *not* in - ``active`` or ``queued`` status will now result in a - HTTP Conflict (HTTP status code 409) to the user. - - * Until now, no image status checks were in place while - **adding** a location on it. In some circumstances, - this may result in a bad user experience. It may also - cause problems for a security team evaluating the - condition of an image in ``deactivated`` status. - * **Adding** locations is disallowed on the following - image statuses - ``saving``, ``deactivated``, - ``deleted``, ``pending_delete``, ``killed``. - * Note that there are race conditions associated with - adding a location to an image in the ``active``, - ``queued``, ``saving``, or ``deactivated`` status. - Because these are non-terminal image statuses, it is - possible that when a user attempts to add a location, - a status transition could occur that might block the - **add** (or might appear to allow an add that should - not be allowed). - * For example, a user is not allowed to add a location - to an image in ``saving`` status. Suppose a user - decides to add a location anyway. It is possible that - before the user's request is processed, the - transmission of data being saved is completed and the - image transitioned into ``active`` status, in which - case the user's add location request will succeed. To - the user, however, this success will appear anomalous - because in most cases, an attempt to add a location to - an image in ``saving`` status will fail. - * We mention this so that you can be aware of this - situation in your own testing. 
diff --git a/releasenotes/notes/lock_path_config_option-2771feaa649e4563.yaml b/releasenotes/notes/lock_path_config_option-2771feaa649e4563.yaml deleted file mode 100644 index f6171c00..00000000 --- a/releasenotes/notes/lock_path_config_option-2771feaa649e4563.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - The lock_path config option from oslo.concurrency is now required for using - the sql image_cache driver. If one is not specified it will default to - the image_cache_dir and emit a warning. diff --git a/releasenotes/notes/make-task-api-admin-only-by-default-7def996262e18f7a.yaml b/releasenotes/notes/make-task-api-admin-only-by-default-7def996262e18f7a.yaml deleted file mode 100644 index b1958e27..00000000 --- a/releasenotes/notes/make-task-api-admin-only-by-default-7def996262e18f7a.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -deprecations: - - The task API was added to allow users to upload images asynchronously - and for deployers to have more control in the upload process. Unfortunately, - this API has not worked the way it was expected to. Therefore, the task API - has entered a deprecation period and it is meant to be replaced by the new - import API. This change makes the task API admin only by default so that it - is not accidentally deployed as a public API. -upgrade: - - The task API is being deprecated and it has been made admin only. If deployers - of Glance would like to have this API as a public one, it is necessary to - change the `policy.json` file and remove `role:admin` from every `task` - related field.
\ No newline at end of file diff --git a/releasenotes/notes/new_image_filters-c888361e6ecf495c.yaml b/releasenotes/notes/new_image_filters-c888361e6ecf495c.yaml deleted file mode 100644 index a0cc8fe1..00000000 --- a/releasenotes/notes/new_image_filters-c888361e6ecf495c.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -features: - - Implement the ability to filter images by the properties `id`, - `name`, `status`,`container_format`, `disk_format` using the 'in' - operator between the values. - Following the pattern of existing filters, new filters are specified as - query parameters using the field to filter as the key and the filter - criteria as the value in the parameter. - Filtering based on the principle of full compliance with the template, - for example 'name = in:deb' does not match 'debian'. - Changes apply exclusively to the API v2 Image entity listings - An example of an acceptance criteria using the 'in' operator for name - ?name=in:name1,name2,name3. - These filters were added using syntax that conforms to the latest - guidelines from the OpenStack API Working Group. - diff --git a/releasenotes/notes/newton-1-release-065334d464f78fc5.yaml b/releasenotes/notes/newton-1-release-065334d464f78fc5.yaml deleted file mode 100644 index 745f95ec..00000000 --- a/releasenotes/notes/newton-1-release-065334d464f78fc5.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -prelude: > - - Glance no longer returns a 500 when 4 byte unicode - characters are passed to the metadefs API. - - Deprecated "sign-the-hash" approach for image signing. - Old run_tests and related scripts have been removed. -upgrade: - - The image signature verification feature has been - updated to follow the "sign-the-data" approach, which - uses a signature of the image data directly. The prior - deprecated "sign-the-hash" approach, which uses a - signature of an MD5 hash of the image data, has been - removed. 
-security: - - The initial implementation of the image signature - verification feature in Glance was insecure, because it - relied on an MD5 hash of the image data. More details - can be found in bug 1516031. This "sign-the-hash" - approach was deprecated in Mitaka, and has been removed - in Newton. Related CVE-2015-8234. diff --git a/releasenotes/notes/newton-bugs-06ed3727b973c271.yaml b/releasenotes/notes/newton-bugs-06ed3727b973c271.yaml deleted file mode 100644 index a7fd49ce..00000000 --- a/releasenotes/notes/newton-bugs-06ed3727b973c271.yaml +++ /dev/null @@ -1,61 +0,0 @@ ---- -fixes: - - | - Here is a list of other important bugs that have been - fixed (or partially fixed) along with their - descriptions. - - * bug 1617258: Image signature base64 needs to wrap - lines - * bug 1612341: Add cpu thread pinning flavor metadef - * bug 1609571: version negotiation api middleware was - NOT up to date to include v2.3 - * bug 1602081: Glance needs to use oslo.context's - policy dict - * bug 1599169: glance-replicator size raises object of - type 'NoneType' has no len() exception when no args - provided - * bug 1599192: glance-replicator needs to display - human-readable size - * bug 1585917: member-create will raise 500 error if - member-id is greater than 255 characters - * bug 1598985: glance-replicator compare output should - show image name in addition to image id for missing - images - * bug 1533949: Glance tasks missing configuration item - "conversion_format" - * bug 1593177: The default policy needs to be admin for - safer default deployment scenarios - * bug 1584076: Swift ACLs disappears on v1 Glance - images - * bug 1591004: Unable to download image with no - checksum when cache is enabled - * bug 1584415: Listing images with the created_at and - updated_at filters fails if an operator is not - specified - * bug 1590608: Services should use http_proxy_to_wsgi - middleware from oslo.middleware library - * bug 1584350: etc/glance-registry.conf sample file 
has - redundant store section - * bug 1543937: db-purge fails for very large number - * bug 1580848: There's no exception when import task is - created without properties - * bug 1585584: Glare v0.1 is unable to create public - artifact draft - * bug 1582304: Allow tests to run when http proxy is set - * bug 1570789: Metadefs API returns 500 error when 4 - byte unicode character is passed - * bug 1532243: glance fails silently if a task flow - can not be loaded - * bug 1568894: glance_store options missing in - glance-scrubber.conf and glance-cache.conf sample - files - * bug 1568723: secure_proxy_ssl_header not in sample - configuration files - * bug 1535231: md-meta with case insensitive string has - problem during creating - * bug 1555275: Tags set changes on delete - * bug 1558683: Versions endpoint does not support - X-Forwarded-Proto - * bug 1557495: Possible race conditions during status - change diff --git a/releasenotes/notes/oslo-log-use-stderr-changes-07f5daf3e6abdcd6.yaml b/releasenotes/notes/oslo-log-use-stderr-changes-07f5daf3e6abdcd6.yaml deleted file mode 100644 index ba928e1e..00000000 --- a/releasenotes/notes/oslo-log-use-stderr-changes-07f5daf3e6abdcd6.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -upgrade: - - A recent change to oslo.log (>= 3.17.0) set the default value - of ``[DEFAULT]/use_stderr`` to ``False`` in order to prevent - duplication of logs (as reported in bug \#1588051). Since this - would change the current behaviour of certain glance commands - (e.g., glance-replicator, glance-cache-manage, etc.), we chose to - override the default value of ``use_stderr`` to ``True`` in those - commands. We also chose not to override that value in any Glance - service (e.g., glance-api, glance-registry) so that duplicate - logs are not created by those services. 
Operators that have a - usecase that relies on logs being reported on standard error may - set ``[DEFAULT]/use_stderr = True`` in the appropriate service's - configuration file upon deployment. diff --git a/releasenotes/notes/pike-metadefs-changes-95b54e0bf8bbefd6.yaml b/releasenotes/notes/pike-metadefs-changes-95b54e0bf8bbefd6.yaml deleted file mode 100644 index c9bd82d8..00000000 --- a/releasenotes/notes/pike-metadefs-changes-95b54e0bf8bbefd6.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -upgrade: - - | - The following metadata definitions have been modified in the Pike release: - - * The property ``img_hide_hypervisor_id`` has been added to the namespace - ``OS::Compute::LibvirtImage``. - - You may upgrade these definitions using: - - ``glance-manage db load_metadefs [--path ] [--merge] [--prefer_new]`` diff --git a/releasenotes/notes/range-header-request-83cf11eebf865fb1.yaml b/releasenotes/notes/range-header-request-83cf11eebf865fb1.yaml deleted file mode 100644 index 9a1fcd3b..00000000 --- a/releasenotes/notes/range-header-request-83cf11eebf865fb1.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -fixes: - - | - Glance had been accepting the Content-Range header for GET v2/images/{image_id}/file requests, - contrary to RFC 7233. - Following RFC 7233, Glance will now: - - * Accept the Range header in requests to serve partial images. - * Include a ``Content-Range`` header upon successful delivery of the requested partial content. - - Please note that not all Glance storage backends support partial downloads. A Range request to a - Glance server with such a backend will result in the entire image content being delivered - despite the 206 response code. 
diff --git a/releasenotes/notes/remove-db-downgrade-0d1cc45b97605775.yaml b/releasenotes/notes/remove-db-downgrade-0d1cc45b97605775.yaml deleted file mode 100644 index 25546875..00000000 --- a/releasenotes/notes/remove-db-downgrade-0d1cc45b97605775.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -prelude: > - - Database downgrades have been removed from the Glance - source tree. -upgrade: - - The ``db_downgrade`` command has been removed from the - ``glance-manage`` utility and all database downgrade - scripts have been removed. In accord with OpenStack - policy, Glance cannot be downgraded any more. Operators - are advised to make a full database backup of their - production data before attempting any upgrade. diff --git a/releasenotes/notes/remove-osprofiler-paste-ini-options-c620dedc8f9728ff.yaml b/releasenotes/notes/remove-osprofiler-paste-ini-options-c620dedc8f9728ff.yaml deleted file mode 100644 index 3c7d6f5a..00000000 --- a/releasenotes/notes/remove-osprofiler-paste-ini-options-c620dedc8f9728ff.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -deprecations: - - OSprofiler support requires passing of trace information - between various OpenStack services. This information is - signed by one of HMAC keys, which we historically defined - in glance-api-paste.ini and glance-registry-paste.ini files - (together with enabled option, that in fact was duplicated in - the corresponding configuration files). - OSprofiler 0.3.1 and higher supports passing this information - via configuration files, therefore it's recommended to - modify the ``[filter:osprofiler]`` section in \*-paste.ini to look like - ``paste.filter_factory = osprofiler.web:WsgiMiddleware.factory`` - and set the ``hmac_keys`` option in the glance-\*.conf files.
diff --git a/releasenotes/notes/remove-s3-driver-639c60b71761eb6f.yaml b/releasenotes/notes/remove-s3-driver-639c60b71761eb6f.yaml deleted file mode 100644 index 9f3474d3..00000000 --- a/releasenotes/notes/remove-s3-driver-639c60b71761eb6f.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -prelude: > - - The ``s3`` store driver has been removed. -upgrade: - - The latest release of glance_store library does not have - the support for the ``s3`` driver. All code references - of the same have been removed from the library. As this - release of Glance uses the updated glance_store library, - you will find the ``s3`` driver support removed from - Glance too. For example the Glance image location - strategy modules no longer offer the ``s3`` driver - support. diff --git a/releasenotes/notes/reordered-store-config-opts-newton-3a6575b5908c0e0f.yaml b/releasenotes/notes/reordered-store-config-opts-newton-3a6575b5908c0e0f.yaml deleted file mode 100644 index e6fd3daf..00000000 --- a/releasenotes/notes/reordered-store-config-opts-newton-3a6575b5908c0e0f.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -prelude: > - - Sample configuration file shipped with Glance source now - has reordered store drivers configuration options for - future consistent ordering. -other: - - | - The sample configuration files autogenerated using the - oslo-config-generator tool now give consistent ordering - of the store drivers configurations. - - * Some operators have reported issues with reordering - observed in the sample configurations shipped with - Glance release tarballs. This reordering may result - in an incorrect "diff" of the configurations used - downstream vs. newly introduced upstream. - * Latest release of ``glance_store`` library (used - in the **Newton** release of Glance) will include fix - for the ``glance_store`` bug 1619487. - * Until now every run of the oslo-config-generator - resulted in random ordering of the store drivers - configuration.
After **Newton** release this order - will remain consistent. - * The store drivers configuration order in the sample or - autogenerated files should be expected to be - alphabetical as - ``cinder``, ``filesystem``, - ``http``, ``rbd``, ``sheepdog``, ``swift``, - ``vmware``. - * Note the code name for the "ceph" driver is ``rbd``. - * Note the ordering of the options within a store is not - alphabetical. diff --git a/releasenotes/notes/restrict_location_updates-05454bb765a8c92c.yaml b/releasenotes/notes/restrict_location_updates-05454bb765a8c92c.yaml deleted file mode 100644 index 270785c1..00000000 --- a/releasenotes/notes/restrict_location_updates-05454bb765a8c92c.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -prelude: > - Location updates for images are now restricted to - images in ``active`` or ``queued`` status. Please - refer to the "Bug Fixes" section for more information. - -fixes: - - | - Image location updates to an image which is not in - ``active`` or ``queued`` status can introduce race - conditions and security issues and hence a bad - experience for users and operators. As a result, - we have restricted image location updates in this - release. 
Users will now observe the following: - - * HTTP Response Code 409 (Conflict) will be returned - in response to an attempt to remove an image - location when the image status is not ``active`` - * HTTP Response Code 409 (Conflict) will be returned - in response to an attempt to replace an image - location when the image status is not ``active`` or - ``queued`` diff --git a/releasenotes/notes/scrubber-exit-e5d77f6f1a38ffb7.yaml b/releasenotes/notes/scrubber-exit-e5d77f6f1a38ffb7.yaml deleted file mode 100644 index 8e91f497..00000000 --- a/releasenotes/notes/scrubber-exit-e5d77f6f1a38ffb7.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -fixes: - - | - Please note a change in the Scrubber's behavior in case - of job fetching errors: - - * If configured to work in daemon mode, the Scrubber - will log an error message at level critical, but - will not exit the process. - * If configured to work in non-daemon mode, the Scrubber - will log an error message at level critical and exit - with status one. diff --git a/releasenotes/notes/soft_delete-tasks-43ea983695faa565.yaml b/releasenotes/notes/soft_delete-tasks-43ea983695faa565.yaml deleted file mode 100644 index 2c91f516..00000000 --- a/releasenotes/notes/soft_delete-tasks-43ea983695faa565.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -prelude: > - - Expired tasks are now deleted. -other: - - Expired tasks are now deleted in Glance. As with other - Glance resources, this is a "soft" deletion, that is, a - deleted task is marked as ``deleted`` in the database so - that the task will not appear in API responses, but the - information associated with the task persists in the - database. 
diff --git a/releasenotes/notes/trust-support-registry-cfd17a6a9ab21d70.yaml b/releasenotes/notes/trust-support-registry-cfd17a6a9ab21d70.yaml deleted file mode 100644 index e2279099..00000000 --- a/releasenotes/notes/trust-support-registry-cfd17a6a9ab21d70.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - Implemented re-authentication with trusts when updating image status in - registry after image upload. When long-running image upload takes some a lot - of time (more than token expiration time) glance uses trusts to receive new - token and update image status in registry. It allows users to upload big - size images without increasing token expiration time. diff --git a/releasenotes/notes/update-show_multiple_locations-helptext-7fa692642b6b6d52.yaml b/releasenotes/notes/update-show_multiple_locations-helptext-7fa692642b6b6d52.yaml deleted file mode 100644 index 8cd19587..00000000 --- a/releasenotes/notes/update-show_multiple_locations-helptext-7fa692642b6b6d52.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -other: - - | - The deprecation path for the configuration option - ``show_multiple_locations`` has been changed because - the mitigation instructions for `OSSN-0065`_ refer to - this option. It is now subject to removal on or after - the **Pike** release. The help text for this option - has been updated accordingly. - - .. _`OSSN-0065`: https://wiki.openstack.org/wiki/OSSN/OSSN-0065 diff --git a/releasenotes/notes/use-cursive-c6b15d94845232da.yaml b/releasenotes/notes/use-cursive-c6b15d94845232da.yaml deleted file mode 100644 index 307b090e..00000000 --- a/releasenotes/notes/use-cursive-c6b15d94845232da.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -other: - - | - Glance and Nova contain nearly identical digital - signature modules. In order to better maintain and - evolve this code and to eliminate the possibility that - the modules diverge, we have replaced the digital - signature module in Glance with the new ``cursive`` - library. 
- - * The ``cursive`` library is an OpenStack project which - implements OpenStack-specific verification of digital - signatures. - * In Newton, the majority of the signature verification - code was removed from Glance. ``cursive`` has been - added to Glance as a dependency and will be installed - by default. - * Glance uses the ``cursive`` library's functionality to - verify digital signatures. To familiarize yourself - with this new dependency and see the list of - transitive dependencies visit - http://git.openstack.org/cgit/openstack/cursive diff --git a/releasenotes/notes/virtuozzo-hypervisor-fada477b64ae829d.yaml b/releasenotes/notes/virtuozzo-hypervisor-fada477b64ae829d.yaml deleted file mode 100644 index 2f6ea47f..00000000 --- a/releasenotes/notes/virtuozzo-hypervisor-fada477b64ae829d.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -upgrade: - - | - The metadata definition for ``hypervisor_type`` in the - ``OS::Compute::Hypervisor`` namespace has been extended to include - the Virtuozzo hypervisor, designated as ``vz``. You may upgrade - the definition using: - - ``glance-manage db load_metadefs [--path ] [--merge] [--prefer_new]`` diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index ed0ae548..00000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,278 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Glance Release Notes documentation build configuration file, created by -# sphinx-quickstart on Tue Nov 3 17:40:50 2015. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -import openstackdocstheme - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. 
-project = u'Glance Release Notes' -copyright = u'2015, Glance Developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -from glance.version import version_info as glance_version -# The full version, including alpha/beta/rc tags. -release = glance_version.version_string_with_vcs() -# The short X.Y version. -version = glance_version.canonical_version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. 
See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] -html_theme_path = [openstackdocstheme.get_html_theme_path()] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. 
-# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -html_use_index = False - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'GlanceReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'GlanceReleaseNotes.tex', u'Glance Release Notes Documentation', - u'Glance Developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. 
-# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'glancereleasenotes', u'Glance Release Notes Documentation', - [u'Glance Developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'GlanceReleaseNotes', u'Glance Release Notes Documentation', - u'Glance Developers', 'GlanceReleaseNotes', - 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index e672189e..00000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -====================== - Glance Release Notes -====================== - -.. 
toctree:: - :maxdepth: 1 - - unreleased - ocata - newton - mitaka - liberty diff --git a/releasenotes/source/liberty.rst b/releasenotes/source/liberty.rst deleted file mode 100644 index 36217be8..00000000 --- a/releasenotes/source/liberty.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================== - Liberty Series Release Notes -============================== - -.. release-notes:: - :branch: origin/stable/liberty diff --git a/releasenotes/source/mitaka.rst b/releasenotes/source/mitaka.rst deleted file mode 100644 index e5456096..00000000 --- a/releasenotes/source/mitaka.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Mitaka Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/mitaka diff --git a/releasenotes/source/newton.rst b/releasenotes/source/newton.rst deleted file mode 100644 index 07d91d3d..00000000 --- a/releasenotes/source/newton.rst +++ /dev/null @@ -1,7 +0,0 @@ -=================================== - Newton Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/newton - :earliest-version: 13.0.0 diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst deleted file mode 100644 index ebe62f42..00000000 --- a/releasenotes/source/ocata.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Ocata Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/ocata diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index cd22aabc..00000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== - Current Series Release Notes -============================== - -.. 
release-notes:: diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 0c6c01a4..00000000 --- a/requirements.txt +++ /dev/null @@ -1,58 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -pbr!=2.1.0,>=2.0.0 # Apache-2.0 - -# < 0.8.0/0.8 does not work, see https://bugs.launchpad.net/bugs/1153983 -SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT -eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT -PasteDeploy>=1.5.0 # MIT -Routes>=2.3.1 # MIT -WebOb>=1.7.1 # MIT -sqlalchemy-migrate>=0.11.0 # Apache-2.0 -sqlparse>=0.2.2 # BSD -alembic>=0.8.10 # MIT -httplib2>=0.7.5 # MIT -oslo.config!=4.3.0,!=4.4.0,>=4.0.0 # Apache-2.0 -oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.context>=2.14.0 # Apache-2.0 -oslo.utils>=3.20.0 # Apache-2.0 -stevedore>=1.20.0 # Apache-2.0 -futurist!=0.15.0,>=0.11.0 # Apache-2.0 -taskflow>=2.7.0 # Apache-2.0 -keystoneauth1>=3.0.1 # Apache-2.0 -keystonemiddleware>=4.12.0 # Apache-2.0 -WSME>=0.8 # MIT -PrettyTable<0.8,>=0.7.1 # BSD - -# For paste.util.template used in keystone.common.template -Paste # MIT - -jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT -python-keystoneclient>=3.8.0 # Apache-2.0 -pyOpenSSL>=0.14 # Apache-2.0 -# Required by openstack.common libraries -six>=1.9.0 # MIT - -oslo.db>=4.24.0 # Apache-2.0 -oslo.i18n!=3.15.2,>=2.1.0 # Apache-2.0 -oslo.log>=3.22.0 # Apache-2.0 -oslo.messaging!=5.25.0,>=5.24.2 # Apache-2.0 -oslo.middleware>=3.27.0 # Apache-2.0 -oslo.policy>=1.23.0 # Apache-2.0 - -retrying!=1.3.0,>=1.2.3 # Apache-2.0 -osprofiler>=1.4.0 # Apache-2.0 - -# Glance Store -glance-store>=0.18.0 # Apache-2.0 - - -debtcollector>=1.2.0 # Apache-2.0 -cryptography>=1.6 # BSD/Apache-2.0 -cursive>=0.1.2 # Apache-2.0 - -# timeutils -iso8601>=0.1.11 # MIT -monotonic>=0.6 # Apache-2.0 diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 
775cc697..00000000 --- a/setup.cfg +++ /dev/null @@ -1,93 +0,0 @@ -[metadata] -name = glance -summary = OpenStack Image Service -description-file = - README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = http://docs.openstack.org/developer/glance/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - -[files] -data_files = - etc/glance/metadefs = etc/metadefs/* -packages = - glance - -[entry_points] -console_scripts = - glance-api = glance.cmd.api:main - glance-cache-prefetcher = glance.cmd.cache_prefetcher:main - glance-cache-pruner = glance.cmd.cache_pruner:main - glance-cache-manage = glance.cmd.cache_manage:main - glance-cache-cleaner = glance.cmd.cache_cleaner:main - glance-control = glance.cmd.control:main - glance-manage = glance.cmd.manage:main - glance-registry = glance.cmd.registry:main - glance-replicator = glance.cmd.replicator:main - glance-scrubber = glance.cmd.scrubber:main -wsgi_scripts = - glance-wsgi-api = glance.common.wsgi_app:init_app -glance.common.image_location_strategy.modules = - location_order_strategy = glance.common.location_strategy.location_order - store_type_strategy = glance.common.location_strategy.store_type -oslo.config.opts = - glance.api = glance.opts:list_api_opts - glance.registry = glance.opts:list_registry_opts - glance.scrubber = glance.opts:list_scrubber_opts - glance.cache= glance.opts:list_cache_opts - glance.manage = glance.opts:list_manage_opts -oslo.config.opts.defaults = - glance.api = glance.common.config:set_cors_middleware_defaults -glance.database.migration_backend = - sqlalchemy = oslo_db.sqlalchemy.migration -glance.database.metadata_backend = - sqlalchemy = glance.db.sqlalchemy.metadata - 
-glance.flows = - import = glance.async.flows.base_import:get_flow - -glance.flows.import = - convert = glance.async.flows.convert:get_flow - introspect = glance.async.flows.introspect:get_flow - ovf_process = glance.async.flows.ovf_process:get_flow - -[build_sphinx] -all_files = 1 -build-dir = doc/build -source-dir = doc/source -warning-is-error = 1 - -[egg_info] -tag_build = -tag_date = 0 -tag_svn_revision = 0 - -[compile_catalog] -directory = glance/locale -domain = glance - -[update_catalog] -domain = glance -output_dir = glance/locale -input_file = glance/locale/glance.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -mapping_file = babel.cfg -output_file = glance/locale/glance.pot - -[pbr] -autodoc_index_modules = True -autodoc_exclude_modules = - glance.tests.* - glance.db.sqlalchemy.* -api_doc_dir = contributor/api diff --git a/setup.py b/setup.py deleted file mode 100644 index 566d8443..00000000 --- a/setup.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. 
-# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 0f9baa3b..00000000 --- a/test-requirements.txt +++ /dev/null @@ -1,38 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -# Hacking already pins down pep8, pyflakes and flake8 -hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 - -# For translations processing -Babel!=2.4.0,>=2.3.4 # BSD - -# Needed for testing -bandit>=1.1.0 # Apache-2.0 -coverage!=4.4,>=4.0 # Apache-2.0 -ddt>=1.0.1 # MIT -fixtures>=3.0.0 # Apache-2.0/BSD -mock>=2.0 # BSD -sphinx>=1.6.2 # BSD -requests>=2.14.2 # Apache-2.0 -testrepository>=0.0.18 # Apache-2.0/BSD -testresources>=0.2.4 # Apache-2.0/BSD -testscenarios>=0.4 # Apache-2.0/BSD -testtools>=1.4.0 # MIT -psutil>=3.2.2 # BSD -oslotest>=1.10.0 # Apache-2.0 -os-testr>=0.8.0 # Apache-2.0 - -# Optional packages that should be installed when testing -PyMySQL>=0.7.6 # MIT License -psycopg2>=2.5 # LGPL/ZPL -pysendfile>=2.0.0 # MIT -qpid-python;python_version=='2.7' # Apache-2.0 -xattr>=0.4 # MIT -python-swiftclient>=3.2.0 # Apache-2.0 - -# Documentation -os-api-ref>=1.0.0 # Apache-2.0 -openstackdocstheme>=1.11.0 # Apache-2.0 -reno!=2.3.1,>=1.8.0 # Apache-2.0 diff --git a/tools/test-setup.sh b/tools/test-setup.sh deleted file mode 100755 index 07a07854..00000000 --- a/tools/test-setup.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash -xe - -# This script will be run by OpenStack CI before unit tests are run, -# it sets up the test system as needed. -# Developers should setup their test systems in a similar way. - -# This setup needs to be run as a user that can run sudo. 
- -# The root password for the MySQL database; pass it in via -# MYSQL_ROOT_PW. -DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} - -# This user and its password are used by the tests, if you change it, -# your tests might fail. -DB_USER=openstack_citest -DB_PW=openstack_citest - -sudo -H mysqladmin -u root password $DB_ROOT_PW - -# It's best practice to remove anonymous users from the database. If -# a anonymous user exists, then it matches first for connections and -# other connections from that host will not work. -sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " - DELETE FROM mysql.user WHERE User=''; - FLUSH PRIVILEGES; - GRANT ALL PRIVILEGES ON *.* - TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;" - -# Now create our database. -mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " - SET default_storage_engine=MYISAM; - DROP DATABASE IF EXISTS openstack_citest; - CREATE DATABASE openstack_citest CHARACTER SET utf8;" - -# Same for PostgreSQL -# The root password for the PostgreSQL database; pass it in via -# POSTGRES_ROOT_PW. 
-DB_ROOT_PW=${POSTGRES_ROOT_PW:-insecure_slave} - -# Setup user -root_roles=$(sudo -H -u postgres psql -t -c " - SELECT 'HERE' from pg_roles where rolname='$DB_USER'") -if [[ ${root_roles} == *HERE ]];then - sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" -else - sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" -fi - -# Store password for tests -cat << EOF > $HOME/.pgpass -*:*:*:$DB_USER:$DB_PW -EOF -chmod 0600 $HOME/.pgpass - -# Now create our database -psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest" -createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 03e3b6bd..00000000 --- a/tox.ini +++ /dev/null @@ -1,83 +0,0 @@ -[tox] -minversion = 2.3.1 -envlist = py35,py27,pep8 -skipsdist = True - -[testenv] -basepython = - {py27,debug-py27,debug,venv,cover,docs,bandit,pep8,api-ref,genconfig,releasenotes,py27-oslo-master}: python2.7 - {py35,debug-py35,py35-oslo-master}: python3.5 -setenv = - VIRTUAL_ENV={envdir} - PYTHONWARNINGS=default::DeprecationWarning -# NOTE(hemanthm): The environment variable "OS_TEST_DBAPI_ADMIN_CONNECTION" -# must be set to force oslo.db tests to use a file-based sqlite database -# instead of the default in-memory database, which doesn't work well with -# alembic migrations. The file-based database pointed by the environment -# variable itself is not used for testing. Neither is it ever created. Oslo.db -# creates another file-based database for testing purposes and deletes it as a -# part of its test clean-up. Think of setting this environment variable as a -# clue for oslo.db to use file-based database. 
- OS_TEST_DBAPI_ADMIN_CONNECTION=sqlite:////tmp/placeholder-never-created-nor-used.db -usedevelop = True -install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} -deps = -r{toxinidir}/test-requirements.txt -commands = - bandit: bandit -c bandit.yaml -r glance -n5 -p gate - cover: python setup.py testr --coverage --testr-args='^(?!.*test.*coverage).*$' - debug: oslo_debug_helper {posargs} - debug{-py27,-py35}: oslo_debug_helper {posargs} - docs: python setup.py build_sphinx - releasenotes: sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html - venv: {posargs} -whitelist_externals = bash -passenv = *_proxy *_PROXY - -[testenv:py27] -# The periodic oslo-with-master test does not work with the -# conditionals in [testenv], use separate environment. -commands = ostestr --slowest {posargs} - -[testenv:py35] -# The periodic oslo-with-master test does not work with the -# conditionals in [testenv], use separate environment. -commands = ostestr --slowest {posargs} - -[testenv:pep8] -commands = - flake8 {posargs} - # Run security linter - bandit -c bandit.yaml -r glance -n5 -p gate - # Check that .po and .pot files are valid: - bash -c "find glance -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null" - -[testenv:genconfig] -commands = - oslo-config-generator --config-file etc/oslo-config-generator/glance-api.conf - oslo-config-generator --config-file etc/oslo-config-generator/glance-registry.conf - oslo-config-generator --config-file etc/oslo-config-generator/glance-scrubber.conf - oslo-config-generator --config-file etc/oslo-config-generator/glance-cache.conf - oslo-config-generator --config-file etc/oslo-config-generator/glance-manage.conf - -[testenv:api-ref] -# This environment is called from CI scripts to test and publish -# the API Ref to developer.openstack.org. 
-# -whitelist_externals = bash - rm -commands = - rm -rf api-ref/build - sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html - -[flake8] -# TODO(dmllr): Analyze or fix the warnings blacklisted below -# E711 comparison to None should be 'if cond is not None:' -# E712 comparison to True should be 'if cond is True:' or 'if cond:' -# H404 multi line docstring should start with a summary -# H405 multi line docstring summary not separated with an empty line -ignore = E711,E712,H404,H405 -exclude = .venv,.git,.tox,dist,doc,etc,*glance/locale*,*lib/python*,*egg,build - -[hacking] -local-check-factory = glance.hacking.checks.factory -import_exceptions = glance.i18n